Compare commits

...

7 Commits

Author SHA1 Message Date
Ahmed Ibrahim
a33758b396 Cover v2 role service tier precedence 2026-05-11 23:55:58 +03:00
Ahmed Ibrahim
9b0fc86046 Match role service tier precedence 2026-05-11 23:50:47 +03:00
Ahmed Ibrahim
04a51a3542 Simplify role service tier flow 2026-05-11 20:29:02 +03:00
Ahmed Ibrahim
6cd2781d51 codex: fix CI failure on PR #22169 2026-05-11 19:50:50 +03:00
Ahmed Ibrahim
62f7566594 Honor role-defined spawn service tiers 2026-05-11 18:58:23 +03:00
Ahmed Ibrahim
7efdaaedbb Tighten spawn agent service tier guidance 2026-05-11 18:55:00 +03:00
Ahmed Ibrahim
bb1794622b Add spawn agent service tier overrides 2026-05-11 14:47:54 +03:00
8 changed files with 755 additions and 4 deletions

View File

@@ -6,6 +6,7 @@ use crate::skills_load_input_from_config;
use codex_config::ConfigLayerStackOrdering;
use codex_core_plugins::PluginsManager;
use codex_protocol::config_types::ReasoningSummary;
use codex_protocol::config_types::ServiceTier;
use codex_protocol::config_types::Verbosity;
use codex_protocol::openai_models::ReasoningEffort;
use codex_utils_absolute_path::test_support::PathExt;
@@ -214,6 +215,36 @@ async fn apply_role_preserves_unspecified_keys() {
);
}
#[tokio::test]
async fn apply_role_reports_explicit_service_tier() {
    // A role config file that pins `service_tier` must be reflected on the
    // session config once the role has been applied.
    let (home, mut config) = test_config_with_cli_overrides(Vec::new()).await;
    let role_config_path = write_role_config(
        &home,
        "tiered-role.toml",
        r#"developer_instructions = "Stay focused"
service_tier = "priority"
"#,
    )
    .await;
    let role = AgentRoleConfig {
        description: None,
        config_file: Some(role_config_path),
        nickname_candidates: None,
    };
    config.agent_roles.insert("custom".to_string(), role);
    apply_role_to_config(&mut config, Some("custom"))
        .await
        .expect("custom role should apply");
    let expected_tier = ServiceTier::Fast.request_value().to_string();
    assert_eq!(config.service_tier, Some(expected_tier));
}
#[tokio::test]
async fn apply_role_preserves_active_profile_and_model_provider() {
let home = TempDir::new().expect("create temp dir");

View File

@@ -82,6 +82,9 @@ impl ToolHandler for Handler {
.await;
let mut config =
build_agent_spawn_config(&session.get_base_instructions().await, turn.as_ref())?;
if let Some(service_tier) = args.service_tier.as_ref() {
config.service_tier = Some(service_tier.clone());
}
if args.fork_context {
reject_full_fork_spawn_overrides(
role_name,
@@ -101,6 +104,13 @@ impl ToolHandler for Handler {
.await
.map_err(FunctionCallError::RespondToModel)?;
}
apply_spawn_agent_service_tier(
&session,
&mut config,
turn.config.service_tier.as_deref(),
args.service_tier.as_deref(),
)
.await?;
apply_spawn_agent_runtime_overrides(&mut config, turn.as_ref())?;
apply_spawn_agent_overrides(&mut config, child_depth);
@@ -203,6 +213,7 @@ struct SpawnAgentArgs {
agent_type: Option<String>,
model: Option<String>,
reasoning_effort: Option<ReasoningEffort>,
service_tier: Option<String>,
#[serde(default)]
fork_context: bool,
}

View File

@@ -336,6 +336,60 @@ pub(crate) async fn apply_requested_spawn_agent_model_overrides(
Ok(())
}
/// Resolves and validates the service tier for a freshly spawned agent.
///
/// Precedence: a tier already present on `config` (set from the spawn
/// argument and/or the applied role config) wins over the parent's
/// inherited tier (`parent_service_tier`). The winning candidate is then
/// validated against the child's effective model:
/// - supported tier → kept on `config`;
/// - unsupported tier that is NOT the caller's still-effective explicit
///   request (i.e. it was inherited, or a role overrode the request) →
///   silently cleared;
/// - unsupported tier that matches the caller's explicit request → an
///   error listing the model's supported tiers is returned to the model.
pub(crate) async fn apply_spawn_agent_service_tier(
    session: &Session,
    config: &mut Config,
    parent_service_tier: Option<&str>,
    requested_service_tier: Option<&str>,
) -> Result<(), FunctionCallError> {
    // The request is "effective" only if it survived role application: the
    // tier currently on the config is exactly what the caller asked for.
    let requested_service_tier_is_effective =
        requested_service_tier.is_some_and(|requested_service_tier| {
            config.service_tier.as_deref() == Some(requested_service_tier)
        });
    // Candidate tier: config (argument/role) first, then the parent's tier.
    let Some(candidate_service_tier) = config
        .service_tier
        .clone()
        .or_else(|| parent_service_tier.map(str::to_string))
    else {
        // Nothing requested or inherited: spawn without a service tier.
        config.service_tier = None;
        return Ok(());
    };
    // Validation runs against the child's final model, which must be known.
    let model = config.model.clone().ok_or_else(|| {
        FunctionCallError::RespondToModel(
            "spawn_agent could not resolve the child model for service tier validation".to_string(),
        )
    })?;
    let model_info = session
        .services
        .models_manager
        .get_model_info(model.as_str(), &config.to_models_manager_config())
        .await;
    if model_info.supports_service_tier(candidate_service_tier.as_str()) {
        config.service_tier = Some(candidate_service_tier);
        return Ok(());
    }
    // An unsupported tier that was merely inherited (or role-overridden) is
    // dropped silently rather than failing the spawn.
    if !requested_service_tier_is_effective {
        config.service_tier = None;
        return Ok(());
    }
    // Explicitly requested and unsupported: report the supported set.
    let supported_service_tiers = if model_info.service_tiers.is_empty() {
        "none".to_string()
    } else {
        model_info
            .service_tiers
            .iter()
            .map(|tier| tier.id.as_str())
            .collect::<Vec<_>>()
            .join(", ")
    };
    Err(FunctionCallError::RespondToModel(format!(
        "Service tier `{candidate_service_tier}` is not supported for model `{model}`. Supported service tiers: {supported_service_tiers}"
    )))
}
fn find_spawn_agent_model_name(
available_models: &[codex_protocol::openai_models::ModelPreset],
requested_model: &str,

View File

@@ -8,6 +8,7 @@ use std::collections::BTreeMap;
const SPAWN_AGENT_INHERITED_MODEL_GUIDANCE: &str = "Spawned agents inherit your current model by default. Omit `model` to use that preferred default; set `model` only when an explicit override is needed.";
const SPAWN_AGENT_MODEL_OVERRIDE_DESCRIPTION: &str = "Optional model override for the new agent. Leave unset to inherit the same model as the parent, which is the preferred default. Only set this when the user explicitly asks for a different model or the task clearly requires one.";
const SPAWN_AGENT_SERVICE_TIER_OVERRIDE_DESCRIPTION: &str = "Optional service tier override for the new agent. Leave unset unless the user explicitly asks for a service tier override. When unset, the spawned agent inherits the parent's active service tier if its effective model supports it; otherwise no service tier is used.";
#[derive(Debug, Clone, Default)]
pub struct SpawnAgentToolOptions {
@@ -545,6 +546,12 @@ fn spawn_agent_common_properties_v1(agent_type_description: &str) -> BTreeMap<St
.to_string(),
)),
),
(
"service_tier".to_string(),
JsonSchema::string(Some(
SPAWN_AGENT_SERVICE_TIER_OVERRIDE_DESCRIPTION.to_string(),
)),
),
])
}
@@ -578,6 +585,12 @@ fn spawn_agent_common_properties_v2(agent_type_description: &str) -> BTreeMap<St
.to_string(),
)),
),
(
"service_tier".to_string(),
JsonSchema::string(Some(
SPAWN_AGENT_SERVICE_TIER_OVERRIDE_DESCRIPTION.to_string(),
)),
),
])
}
@@ -585,6 +598,7 @@ fn hide_spawn_agent_metadata_options(properties: &mut BTreeMap<String, JsonSchem
properties.remove("agent_type");
properties.remove("model");
properties.remove("reasoning_effort");
properties.remove("service_tier");
}
fn spawn_agent_tool_description(
@@ -712,13 +726,24 @@ fn spawn_agent_models_description(models: &[ModelPreset]) -> String {
.map(|preset| format!("{} ({})", preset.effort, preset.description))
.collect::<Vec<_>>()
.join(", ");
let service_tiers = if model.service_tiers.is_empty() {
"none".to_string()
} else {
model
.service_tiers
.iter()
.map(|tier| format!("{} ({}: {})", tier.id, tier.name, tier.description))
.collect::<Vec<_>>()
.join(", ")
};
format!(
"- {} (`{}`): {} Default reasoning effort: {}. Supported reasoning efforts: {}.",
"- {} (`{}`): {} Default reasoning effort: {}. Supported reasoning efforts: {}. Supported service tiers: {}.",
model.display_name,
model.model,
model.description,
model.default_reasoning_effort,
efforts
efforts,
service_tiers
)
})
.collect::<Vec<_>>()

View File

@@ -1,5 +1,6 @@
use super::*;
use codex_protocol::openai_models::ModelPreset;
use codex_protocol::openai_models::ModelServiceTier;
use codex_protocol::openai_models::ReasoningEffort;
use codex_protocol::openai_models::ReasoningEffortPreset;
use codex_tools::JsonSchemaPrimitiveType;
@@ -20,7 +21,11 @@ fn model_preset(id: &str, show_in_picker: bool) -> ModelPreset {
}],
supports_personality: false,
additional_speed_tiers: Vec::new(),
service_tiers: Vec::new(),
service_tiers: vec![ModelServiceTier {
id: "priority".to_string(),
name: "Fast".to_string(),
description: "1.5x speed, increased usage".to_string(),
}],
is_default: false,
upgrade: None,
show_in_picker,
@@ -70,6 +75,10 @@ fn spawn_agent_tool_v2_requires_task_name_and_lists_visible_models() {
.contains("Available model overrides (optional; inherited parent model is preferred):")
);
assert!(description.contains("visible display (`visible-model`)"));
assert!(
description
.contains("Supported service tiers: priority (Fast: 1.5x speed, increased usage).")
);
assert!(!description.contains("hidden display (`hidden-model`)"));
assert!(properties.contains_key("task_name"));
assert!(properties.contains_key("message"));
@@ -86,6 +95,12 @@ fn spawn_agent_tool_v2_requires_task_name_and_lists_visible_models() {
.and_then(|schema| schema.description.as_deref()),
Some(SPAWN_AGENT_MODEL_OVERRIDE_DESCRIPTION)
);
assert_eq!(
properties
.get("service_tier")
.and_then(|schema| schema.description.as_deref()),
Some(SPAWN_AGENT_SERVICE_TIER_OVERRIDE_DESCRIPTION)
);
assert_eq!(
parameters.required.as_ref(),
Some(&vec!["task_name".to_string(), "message".to_string()])
@@ -127,6 +142,37 @@ fn spawn_agent_tool_v1_keeps_legacy_fork_context_field() {
.and_then(|schema| schema.description.as_deref()),
Some(SPAWN_AGENT_MODEL_OVERRIDE_DESCRIPTION)
);
assert_eq!(
properties
.get("service_tier")
.and_then(|schema| schema.description.as_deref()),
Some(SPAWN_AGENT_SERVICE_TIER_OVERRIDE_DESCRIPTION)
);
}
#[test]
fn spawn_agent_tool_hides_service_tier_with_spawn_metadata() {
    // When agent_type/model/reasoning metadata is hidden from the schema,
    // the service tier override must be hidden with it.
    let options = SpawnAgentToolOptions {
        available_models: vec![model_preset("visible", /*show_in_picker*/ true)],
        agent_type_description: "role help".to_string(),
        hide_agent_type_model_reasoning: true,
        include_usage_hint: true,
        usage_hint_text: None,
        max_concurrent_threads_per_session: Some(4),
    };
    let tool = create_spawn_agent_tool_v2(options);
    let ToolSpec::Function(ResponsesApiTool { parameters, .. }) = tool else {
        panic!("spawn_agent should be a function tool");
    };
    let properties = parameters
        .properties
        .as_ref()
        .expect("spawn_agent should use object params");
    for hidden_key in ["agent_type", "model", "reasoning_effort", "service_tier"] {
        assert!(!properties.contains_key(hidden_key));
    }
}
#[test]

View File

@@ -23,6 +23,7 @@ use codex_model_provider::create_model_provider;
use codex_model_provider_info::built_in_model_providers;
use codex_protocol::AgentPath;
use codex_protocol::ThreadId;
use codex_protocol::config_types::ServiceTier;
use codex_protocol::config_types::ShellEnvironmentPolicy;
use codex_protocol::models::BaseInstructions;
use codex_protocol::models::ContentItem;
@@ -132,6 +133,38 @@ model_reasoning_effort = "minimal"
role_name
}
/// Installs a role config that pins both the child model and a service
/// tier, registers it under a fixed role name on the turn's config, and
/// returns that role name.
async fn install_role_with_service_tier(turn: &mut TurnContext, model: &str) -> String {
    let role_name = "tier-role".to_string();
    tokio::fs::create_dir_all(&turn.config.codex_home)
        .await
        .expect("codex home should be created");
    let role_config_path = turn
        .config
        .codex_home
        .as_path()
        .join(format!("{role_name}.toml"));
    let role_config_contents = format!(
        "developer_instructions = \"Use the configured tier\"\nmodel = \"{model}\"\nservice_tier = \"{}\"\n",
        ServiceTier::Fast.request_value()
    );
    tokio::fs::write(&role_config_path, role_config_contents)
        .await
        .expect("role config should be written");
    let role = AgentRoleConfig {
        description: Some("Tiered role".to_string()),
        config_file: Some(role_config_path),
        nickname_candidates: None,
    };
    // Publish the updated config back onto the turn so later spawns see it.
    let mut updated_config = (*turn.config).clone();
    updated_config.agent_roles.insert(role_name.clone(), role);
    turn.config = Arc::new(updated_config);
    role_name
}
fn expect_text_output<T>(output: T) -> (String, Option<bool>)
where
T: ToolOutput,
@@ -444,6 +477,533 @@ async fn multi_agent_v2_spawn_defaults_to_full_fork_and_rejects_child_model_over
);
}
#[tokio::test]
async fn spawn_agent_service_tier_override_uses_supported_child_model_tier() {
    // Minimal view of the spawn_agent JSON result; only the fields this
    // test inspects are deserialized.
    #[derive(Debug, Deserialize)]
    struct SpawnAgentResult {
        agent_id: String,
    }
    let (mut session, turn) = make_session_and_context().await;
    let manager = thread_manager();
    let root = manager
        .start_thread((*turn.config).clone())
        .await
        .expect("root thread should start");
    // Wire the session to the thread manager so the spawned child is
    // registered under the root thread.
    session.services.agent_control = manager.agent_control();
    session.conversation_id = root.thread_id;
    // Explicit tier for a child model that supports it should be accepted.
    let output = SpawnAgentHandler::default()
        .handle(invocation(
            Arc::new(session),
            Arc::new(turn),
            "spawn_agent",
            function_payload(json!({
                "message": "inspect this repo",
                "model": "gpt-5.4",
                "service_tier": ServiceTier::Fast.request_value()
            })),
        ))
        .await
        .expect("spawn_agent should accept a supported explicit service tier");
    let (content, _) = expect_text_output(output);
    let result: SpawnAgentResult =
        serde_json::from_str(&content).expect("spawn_agent result should be json");
    // The explicit tier must survive into the child's persisted config.
    let snapshot = manager
        .get_thread(parse_agent_id(&result.agent_id))
        .await
        .expect("spawned agent thread should exist")
        .config_snapshot()
        .await;
    assert_eq!(
        snapshot.service_tier,
        Some(ServiceTier::Fast.request_value().to_string())
    );
}
#[tokio::test]
async fn spawn_agent_role_service_tier_persists_in_child_config() {
    // Minimal view of the spawn_agent JSON result.
    #[derive(Debug, Deserialize)]
    struct SpawnAgentResult {
        agent_id: String,
    }
    let (mut session, mut turn) = make_session_and_context().await;
    // Role config pins both the child model and a service tier.
    let role_name = install_role_with_service_tier(&mut turn, "gpt-5.4").await;
    let manager = thread_manager();
    let root = manager
        .start_thread((*turn.config).clone())
        .await
        .expect("root thread should start");
    session.services.agent_control = manager.agent_control();
    session.conversation_id = root.thread_id;
    // No explicit `service_tier` argument: the role's tier should apply.
    let output = SpawnAgentHandler::default()
        .handle(invocation(
            Arc::new(session),
            Arc::new(turn),
            "spawn_agent",
            function_payload(json!({
                "message": "inspect this repo",
                "agent_type": role_name
            })),
        ))
        .await
        .expect("role-configured service tier should persist in the child config");
    let (content, _) = expect_text_output(output);
    let result: SpawnAgentResult =
        serde_json::from_str(&content).expect("spawn_agent result should be json");
    let snapshot = manager
        .get_thread(parse_agent_id(&result.agent_id))
        .await
        .expect("spawned agent thread should exist")
        .config_snapshot()
        .await;
    assert_eq!(
        snapshot.service_tier,
        Some(ServiceTier::Fast.request_value().to_string())
    );
}
#[tokio::test]
async fn spawn_agent_role_service_tier_overrides_spawn_argument() {
    // Minimal view of the spawn_agent JSON result.
    #[derive(Debug, Deserialize)]
    struct SpawnAgentResult {
        agent_id: String,
    }
    let (mut session, mut turn) = make_session_and_context().await;
    let role_name = install_role_with_service_tier(&mut turn, "gpt-5.4").await;
    let manager = thread_manager();
    let root = manager
        .start_thread((*turn.config).clone())
        .await
        .expect("root thread should start");
    session.services.agent_control = manager.agent_control();
    session.conversation_id = root.thread_id;
    // The spawn argument asks for an unknown tier, but the role's tier has
    // precedence, so no rejection should occur.
    let output = SpawnAgentHandler::default()
        .handle(invocation(
            Arc::new(session),
            Arc::new(turn),
            "spawn_agent",
            function_payload(json!({
                "message": "inspect this repo",
                "agent_type": role_name,
                "service_tier": "turbo"
            })),
        ))
        .await
        .expect("role-configured service tier should win over the spawn argument");
    let (content, _) = expect_text_output(output);
    let result: SpawnAgentResult =
        serde_json::from_str(&content).expect("spawn_agent result should be json");
    // Child config carries the role's tier, not the rejected argument.
    let snapshot = manager
        .get_thread(parse_agent_id(&result.agent_id))
        .await
        .expect("spawned agent thread should exist")
        .config_snapshot()
        .await;
    assert_eq!(
        snapshot.service_tier,
        Some(ServiceTier::Fast.request_value().to_string())
    );
}
#[tokio::test]
async fn spawn_agent_service_tier_override_rejects_unknown_tier() {
    // `turbo` is not a tier advertised by `gpt-5.4`, so an explicit
    // override must be surfaced back to the model as an error.
    let (session, turn) = make_session_and_context().await;
    let payload = function_payload(json!({
        "message": "inspect this repo",
        "model": "gpt-5.4",
        "service_tier": "turbo"
    }));
    let result = SpawnAgentHandler::default()
        .handle(invocation(
            Arc::new(session),
            Arc::new(turn),
            "spawn_agent",
            payload,
        ))
        .await;
    let err = result.expect_err("unknown service tier should be rejected");
    let expected_message = "Service tier `turbo` is not supported for model `gpt-5.4`. Supported service tiers: priority";
    assert_eq!(
        err,
        FunctionCallError::RespondToModel(expected_message.to_string())
    );
}
#[tokio::test]
async fn spawn_agent_service_tier_override_rejects_tier_unsupported_by_child_model() {
    // `gpt-5.3-codex` advertises no service tiers, so the explicit request
    // must be validated against the *child* model and rejected.
    let (session, turn) = make_session_and_context().await;
    let payload = function_payload(json!({
        "message": "inspect this repo",
        "model": "gpt-5.3-codex",
        "service_tier": ServiceTier::Fast.request_value()
    }));
    let result = SpawnAgentHandler::default()
        .handle(invocation(
            Arc::new(session),
            Arc::new(turn),
            "spawn_agent",
            payload,
        ))
        .await;
    let err = result.expect_err("tier unsupported by the final child model should be rejected");
    let expected_message = "Service tier `priority` is not supported for model `gpt-5.3-codex`. Supported service tiers: none";
    assert_eq!(
        err,
        FunctionCallError::RespondToModel(expected_message.to_string())
    );
}
#[tokio::test]
async fn multi_agent_v2_spawn_role_service_tier_persists_in_child_config() {
    // v2 results identify the child by task name rather than agent id.
    #[derive(Debug, Deserialize)]
    struct SpawnAgentResult {
        task_name: String,
    }
    let (mut session, mut turn) = make_session_and_context().await;
    let role_name = install_role_with_service_tier(&mut turn, "gpt-5.4").await;
    // The v2 handler requires the MultiAgentV2 feature flag.
    let mut config = (*turn.config).clone();
    config
        .features
        .enable(Feature::MultiAgentV2)
        .expect("test config should allow feature update");
    turn.config = Arc::new(config);
    let manager = thread_manager();
    let root = manager
        .start_thread((*turn.config).clone())
        .await
        .expect("root thread should start");
    session.services.agent_control = manager.agent_control();
    session.conversation_id = root.thread_id;
    let session = Arc::new(session);
    let turn = Arc::new(turn);
    let output = SpawnAgentHandlerV2::default()
        .handle(invocation(
            session.clone(),
            turn.clone(),
            "spawn_agent",
            function_payload(json!({
                "message": "inspect this repo",
                "task_name": "tiered_role",
                "agent_type": role_name,
                "fork_turns": "1"
            })),
        ))
        .await
        .expect("role-owned service tier should persist in v2 child config");
    let (content, _) = expect_text_output(output);
    let result: SpawnAgentResult =
        serde_json::from_str(&content).expect("spawn_agent result should be json");
    // Resolve the child thread from the returned task name.
    let child_thread_id = session
        .services
        .agent_control
        .resolve_agent_reference(
            session.conversation_id,
            &turn.session_source,
            result.task_name.as_str(),
        )
        .await
        .expect("spawned task name should resolve");
    let snapshot = manager
        .get_thread(child_thread_id)
        .await
        .expect("spawned agent thread should exist")
        .config_snapshot()
        .await;
    assert_eq!(
        snapshot.service_tier,
        Some(ServiceTier::Fast.request_value().to_string())
    );
}
#[tokio::test]
async fn multi_agent_v2_spawn_role_service_tier_overrides_spawn_argument() {
    // v2 results identify the child by task name rather than agent id.
    #[derive(Debug, Deserialize)]
    struct SpawnAgentResult {
        task_name: String,
    }
    let (mut session, mut turn) = make_session_and_context().await;
    let role_name = install_role_with_service_tier(&mut turn, "gpt-5.4").await;
    // The v2 handler requires the MultiAgentV2 feature flag.
    let mut config = (*turn.config).clone();
    config
        .features
        .enable(Feature::MultiAgentV2)
        .expect("test config should allow feature update");
    turn.config = Arc::new(config);
    let manager = thread_manager();
    let root = manager
        .start_thread((*turn.config).clone())
        .await
        .expect("root thread should start");
    session.services.agent_control = manager.agent_control();
    session.conversation_id = root.thread_id;
    let session = Arc::new(session);
    let turn = Arc::new(turn);
    // The `turbo` argument is superseded by the role's tier, so the spawn
    // should succeed instead of being rejected.
    let output = SpawnAgentHandlerV2::default()
        .handle(invocation(
            session.clone(),
            turn.clone(),
            "spawn_agent",
            function_payload(json!({
                "message": "inspect this repo",
                "task_name": "tiered_role_override",
                "agent_type": role_name,
                "service_tier": "turbo",
                "fork_turns": "1"
            })),
        ))
        .await
        .expect("role-configured service tier should win over the spawn argument");
    let (content, _) = expect_text_output(output);
    let result: SpawnAgentResult =
        serde_json::from_str(&content).expect("spawn_agent result should be json");
    let child_thread_id = session
        .services
        .agent_control
        .resolve_agent_reference(
            session.conversation_id,
            &turn.session_source,
            result.task_name.as_str(),
        )
        .await
        .expect("spawned task name should resolve");
    let snapshot = manager
        .get_thread(child_thread_id)
        .await
        .expect("spawned agent thread should exist")
        .config_snapshot()
        .await;
    assert_eq!(
        snapshot.service_tier,
        Some(ServiceTier::Fast.request_value().to_string())
    );
}
#[tokio::test]
async fn spawn_agent_inherits_supported_parent_service_tier() {
    // Minimal view of the spawn_agent JSON result.
    #[derive(Debug, Deserialize)]
    struct SpawnAgentResult {
        agent_id: String,
    }
    let (mut session, turn) = make_session_and_context().await;
    let mut turn = turn
        .with_model("gpt-5.4".to_string(), &session.services.models_manager)
        .await;
    // Parent runs with an active service tier that gpt-5.4 supports.
    let mut config = (*turn.config).clone();
    config.service_tier = Some(ServiceTier::Fast.request_value().to_string());
    turn.config = Arc::new(config);
    let manager = thread_manager();
    let root = manager
        .start_thread((*turn.config).clone())
        .await
        .expect("root thread should start");
    session.services.agent_control = manager.agent_control();
    session.conversation_id = root.thread_id;
    // No explicit tier argument: the parent's tier should be inherited.
    let output = SpawnAgentHandler::default()
        .handle(invocation(
            Arc::new(session),
            Arc::new(turn),
            "spawn_agent",
            function_payload(json!({"message": "inspect this repo"})),
        ))
        .await
        .expect("spawn_agent should inherit a supported parent service tier");
    let (content, _) = expect_text_output(output);
    let result: SpawnAgentResult =
        serde_json::from_str(&content).expect("spawn_agent result should be json");
    let snapshot = manager
        .get_thread(parse_agent_id(&result.agent_id))
        .await
        .expect("spawned agent thread should exist")
        .config_snapshot()
        .await;
    assert_eq!(
        snapshot.service_tier,
        Some(ServiceTier::Fast.request_value().to_string())
    );
}
#[tokio::test]
async fn spawn_agent_clears_inherited_service_tier_when_child_model_does_not_support_it() {
    // Minimal view of the spawn_agent JSON result.
    #[derive(Debug, Deserialize)]
    struct SpawnAgentResult {
        agent_id: String,
    }
    let (mut session, turn) = make_session_and_context().await;
    let mut turn = turn
        .with_model("gpt-5.4".to_string(), &session.services.models_manager)
        .await;
    // Parent has an active tier, but the child will run a model that does
    // not support it.
    let mut config = (*turn.config).clone();
    config.service_tier = Some(ServiceTier::Fast.request_value().to_string());
    turn.config = Arc::new(config);
    let manager = thread_manager();
    let root = manager
        .start_thread((*turn.config).clone())
        .await
        .expect("root thread should start");
    session.services.agent_control = manager.agent_control();
    session.conversation_id = root.thread_id;
    // Inherited (non-explicit) tiers are dropped silently, not rejected.
    let output = SpawnAgentHandler::default()
        .handle(invocation(
            Arc::new(session),
            Arc::new(turn),
            "spawn_agent",
            function_payload(json!({
                "message": "inspect this repo",
                "model": "gpt-5.3-codex"
            })),
        ))
        .await
        .expect("spawn_agent should clear unsupported inherited service tier");
    let (content, _) = expect_text_output(output);
    let result: SpawnAgentResult =
        serde_json::from_str(&content).expect("spawn_agent result should be json");
    let snapshot = manager
        .get_thread(parse_agent_id(&result.agent_id))
        .await
        .expect("spawned agent thread should exist")
        .config_snapshot()
        .await;
    assert_eq!(snapshot.service_tier, None);
}
#[tokio::test]
async fn spawn_agent_full_history_fork_accepts_explicit_service_tier() {
    // Minimal view of the spawn_agent JSON result.
    #[derive(Debug, Deserialize)]
    struct SpawnAgentResult {
        agent_id: String,
    }
    let (mut session, turn) = make_session_and_context().await;
    let turn = turn
        .with_model("gpt-5.4".to_string(), &session.services.models_manager)
        .await;
    let manager = thread_manager();
    let root = manager
        .start_thread((*turn.config).clone())
        .await
        .expect("root thread should start");
    session.services.agent_control = manager.agent_control();
    session.conversation_id = root.thread_id;
    // A full-history fork rejects model/role overrides, but an explicit
    // service tier is still allowed through.
    let output = SpawnAgentHandler::default()
        .handle(invocation(
            Arc::new(session),
            Arc::new(turn),
            "spawn_agent",
            function_payload(json!({
                "message": "inspect this repo",
                "fork_context": true,
                "service_tier": ServiceTier::Fast.request_value()
            })),
        ))
        .await
        .expect("full-history fork should accept explicit service tier");
    let (content, _) = expect_text_output(output);
    let result: SpawnAgentResult =
        serde_json::from_str(&content).expect("spawn_agent result should be json");
    let snapshot = manager
        .get_thread(parse_agent_id(&result.agent_id))
        .await
        .expect("spawned agent thread should exist")
        .config_snapshot()
        .await;
    assert_eq!(
        snapshot.service_tier,
        Some(ServiceTier::Fast.request_value().to_string())
    );
}
#[tokio::test]
async fn multi_agent_v2_full_history_fork_accepts_explicit_service_tier() {
    // v2 results identify the child by task name rather than agent id.
    #[derive(Debug, Deserialize)]
    struct SpawnAgentResult {
        task_name: String,
    }
    let (mut session, turn) = make_session_and_context().await;
    let mut turn = turn
        .with_model("gpt-5.4".to_string(), &session.services.models_manager)
        .await;
    // The v2 handler requires the MultiAgentV2 feature flag.
    let mut config = (*turn.config).clone();
    config
        .features
        .enable(Feature::MultiAgentV2)
        .expect("test config should allow feature update");
    turn.config = Arc::new(config);
    let manager = thread_manager();
    let root = manager
        .start_thread((*turn.config).clone())
        .await
        .expect("root thread should start");
    session.services.agent_control = manager.agent_control();
    session.conversation_id = root.thread_id;
    let session = Arc::new(session);
    let turn = Arc::new(turn);
    // No fork_turns: v2 defaults to a full-history fork, which should
    // still accept an explicit service tier.
    let output = SpawnAgentHandlerV2::default()
        .handle(invocation(
            session.clone(),
            turn.clone(),
            "spawn_agent",
            function_payload(json!({
                "message": "inspect this repo",
                "task_name": "fork_with_tier",
                "service_tier": ServiceTier::Fast.request_value()
            })),
        ))
        .await
        .expect("multi-agent v2 full-history fork should accept explicit service tier");
    let (content, _) = expect_text_output(output);
    let result: SpawnAgentResult =
        serde_json::from_str(&content).expect("spawn_agent result should be json");
    let child_thread_id = session
        .services
        .agent_control
        .resolve_agent_reference(
            session.conversation_id,
            &turn.session_source,
            result.task_name.as_str(),
        )
        .await
        .expect("spawned task name should resolve");
    let snapshot = manager
        .get_thread(child_thread_id)
        .await
        .expect("spawned agent thread should exist")
        .config_snapshot()
        .await;
    assert_eq!(
        snapshot.service_tier,
        Some(ServiceTier::Fast.request_value().to_string())
    );
}
#[tokio::test]
async fn multi_agent_v2_spawn_partial_fork_turns_allows_agent_type_override() {
let (mut session, mut turn) = make_session_and_context().await;

View File

@@ -81,6 +81,9 @@ impl ToolHandler for Handler {
.await;
let mut config =
build_agent_spawn_config(&session.get_base_instructions().await, turn.as_ref())?;
if let Some(service_tier) = args.service_tier.as_ref() {
config.service_tier = Some(service_tier.clone());
}
if matches!(fork_mode, Some(SpawnAgentForkMode::FullHistory)) {
reject_full_fork_spawn_overrides(
role_name,
@@ -100,6 +103,13 @@ impl ToolHandler for Handler {
.await
.map_err(FunctionCallError::RespondToModel)?;
}
apply_spawn_agent_service_tier(
&session,
&mut config,
turn.config.service_tier.as_deref(),
args.service_tier.as_deref(),
)
.await?;
apply_spawn_agent_runtime_overrides(&mut config, turn.as_ref())?;
apply_spawn_agent_overrides(&mut config, child_depth);
@@ -236,6 +246,7 @@ struct SpawnAgentArgs {
agent_type: Option<String>,
model: Option<String>,
reasoning_effort: Option<ReasoningEffort>,
service_tier: Option<String>,
fork_turns: Option<String>,
fork_context: Option<bool>,
}

View File

@@ -9,6 +9,7 @@ use codex_models_manager::manager::SharedModelsManager;
use codex_protocol::config_types::ReasoningSummary;
use codex_protocol::openai_models::ConfigShellToolType;
use codex_protocol::openai_models::ModelInfo;
use codex_protocol::openai_models::ModelServiceTier;
use codex_protocol::openai_models::ModelVisibility;
use codex_protocol::openai_models::ModelsResponse;
use codex_protocol::openai_models::ReasoningEffort;
@@ -52,6 +53,7 @@ fn test_model_info(
visibility: ModelVisibility,
default_reasoning_level: ReasoningEffort,
supported_reasoning_levels: Vec<ReasoningEffortPreset>,
service_tiers: Vec<ModelServiceTier>,
) -> ModelInfo {
ModelInfo {
slug: slug.to_string(),
@@ -67,7 +69,7 @@ fn test_model_info(
supports_search_tool: false,
priority: 1,
additional_speed_tiers: Vec::new(),
service_tiers: Vec::new(),
service_tiers,
upgrade: None,
base_instructions: "base instructions".to_string(),
model_messages: None,
@@ -126,6 +128,11 @@ async fn spawn_agent_description_lists_visible_models_and_reasoning_efforts() ->
description: "Deep dive".to_string(),
},
],
vec![ModelServiceTier {
id: "priority".to_string(),
name: "Fast".to_string(),
description: "1.5x speed, increased usage".to_string(),
}],
),
test_model_info(
"hidden-model",
@@ -137,6 +144,7 @@ async fn spawn_agent_description_lists_visible_models_and_reasoning_efforts() ->
effort: ReasoningEffort::Low,
description: "Not visible".to_string(),
}],
Vec::new(),
),
],
},
@@ -195,6 +203,11 @@ async fn spawn_agent_description_lists_visible_models_and_reasoning_efforts() ->
description.contains("low (Quick scan), high (Deep dive)."),
"expected reasoning efforts in spawn_agent description: {description:?}"
);
assert!(
description
.contains("Supported service tiers: priority (Fast: 1.5x speed, increased usage)."),
"expected service tier guidance in spawn_agent description: {description:?}"
);
assert!(
!description.contains("Hidden Model"),
"hidden picker model should be omitted from spawn_agent description: {description:?}"