Compare commits

...

1 Commit

Author SHA1 Message Date
Gene Oden
b9829ca000 feat: override model and reasoning effort for subagents
Useful for starting the top-level with high/xhigh effort then
subagents with different models and/or reasoning levels as appropriate
to the task.
2026-01-23 10:10:39 -08:00
5 changed files with 140 additions and 2 deletions

View File

@@ -13,6 +13,7 @@ use crate::tools::registry::ToolKind;
use async_trait::async_trait;
use codex_protocol::ThreadId;
use codex_protocol::models::BaseInstructions;
use codex_protocol::openai_models::ReasoningEffort;
use codex_protocol::protocol::CollabAgentInteractionBeginEvent;
use codex_protocol::protocol::CollabAgentInteractionEndEvent;
use codex_protocol::protocol::CollabAgentSpawnBeginEvent;
@@ -84,6 +85,8 @@ mod spawn {
/// Deserialized arguments for the `spawn_agent` tool call.
struct SpawnAgentArgs {
/// Initial prompt delivered to the newly spawned agent; rejected when blank.
message: String,
/// Role to apply to the spawned agent; defaults to `AgentRole::Default` when absent.
agent_type: Option<AgentRole>,
/// Optional model override; when set, supersedes the inherited session model
/// and any role defaults. Must be non-empty if provided.
model: Option<String>,
/// Optional reasoning-effort override for the spawned agent.
reasoning_effort: Option<ReasoningEffort>,
}
#[derive(Debug, Serialize)]
@@ -99,6 +102,13 @@ mod spawn {
) -> Result<ToolOutput, FunctionCallError> {
let args: SpawnAgentArgs = parse_arguments(&arguments)?;
let agent_role = args.agent_type.unwrap_or(AgentRole::Default);
if let Some(model) = args.model.as_deref()
&& model.trim().is_empty()
{
return Err(FunctionCallError::RespondToModel(
"model must be non-empty when provided".to_string(),
));
}
let prompt = args.message;
if prompt.trim().is_empty() {
return Err(FunctionCallError::RespondToModel(
@@ -121,7 +131,12 @@ mod spawn {
agent_role
.apply_to_config(&mut config)
.map_err(FunctionCallError::RespondToModel)?;
// Allow explicit per-spawn overrides to supersede both the inherited turn
// configuration and any role-specific defaults (e.g. Worker model).
apply_agent_spawn_overrides(&mut config, args.model, args.reasoning_effort);
let model = config.model.clone();
let reasoning_effort = config.model_reasoning_effort;
let result = session
.services
.agent_control
@@ -143,6 +158,8 @@ mod spawn {
sender_thread_id: session.conversation_id,
new_thread_id,
prompt,
model,
reasoning_effort,
status,
}
.into(),
@@ -592,6 +609,19 @@ fn build_agent_spawn_config(
Ok(config)
}
/// Apply explicit per-spawn overrides to the agent's `config`.
///
/// Overrides supplied by the caller take precedence over both the inherited
/// turn configuration and any role-specific defaults already applied. A
/// `None` override leaves the corresponding field untouched.
fn apply_agent_spawn_overrides(
    config: &mut Config,
    model: Option<String>,
    reasoning_effort: Option<ReasoningEffort>,
) {
    // Only overwrite when an override is present; assigning the whole
    // `Option` avoids rebinding the inner value.
    if model.is_some() {
        config.model = model;
    }
    if reasoning_effort.is_some() {
        config.model_reasoning_effort = reasoning_effort;
    }
}
#[cfg(test)]
mod tests {
use super::*;
@@ -723,6 +753,54 @@ mod tests {
);
}
/// A whitespace-only `model` override must be rejected before spawning.
#[tokio::test]
async fn spawn_agent_rejects_empty_model_override() {
    let (session, turn) = make_session_and_context().await;

    // Blank (whitespace-only) model string should trip the validation guard.
    let payload = function_payload(json!({
        "message": "hello",
        "model": " ",
    }));
    let invocation = invocation(Arc::new(session), Arc::new(turn), "spawn_agent", payload);

    let err = match CollabHandler.handle(invocation).await {
        Err(err) => err,
        Ok(_) => panic!("empty model should be rejected"),
    };
    assert_eq!(
        err,
        FunctionCallError::RespondToModel("model must be non-empty when provided".to_string())
    );
}
/// Explicit spawn-time overrides must supersede role-applied defaults
/// (e.g. the Worker role's model choice).
#[tokio::test]
async fn spawn_overrides_win_over_role_defaults() {
    use crate::agent::AgentRole;

    let (_session, turn) = make_session_and_context().await;
    let base_instructions = BaseInstructions {
        text: "base".to_string(),
    };

    // Build a config and let the Worker role stamp its defaults onto it.
    let mut config = build_agent_spawn_config(&base_instructions, &turn).expect("spawn config");
    AgentRole::Worker
        .apply_to_config(&mut config)
        .expect("apply worker role");

    // Explicit overrides should now replace whatever the role set.
    apply_agent_spawn_overrides(
        &mut config,
        Some("gpt-5".to_string()),
        Some(ReasoningEffort::Minimal),
    );

    assert_eq!(config.model.as_deref(), Some("gpt-5"));
    assert_eq!(config.model_reasoning_effort, Some(ReasoningEffort::Minimal));
}
#[tokio::test]
async fn send_input_rejects_empty_message() {
let (session, turn) = make_session_and_context().await;

View File

@@ -454,6 +454,24 @@ fn create_spawn_agent_tool() -> ToolSpec {
)),
},
);
properties.insert(
"model".to_string(),
JsonSchema::String {
description: Some(
"Optional model override for the spawned agent (e.g. \"gpt-5.2-codex\"). When set, overrides the inherited session model and any agent_type defaults."
.to_string(),
),
},
);
properties.insert(
"reasoning_effort".to_string(),
JsonSchema::String {
description: Some(
"Optional reasoning effort for the spawned agent (one of: \"none\", \"minimal\", \"low\", \"medium\", \"high\", \"xhigh\"). When set, overrides the inherited session reasoning effort."
.to_string(),
),
},
);
ToolSpec::Function(ResponsesApiTool {
name: "spawn_agent".to_string(),

View File

@@ -53,9 +53,37 @@ You are Codex Orchestrator, based on GPT-5. You are running as an orchestration
* By default, workers must not spawn sub-agents unless explicitly allowed.
* When multiple workers are active, you may pass multiple IDs to `wait` to react to the first completion and keep the workflow event-driven and use a long timeout (e.g. 5 minutes).
## Choosing model and reasoning effort
When spawning a worker, you may optionally override its `model` and/or `reasoning_effort`. If provided, these overrides take precedence over the inherited session settings and any `agent_type` defaults.
### Models (OpenAI)
Use the most capable model that matches the task and latency/cost constraints:
* `gpt-5.2-codex` (default): best general choice for repo-scale coding, tool use, and multi-step changes.
* `gpt-5.1-codex-max`: strong for deep reasoning on tricky code, large diffs, and higher-risk refactors.
* `gpt-5.1-codex-mini`: faster/cheaper; good for quick triage, small edits, and straightforward tasks.
* `gpt-5.2`: strong general model when the work is less tool-heavy and needs broader knowledge/explanations.
* `gpt-5`: general model with broad knowledge; use for documentation, reasoning, or non-coding-heavy sub-tasks.
* `gpt-5.1`: older general model; use only when you need parity with prior behavior.
### Reasoning effort
Pick the smallest effort that is likely to succeed on the first try:
* `minimal`: trivial lookups, short explanations, mechanical edits.
* `low`: straightforward tasks with clear requirements; fast iteration.
* `medium`: default for typical coding/debugging work.
* `high`: ambiguous tasks, multi-file changes, tricky debugging, integration work.
* `xhigh`: large/complex refactors, deep investigations, or when prior attempts failed.
* `none`: rarely; only when you explicitly want to suppress extra reasoning overhead.
Note: not every model supports every effort. If an effort isn't supported, choose the closest supported level (e.g. `high` instead of `xhigh`).
## Collab tools
* `spawn_agent`: create a worker with an initial prompt (`agent_type` required).
* `spawn_agent`: create a worker with an initial prompt (`agent_type` optional; `model` and `reasoning_effort` optional overrides).
* `send_input`: send follow-ups or fixes (queued unless interrupted).
* `send_input(interrupt=true)`: stop current work and redirect immediately.
* `wait`: wait for one or more workers; returns when at least one finishes.

View File

@@ -2243,6 +2243,12 @@ pub struct CollabAgentSpawnEndEvent {
/// Initial prompt sent to the agent. Can be empty to prevent CoT leaking at the
/// beginning.
pub prompt: String,
/// Model slug used for the newly spawned agent.
#[serde(skip_serializing_if = "Option::is_none")]
pub model: Option<String>,
/// Reasoning effort used for the newly spawned agent.
#[serde(skip_serializing_if = "Option::is_none")]
pub reasoning_effort: Option<ReasoningEffortConfig>,
/// Last known status of the new agent reported to the sender agent.
pub status: AgentStatus,
}

View File

@@ -23,6 +23,8 @@ pub(crate) fn spawn_end(ev: CollabAgentSpawnEndEvent) -> PlainHistoryCell {
sender_thread_id: _,
new_thread_id,
prompt,
model,
reasoning_effort,
status,
} = ev;
let new_agent = new_thread_id
@@ -31,8 +33,14 @@ pub(crate) fn spawn_end(ev: CollabAgentSpawnEndEvent) -> PlainHistoryCell {
let mut details = vec![
detail_line("call", call_id),
detail_line("agent", new_agent),
status_line(&status),
];
if let Some(model) = model {
details.push(detail_line("model", model));
}
if let Some(reasoning_effort) = reasoning_effort {
details.push(detail_line("effort", reasoning_effort.to_string()));
}
details.push(status_line(&status));
if let Some(line) = prompt_line(&prompt) {
details.push(line);
}