Report collab spawn model metadata on completion instead of at spawn begin, sourcing it from the spawned agent's actual config

This commit is contained in:
Ahmed Ibrahim
2026-03-11 20:01:10 -07:00
parent 5bc82c5b93
commit cfb3b3e56c
6 changed files with 143 additions and 47 deletions

View File

@@ -554,8 +554,8 @@ impl ThreadHistoryBuilder {
sender_thread_id: payload.sender_thread_id.to_string(),
receiver_thread_ids: Vec::new(),
prompt: Some(payload.prompt.clone()),
model: Some(payload.model.clone()),
reasoning_effort: Some(payload.reasoning_effort),
model: None,
reasoning_effort: None,
agents_states: HashMap::new(),
};
self.upsert_item_in_current_turn(item);
@@ -729,8 +729,8 @@ impl ThreadHistoryBuilder {
sender_thread_id: payload.sender_thread_id.to_string(),
receiver_thread_ids: vec![receiver_id],
prompt: None,
model: None,
reasoning_effort: None,
model: payload.model.clone(),
reasoning_effort: payload.reasoning_effort,
agents_states,
});
}
@@ -775,8 +775,8 @@ impl ThreadHistoryBuilder {
sender_thread_id: payload.sender_thread_id.to_string(),
receiver_thread_ids: vec![receiver_id],
prompt: None,
model: None,
reasoning_effort: None,
model: payload.model.clone(),
reasoning_effort: payload.reasoning_effort,
agents_states,
});
}
@@ -2325,6 +2325,8 @@ mod tests {
.expect("valid receiver thread id"),
receiver_agent_nickname: None,
receiver_agent_role: None,
model: Some("gpt-5.4-mini".into()),
reasoning_effort: Some(codex_protocol::openai_models::ReasoningEffort::High),
status: AgentStatus::Completed(None),
}),
];
@@ -2345,8 +2347,8 @@ mod tests {
sender_thread_id: "00000000-0000-0000-0000-000000000001".into(),
receiver_thread_ids: vec!["00000000-0000-0000-0000-000000000002".into()],
prompt: None,
model: None,
reasoning_effort: None,
model: Some("gpt-5.4-mini".into()),
reasoning_effort: Some(codex_protocol::openai_models::ReasoningEffort::High),
agents_states: [(
"00000000-0000-0000-0000-000000000002".into(),
CollabAgentState {

View File

@@ -862,8 +862,8 @@ pub(crate) async fn apply_bespoke_event_handling(
sender_thread_id: begin_event.sender_thread_id.to_string(),
receiver_thread_ids: Vec::new(),
prompt: Some(begin_event.prompt),
model: Some(begin_event.model),
reasoning_effort: Some(begin_event.reasoning_effort),
model: None,
reasoning_effort: None,
agents_states: HashMap::new(),
};
let notification = ItemStartedNotification {
@@ -1078,8 +1078,8 @@ pub(crate) async fn apply_bespoke_event_handling(
sender_thread_id: end_event.sender_thread_id.to_string(),
receiver_thread_ids: vec![receiver_id],
prompt: None,
model: None,
reasoning_effort: None,
model: end_event.model,
reasoning_effort: end_event.reasoning_effort,
agents_states,
};
let notification = ItemCompletedNotification {
@@ -2557,8 +2557,8 @@ fn collab_resume_end_item(end_event: codex_protocol::protocol::CollabResumeEndEv
sender_thread_id: end_event.sender_thread_id.to_string(),
receiver_thread_ids: vec![receiver_id],
prompt: None,
model: None,
reasoning_effort: None,
model: end_event.model,
reasoning_effort: end_event.reasoning_effort,
agents_states,
}
}
@@ -2946,6 +2946,8 @@ mod tests {
receiver_thread_id: ThreadId::new(),
receiver_agent_nickname: None,
receiver_agent_role: None,
model: Some("gpt-5.4-mini".to_string()),
reasoning_effort: Some(codex_protocol::openai_models::ReasoningEffort::High),
status: codex_protocol::protocol::AgentStatus::NotFound,
};
@@ -2958,8 +2960,8 @@ mod tests {
sender_thread_id: event.sender_thread_id.to_string(),
receiver_thread_ids: vec![receiver_id.clone()],
prompt: None,
model: None,
reasoning_effort: None,
model: Some("gpt-5.4-mini".to_string()),
reasoning_effort: Some(codex_protocol::openai_models::ReasoningEffort::High),
agents_states: [(
receiver_id,
V2CollabAgentStatus::from(codex_protocol::protocol::AgentStatus::NotFound),

View File

@@ -1664,7 +1664,7 @@ async fn turn_start_file_change_approval_v2() -> Result<()> {
}
#[tokio::test]
async fn turn_start_emits_spawn_agent_item_with_model_metadata_v2() -> Result<()> {
async fn turn_start_omits_spawn_agent_model_metadata_until_completion_v2() -> Result<()> {
skip_if_no_network!(Ok(()));
const CHILD_PROMPT: &str = "child: do work";
@@ -1777,8 +1777,8 @@ async fn turn_start_emits_spawn_agent_item_with_model_metadata_v2() -> Result<()
sender_thread_id: thread.id.clone(),
receiver_thread_ids: Vec::new(),
prompt: Some(CHILD_PROMPT.to_string()),
model: Some(REQUESTED_MODEL.to_string()),
reasoning_effort: Some(REQUESTED_REASONING_EFFORT),
model: None,
reasoning_effort: None,
agents_states: HashMap::new(),
}
);

View File

@@ -74,6 +74,14 @@ pub(crate) struct AgentControl {
state: Arc<Guards>,
}
#[derive(Debug, Clone)]
pub(crate) struct AgentMetadata {
pub(crate) nickname: Option<String>,
pub(crate) role: Option<String>,
pub(crate) model: String,
pub(crate) reasoning_effort: Option<codex_protocol::openai_models::ReasoningEffort>,
}
impl AgentControl {
/// Construct a new `AgentControl` that can spawn/message agents via the given manager state.
pub(crate) fn new(manager: Weak<ThreadManagerState>) -> Self {
@@ -347,17 +355,26 @@ impl AgentControl {
&self,
agent_id: ThreadId,
) -> Option<(Option<String>, Option<String>)> {
self.get_agent_metadata(agent_id)
.await
.map(|metadata| (metadata.nickname, metadata.role))
}
pub(crate) async fn get_agent_metadata(&self, agent_id: ThreadId) -> Option<AgentMetadata> {
let Ok(state) = self.upgrade() else {
return None;
};
let Ok(thread) = state.get_thread(agent_id).await else {
return None;
};
let session_source = thread.config_snapshot().await.session_source;
Some((
session_source.get_nickname(),
session_source.get_agent_role(),
))
let snapshot = thread.config_snapshot().await;
let session_source = snapshot.session_source;
Some(AgentMetadata {
nickname: session_source.get_nickname(),
role: session_source.get_agent_role(),
model: snapshot.model,
reasoning_effort: snapshot.reasoning_effort,
})
}
/// Subscribe to status updates for `agent_id`, yielding the latest value and changes.

View File

@@ -157,8 +157,6 @@ mod spawn {
call_id: call_id.clone(),
sender_thread_id: session.conversation_id,
prompt: prompt.clone(),
model: args.model.clone().unwrap_or_default(),
reasoning_effort: args.reasoning_effort.unwrap_or_default(),
}
.into(),
)
@@ -178,6 +176,8 @@ mod spawn {
.map_err(FunctionCallError::RespondToModel)?;
apply_spawn_agent_runtime_overrides(&mut config, turn.as_ref())?;
apply_spawn_agent_overrides(&mut config, child_depth);
let configured_model = config.model.clone().unwrap_or_default();
let configured_reasoning_effort = config.model_reasoning_effort.unwrap_or_default();
let result = session
.services
@@ -203,15 +203,30 @@ mod spawn {
),
Err(_) => (None, AgentStatus::NotFound),
};
let (new_agent_nickname, new_agent_role) = match new_thread_id {
Some(thread_id) => session
.services
.agent_control
.get_agent_nickname_and_role(thread_id)
.await
.unwrap_or((None, None)),
None => (None, None),
let agent_metadata = match new_thread_id {
Some(thread_id) => {
session
.services
.agent_control
.get_agent_metadata(thread_id)
.await
}
None => None,
};
let new_agent_nickname = agent_metadata
.as_ref()
.and_then(|metadata| metadata.nickname.clone());
let new_agent_role = agent_metadata
.as_ref()
.and_then(|metadata| metadata.role.clone());
let spawned_model = agent_metadata
.as_ref()
.map(|metadata| metadata.model.clone())
.unwrap_or(configured_model);
let spawned_reasoning_effort = agent_metadata
.as_ref()
.and_then(|metadata| metadata.reasoning_effort)
.unwrap_or(configured_reasoning_effort);
let nickname = new_agent_nickname.clone();
session
.send_event(
@@ -223,8 +238,8 @@ mod spawn {
new_agent_nickname,
new_agent_role,
prompt,
model: args.model.clone().unwrap_or_default(),
reasoning_effort: args.reasoning_effort.unwrap_or_default(),
model: spawned_model,
reasoning_effort: spawned_reasoning_effort,
status,
}
.into(),
@@ -360,12 +375,25 @@ mod resume_agent {
) -> Result<FunctionToolOutput, FunctionCallError> {
let args: ResumeAgentArgs = parse_arguments(&arguments)?;
let receiver_thread_id = agent_id(&args.id)?;
let (receiver_agent_nickname, receiver_agent_role) = session
let receiver_agent_metadata = session
.services
.agent_control
.get_agent_nickname_and_role(receiver_thread_id)
.get_agent_metadata(receiver_thread_id)
.await
.unwrap_or((None, None));
.map(|metadata| {
let nickname = metadata.nickname;
let role = metadata.role;
let model = Some(metadata.model);
let reasoning_effort = metadata.reasoning_effort;
(nickname, role, model, reasoning_effort)
})
.unwrap_or((None, None, None, None));
let (
receiver_agent_nickname,
receiver_agent_role,
receiver_model,
receiver_reasoning_effort,
) = receiver_agent_metadata;
let child_depth = next_thread_spawn_depth(&turn.session_source);
let max_depth = turn.config.agent_max_depth;
if exceeds_thread_spawn_depth_limit(child_depth, max_depth) {
@@ -413,12 +441,30 @@ mod resume_agent {
None
};
let (receiver_agent_nickname, receiver_agent_role) = session
let (
receiver_agent_nickname,
receiver_agent_role,
receiver_model,
receiver_reasoning_effort,
) = session
.services
.agent_control
.get_agent_nickname_and_role(receiver_thread_id)
.get_agent_metadata(receiver_thread_id)
.await
.unwrap_or((receiver_agent_nickname, receiver_agent_role));
.map(|metadata| {
(
metadata.nickname,
metadata.role,
Some(metadata.model),
metadata.reasoning_effort,
)
})
.unwrap_or((
receiver_agent_nickname,
receiver_agent_role,
receiver_model,
receiver_reasoning_effort,
));
session
.send_event(
&turn,
@@ -428,6 +474,8 @@ mod resume_agent {
receiver_thread_id,
receiver_agent_nickname,
receiver_agent_role,
model: receiver_model,
reasoning_effort: receiver_reasoning_effort,
status: status.clone(),
}
.into(),
@@ -697,12 +745,25 @@ pub mod close_agent {
) -> Result<FunctionToolOutput, FunctionCallError> {
let args: CloseAgentArgs = parse_arguments(&arguments)?;
let agent_id = agent_id(&args.id)?;
let (receiver_agent_nickname, receiver_agent_role) = session
let (
receiver_agent_nickname,
receiver_agent_role,
receiver_model,
receiver_reasoning_effort,
) = session
.services
.agent_control
.get_agent_nickname_and_role(agent_id)
.get_agent_metadata(agent_id)
.await
.unwrap_or((None, None));
.map(|metadata| {
(
metadata.nickname,
metadata.role,
Some(metadata.model),
metadata.reasoning_effort,
)
})
.unwrap_or((None, None, None, None));
session
.send_event(
&turn,
@@ -732,6 +793,8 @@ pub mod close_agent {
receiver_thread_id: agent_id,
receiver_agent_nickname: receiver_agent_nickname.clone(),
receiver_agent_role: receiver_agent_role.clone(),
model: receiver_model.clone(),
reasoning_effort: receiver_reasoning_effort,
status,
}
.into(),
@@ -760,6 +823,8 @@ pub mod close_agent {
receiver_thread_id: agent_id,
receiver_agent_nickname,
receiver_agent_role,
model: receiver_model,
reasoning_effort: receiver_reasoning_effort,
status: status.clone(),
}
.into(),

View File

@@ -3132,8 +3132,6 @@ pub struct CollabAgentSpawnBeginEvent {
/// Initial prompt sent to the agent. Can be empty to prevent CoT leaking at the
/// beginning.
pub prompt: String,
pub model: String,
pub reasoning_effort: ReasoningEffortConfig,
}
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq, JsonSchema, TS)]
@@ -3271,6 +3269,12 @@ pub struct CollabCloseEndEvent {
/// Optional role assigned to the receiver agent.
#[serde(default, skip_serializing_if = "Option::is_none")]
pub receiver_agent_role: Option<String>,
/// Model configured for the receiver agent when available.
#[serde(default, skip_serializing_if = "Option::is_none")]
pub model: Option<String>,
/// Reasoning effort configured for the receiver agent when available.
#[serde(default, skip_serializing_if = "Option::is_none")]
pub reasoning_effort: Option<ReasoningEffortConfig>,
/// Last known status of the receiver agent reported to the sender agent before
/// the close.
pub status: AgentStatus,
@@ -3306,6 +3310,12 @@ pub struct CollabResumeEndEvent {
/// Optional role assigned to the receiver agent.
#[serde(default, skip_serializing_if = "Option::is_none")]
pub receiver_agent_role: Option<String>,
/// Model configured for the receiver agent when available.
#[serde(default, skip_serializing_if = "Option::is_none")]
pub model: Option<String>,
/// Reasoning effort configured for the receiver agent when available.
#[serde(default, skip_serializing_if = "Option::is_none")]
pub reasoning_effort: Option<ReasoningEffortConfig>,
/// Last known status of the receiver agent reported to the sender agent after
/// resume.
pub status: AgentStatus,