Compare commits

...

5 Commits

Author SHA1 Message Date
Ahmed Ibrahim
4e99a4d0ed Make collab spawn metadata accurate on completion 2026-03-11 20:01:10 -07:00
Ahmed Ibrahim
89ff3ccfb3 codex: fix CI failure on PR #14410
Filter the turn completion assertion to the parent thread so the spawn metadata integration test is robust to child-first completion ordering.

Co-authored-by: Codex <noreply@openai.com>
2026-03-11 18:53:16 -07:00
Ahmed Ibrahim
12f809407d codex: fix CI failure on PR #14410
Remove the unnecessary clone on spawn reasoning effort and add app-server integration coverage for spawn item model metadata.

Co-authored-by: Codex <noreply@openai.com>
2026-03-11 17:05:17 -07:00
Ahmed Ibrahim
08303687bb codex: fix CI failure on PR #14410
Ignore newly added spawn metadata fields in the TUI spawn-end destructure so the updated event type keeps compiling.

Co-authored-by: Codex <noreply@openai.com>
2026-03-11 16:52:14 -07:00
Ahmed Ibrahim
288864e847 Include spawn agent model metadata in app-server items
Add the spawned agent's model and reasoning effort to app-server collab turn items and notifications so turn history matches spawn events.

Co-authored-by: Codex <noreply@openai.com>
2026-03-11 16:46:51 -07:00
31 changed files with 1000 additions and 26 deletions
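
Taken together, these commits thread the requested model and reasoning effort from the spawn tool call into the spawn-end event and the app-server collab items. A minimal sketch of what a spawn-end payload now carries, with illustrative values borrowed from the tests in this changeset; surrounding fields are elided and the wire form of status is an assumption:

// Abridged spawn-end payload after this change. "model" and
// "reasoning_effort" are the new required fields; other fields elided.
use serde_json::json;

fn main() {
    let spawn_end = json!({
        "call_id": "spawn-1",
        "prompt": "inspect the repo",
        "model": "gpt-5.4-mini",      // new: model requested for the spawned agent
        "reasoning_effort": "medium", // new: requested reasoning effort
        "status": "running",          // assumed wire form for AgentStatus::Running
    });
    println!("{spawn_end:#}");
}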

View File

@@ -3059,6 +3059,10 @@
"description": "Identifier for the collab tool call.",
"type": "string"
},
"model": {
"description": "Model requested for the spawned agent.",
"type": "string"
},
"new_agent_nickname": {
"description": "Optional nickname assigned to the new agent.",
"type": [
@@ -3088,6 +3092,14 @@
"description": "Initial prompt sent to the agent. Can be empty to prevent CoT leaking at the beginning.",
"type": "string"
},
"reasoning_effort": {
"allOf": [
{
"$ref": "#/definitions/ReasoningEffort"
}
],
"description": "Reasoning effort requested for the spawned agent."
},
"sender_thread_id": {
"allOf": [
{
@@ -3114,7 +3126,9 @@
},
"required": [
"call_id",
"model",
"prompt",
"reasoning_effort",
"sender_thread_id",
"status",
"type"
@@ -9196,6 +9210,10 @@
"description": "Identifier for the collab tool call.",
"type": "string"
},
"model": {
"description": "Model requested for the spawned agent.",
"type": "string"
},
"new_agent_nickname": {
"description": "Optional nickname assigned to the new agent.",
"type": [
@@ -9225,6 +9243,14 @@
"description": "Initial prompt sent to the agent. Can be empty to prevent CoT leaking at the beginning.",
"type": "string"
},
"reasoning_effort": {
"allOf": [
{
"$ref": "#/definitions/ReasoningEffort"
}
],
"description": "Reasoning effort requested for the spawned agent."
},
"sender_thread_id": {
"allOf": [
{
@@ -9251,7 +9277,9 @@
},
"required": [
"call_id",
"model",
"prompt",
"reasoning_effort",
"sender_thread_id",
"status",
"type"

View File

@@ -1588,6 +1588,18 @@
],
"type": "object"
},
"ReasoningEffort": {
"description": "See https://platform.openai.com/docs/guides/reasoning?api-mode=responses#get-started-with-reasoning",
"enum": [
"none",
"minimal",
"low",
"medium",
"high",
"xhigh"
],
"type": "string"
},
"ReasoningSummaryPartAddedNotification": {
"properties": {
"itemId": {
@@ -2375,6 +2387,13 @@
"description": "Unique identifier for this collab tool call.",
"type": "string"
},
"model": {
"description": "Model requested for the spawned agent, when applicable.",
"type": [
"string",
"null"
]
},
"prompt": {
"description": "Prompt text sent as part of the collab tool call, when available.",
"type": [
@@ -2382,6 +2401,17 @@
"null"
]
},
"reasoningEffort": {
"anyOf": [
{
"$ref": "#/definitions/ReasoningEffort"
},
{
"type": "null"
}
],
"description": "Reasoning effort requested for the spawned agent, when applicable."
},
"receiverThreadIds": {
"description": "Thread ID of the receiving agent, when applicable. In case of spawn operation, this corresponds to the newly spawned agent.",
"items": {

View File

@@ -4422,6 +4422,10 @@
"description": "Identifier for the collab tool call.",
"type": "string"
},
"model": {
"description": "Model requested for the spawned agent.",
"type": "string"
},
"new_agent_nickname": {
"description": "Optional nickname assigned to the new agent.",
"type": [
@@ -4451,6 +4455,14 @@
"description": "Initial prompt sent to the agent. Can be empty to prevent CoT leaking at the beginning.",
"type": "string"
},
"reasoning_effort": {
"allOf": [
{
"$ref": "#/definitions/v2/ReasoningEffort"
}
],
"description": "Reasoning effort requested for the spawned agent."
},
"sender_thread_id": {
"allOf": [
{
@@ -4477,7 +4489,9 @@
},
"required": [
"call_id",
"model",
"prompt",
"reasoning_effort",
"sender_thread_id",
"status",
"type"
@@ -15720,6 +15734,13 @@
"description": "Unique identifier for this collab tool call.",
"type": "string"
},
"model": {
"description": "Model requested for the spawned agent, when applicable.",
"type": [
"string",
"null"
]
},
"prompt": {
"description": "Prompt text sent as part of the collab tool call, when available.",
"type": [
@@ -15727,6 +15748,17 @@
"null"
]
},
"reasoningEffort": {
"anyOf": [
{
"$ref": "#/definitions/v2/ReasoningEffort"
},
{
"type": "null"
}
],
"description": "Reasoning effort requested for the spawned agent, when applicable."
},
"receiverThreadIds": {
"description": "Thread ID of the receiving agent, when applicable. In case of spawn operation, this corresponds to the newly spawned agent.",
"items": {

View File

@@ -6224,6 +6224,10 @@
"description": "Identifier for the collab tool call.",
"type": "string"
},
"model": {
"description": "Model requested for the spawned agent.",
"type": "string"
},
"new_agent_nickname": {
"description": "Optional nickname assigned to the new agent.",
"type": [
@@ -6253,6 +6257,14 @@
"description": "Initial prompt sent to the agent. Can be empty to prevent CoT leaking at the beginning.",
"type": "string"
},
"reasoning_effort": {
"allOf": [
{
"$ref": "#/definitions/ReasoningEffort"
}
],
"description": "Reasoning effort requested for the spawned agent."
},
"sender_thread_id": {
"allOf": [
{
@@ -6279,7 +6291,9 @@
},
"required": [
"call_id",
"model",
"prompt",
"reasoning_effort",
"sender_thread_id",
"status",
"type"
@@ -13487,6 +13501,13 @@
"description": "Unique identifier for this collab tool call.",
"type": "string"
},
"model": {
"description": "Model requested for the spawned agent, when applicable.",
"type": [
"string",
"null"
]
},
"prompt": {
"description": "Prompt text sent as part of the collab tool call, when available.",
"type": [
@@ -13494,6 +13515,17 @@
"null"
]
},
"reasoningEffort": {
"anyOf": [
{
"$ref": "#/definitions/ReasoningEffort"
},
{
"type": "null"
}
],
"description": "Reasoning effort requested for the spawned agent, when applicable."
},
"receiverThreadIds": {
"description": "Thread ID of the receiving agent, when applicable. In case of spawn operation, this corresponds to the newly spawned agent.",
"items": {

View File

@@ -374,6 +374,18 @@
}
]
},
"ReasoningEffort": {
"description": "See https://platform.openai.com/docs/guides/reasoning?api-mode=responses#get-started-with-reasoning",
"enum": [
"none",
"minimal",
"low",
"medium",
"high",
"xhigh"
],
"type": "string"
},
"TextElement": {
"properties": {
"byteRange": {
@@ -751,6 +763,13 @@
"description": "Unique identifier for this collab tool call.",
"type": "string"
},
"model": {
"description": "Model requested for the spawned agent, when applicable.",
"type": [
"string",
"null"
]
},
"prompt": {
"description": "Prompt text sent as part of the collab tool call, when available.",
"type": [
@@ -758,6 +777,17 @@
"null"
]
},
"reasoningEffort": {
"anyOf": [
{
"$ref": "#/definitions/ReasoningEffort"
},
{
"type": "null"
}
],
"description": "Reasoning effort requested for the spawned agent, when applicable."
},
"receiverThreadIds": {
"description": "Thread ID of the receiving agent, when applicable. In case of spawn operation, this corresponds to the newly spawned agent.",
"items": {

View File

@@ -374,6 +374,18 @@
}
]
},
"ReasoningEffort": {
"description": "See https://platform.openai.com/docs/guides/reasoning?api-mode=responses#get-started-with-reasoning",
"enum": [
"none",
"minimal",
"low",
"medium",
"high",
"xhigh"
],
"type": "string"
},
"TextElement": {
"properties": {
"byteRange": {
@@ -751,6 +763,13 @@
"description": "Unique identifier for this collab tool call.",
"type": "string"
},
"model": {
"description": "Model requested for the spawned agent, when applicable.",
"type": [
"string",
"null"
]
},
"prompt": {
"description": "Prompt text sent as part of the collab tool call, when available.",
"type": [
@@ -758,6 +777,17 @@
"null"
]
},
"reasoningEffort": {
"anyOf": [
{
"$ref": "#/definitions/ReasoningEffort"
},
{
"type": "null"
}
],
"description": "Reasoning effort requested for the spawned agent, when applicable."
},
"receiverThreadIds": {
"description": "Thread ID of the receiving agent, when applicable. In case of spawn operation, this corresponds to the newly spawned agent.",
"items": {

View File

@@ -488,6 +488,18 @@
}
]
},
"ReasoningEffort": {
"description": "See https://platform.openai.com/docs/guides/reasoning?api-mode=responses#get-started-with-reasoning",
"enum": [
"none",
"minimal",
"low",
"medium",
"high",
"xhigh"
],
"type": "string"
},
"TextElement": {
"properties": {
"byteRange": {
@@ -865,6 +877,13 @@
"description": "Unique identifier for this collab tool call.",
"type": "string"
},
"model": {
"description": "Model requested for the spawned agent, when applicable.",
"type": [
"string",
"null"
]
},
"prompt": {
"description": "Prompt text sent as part of the collab tool call, when available.",
"type": [
@@ -872,6 +891,17 @@
"null"
]
},
"reasoningEffort": {
"anyOf": [
{
"$ref": "#/definitions/ReasoningEffort"
},
{
"type": "null"
}
],
"description": "Reasoning effort requested for the spawned agent, when applicable."
},
"receiverThreadIds": {
"description": "Thread ID of the receiving agent, when applicable. In case of spawn operation, this corresponds to the newly spawned agent.",
"items": {

View File

@@ -1349,6 +1349,13 @@
"description": "Unique identifier for this collab tool call.",
"type": "string"
},
"model": {
"description": "Model requested for the spawned agent, when applicable.",
"type": [
"string",
"null"
]
},
"prompt": {
"description": "Prompt text sent as part of the collab tool call, when available.",
"type": [
@@ -1356,6 +1363,17 @@
"null"
]
},
"reasoningEffort": {
"anyOf": [
{
"$ref": "#/definitions/ReasoningEffort"
},
{
"type": "null"
}
],
"description": "Reasoning effort requested for the spawned agent, when applicable."
},
"receiverThreadIds": {
"description": "Thread ID of the receiving agent, when applicable. In case of spawn operation, this corresponds to the newly spawned agent.",
"items": {

View File

@@ -511,6 +511,18 @@
}
]
},
"ReasoningEffort": {
"description": "See https://platform.openai.com/docs/guides/reasoning?api-mode=responses#get-started-with-reasoning",
"enum": [
"none",
"minimal",
"low",
"medium",
"high",
"xhigh"
],
"type": "string"
},
"SessionSource": {
"oneOf": [
{
@@ -1103,6 +1115,13 @@
"description": "Unique identifier for this collab tool call.",
"type": "string"
},
"model": {
"description": "Model requested for the spawned agent, when applicable.",
"type": [
"string",
"null"
]
},
"prompt": {
"description": "Prompt text sent as part of the collab tool call, when available.",
"type": [
@@ -1110,6 +1129,17 @@
"null"
]
},
"reasoningEffort": {
"anyOf": [
{
"$ref": "#/definitions/ReasoningEffort"
},
{
"type": "null"
}
],
"description": "Reasoning effort requested for the spawned agent, when applicable."
},
"receiverThreadIds": {
"description": "Thread ID of the receiving agent, when applicable. In case of spawn operation, this corresponds to the newly spawned agent.",
"items": {

View File

@@ -511,6 +511,18 @@
}
]
},
"ReasoningEffort": {
"description": "See https://platform.openai.com/docs/guides/reasoning?api-mode=responses#get-started-with-reasoning",
"enum": [
"none",
"minimal",
"low",
"medium",
"high",
"xhigh"
],
"type": "string"
},
"SessionSource": {
"oneOf": [
{
@@ -1103,6 +1115,13 @@
"description": "Unique identifier for this collab tool call.",
"type": "string"
},
"model": {
"description": "Model requested for the spawned agent, when applicable.",
"type": [
"string",
"null"
]
},
"prompt": {
"description": "Prompt text sent as part of the collab tool call, when available.",
"type": [
@@ -1110,6 +1129,17 @@
"null"
]
},
"reasoningEffort": {
"anyOf": [
{
"$ref": "#/definitions/ReasoningEffort"
},
{
"type": "null"
}
],
"description": "Reasoning effort requested for the spawned agent, when applicable."
},
"receiverThreadIds": {
"description": "Thread ID of the receiving agent, when applicable. In case of spawn operation, this corresponds to the newly spawned agent.",
"items": {

View File

@@ -511,6 +511,18 @@
}
]
},
"ReasoningEffort": {
"description": "See https://platform.openai.com/docs/guides/reasoning?api-mode=responses#get-started-with-reasoning",
"enum": [
"none",
"minimal",
"low",
"medium",
"high",
"xhigh"
],
"type": "string"
},
"SessionSource": {
"oneOf": [
{
@@ -1103,6 +1115,13 @@
"description": "Unique identifier for this collab tool call.",
"type": "string"
},
"model": {
"description": "Model requested for the spawned agent, when applicable.",
"type": [
"string",
"null"
]
},
"prompt": {
"description": "Prompt text sent as part of the collab tool call, when available.",
"type": [
@@ -1110,6 +1129,17 @@
"null"
]
},
"reasoningEffort": {
"anyOf": [
{
"$ref": "#/definitions/ReasoningEffort"
},
{
"type": "null"
}
],
"description": "Reasoning effort requested for the spawned agent, when applicable."
},
"receiverThreadIds": {
"description": "Thread ID of the receiving agent, when applicable. In case of spawn operation, this corresponds to the newly spawned agent.",
"items": {

View File

@@ -1349,6 +1349,13 @@
"description": "Unique identifier for this collab tool call.",
"type": "string"
},
"model": {
"description": "Model requested for the spawned agent, when applicable.",
"type": [
"string",
"null"
]
},
"prompt": {
"description": "Prompt text sent as part of the collab tool call, when available.",
"type": [
@@ -1356,6 +1363,17 @@
"null"
]
},
"reasoningEffort": {
"anyOf": [
{
"$ref": "#/definitions/ReasoningEffort"
},
{
"type": "null"
}
],
"description": "Reasoning effort requested for the spawned agent, when applicable."
},
"receiverThreadIds": {
"description": "Thread ID of the receiving agent, when applicable. In case of spawn operation, this corresponds to the newly spawned agent.",
"items": {

View File

@@ -511,6 +511,18 @@
}
]
},
"ReasoningEffort": {
"description": "See https://platform.openai.com/docs/guides/reasoning?api-mode=responses#get-started-with-reasoning",
"enum": [
"none",
"minimal",
"low",
"medium",
"high",
"xhigh"
],
"type": "string"
},
"SessionSource": {
"oneOf": [
{
@@ -1103,6 +1115,13 @@
"description": "Unique identifier for this collab tool call.",
"type": "string"
},
"model": {
"description": "Model requested for the spawned agent, when applicable.",
"type": [
"string",
"null"
]
},
"prompt": {
"description": "Prompt text sent as part of the collab tool call, when available.",
"type": [
@@ -1110,6 +1129,17 @@
"null"
]
},
"reasoningEffort": {
"anyOf": [
{
"$ref": "#/definitions/ReasoningEffort"
},
{
"type": "null"
}
],
"description": "Reasoning effort requested for the spawned agent, when applicable."
},
"receiverThreadIds": {
"description": "Thread ID of the receiving agent, when applicable. In case of spawn operation, this corresponds to the newly spawned agent.",
"items": {

View File

@@ -1349,6 +1349,13 @@
"description": "Unique identifier for this collab tool call.",
"type": "string"
},
"model": {
"description": "Model requested for the spawned agent, when applicable.",
"type": [
"string",
"null"
]
},
"prompt": {
"description": "Prompt text sent as part of the collab tool call, when available.",
"type": [
@@ -1356,6 +1363,17 @@
"null"
]
},
"reasoningEffort": {
"anyOf": [
{
"$ref": "#/definitions/ReasoningEffort"
},
{
"type": "null"
}
],
"description": "Reasoning effort requested for the spawned agent, when applicable."
},
"receiverThreadIds": {
"description": "Thread ID of the receiving agent, when applicable. In case of spawn operation, this corresponds to the newly spawned agent.",
"items": {

View File

@@ -511,6 +511,18 @@
}
]
},
"ReasoningEffort": {
"description": "See https://platform.openai.com/docs/guides/reasoning?api-mode=responses#get-started-with-reasoning",
"enum": [
"none",
"minimal",
"low",
"medium",
"high",
"xhigh"
],
"type": "string"
},
"SessionSource": {
"oneOf": [
{
@@ -1103,6 +1115,13 @@
"description": "Unique identifier for this collab tool call.",
"type": "string"
},
"model": {
"description": "Model requested for the spawned agent, when applicable.",
"type": [
"string",
"null"
]
},
"prompt": {
"description": "Prompt text sent as part of the collab tool call, when available.",
"type": [
@@ -1110,6 +1129,17 @@
"null"
]
},
"reasoningEffort": {
"anyOf": [
{
"$ref": "#/definitions/ReasoningEffort"
},
{
"type": "null"
}
],
"description": "Reasoning effort requested for the spawned agent, when applicable."
},
"receiverThreadIds": {
"description": "Thread ID of the receiving agent, when applicable. In case of spawn operation, this corresponds to the newly spawned agent.",
"items": {

View File

@@ -511,6 +511,18 @@
}
]
},
"ReasoningEffort": {
"description": "See https://platform.openai.com/docs/guides/reasoning?api-mode=responses#get-started-with-reasoning",
"enum": [
"none",
"minimal",
"low",
"medium",
"high",
"xhigh"
],
"type": "string"
},
"SessionSource": {
"oneOf": [
{
@@ -1103,6 +1115,13 @@
"description": "Unique identifier for this collab tool call.",
"type": "string"
},
"model": {
"description": "Model requested for the spawned agent, when applicable.",
"type": [
"string",
"null"
]
},
"prompt": {
"description": "Prompt text sent as part of the collab tool call, when available.",
"type": [
@@ -1110,6 +1129,17 @@
"null"
]
},
"reasoningEffort": {
"anyOf": [
{
"$ref": "#/definitions/ReasoningEffort"
},
{
"type": "null"
}
],
"description": "Reasoning effort requested for the spawned agent, when applicable."
},
"receiverThreadIds": {
"description": "Thread ID of the receiving agent, when applicable. In case of spawn operation, this corresponds to the newly spawned agent.",
"items": {

View File

@@ -488,6 +488,18 @@
}
]
},
"ReasoningEffort": {
"description": "See https://platform.openai.com/docs/guides/reasoning?api-mode=responses#get-started-with-reasoning",
"enum": [
"none",
"minimal",
"low",
"medium",
"high",
"xhigh"
],
"type": "string"
},
"TextElement": {
"properties": {
"byteRange": {
@@ -865,6 +877,13 @@
"description": "Unique identifier for this collab tool call.",
"type": "string"
},
"model": {
"description": "Model requested for the spawned agent, when applicable.",
"type": [
"string",
"null"
]
},
"prompt": {
"description": "Prompt text sent as part of the collab tool call, when available.",
"type": [
@@ -872,6 +891,17 @@
"null"
]
},
"reasoningEffort": {
"anyOf": [
{
"$ref": "#/definitions/ReasoningEffort"
},
{
"type": "null"
}
],
"description": "Reasoning effort requested for the spawned agent, when applicable."
},
"receiverThreadIds": {
"description": "Thread ID of the receiving agent, when applicable. In case of spawn operation, this corresponds to the newly spawned agent.",
"items": {

View File

@@ -488,6 +488,18 @@
}
]
},
"ReasoningEffort": {
"description": "See https://platform.openai.com/docs/guides/reasoning?api-mode=responses#get-started-with-reasoning",
"enum": [
"none",
"minimal",
"low",
"medium",
"high",
"xhigh"
],
"type": "string"
},
"TextElement": {
"properties": {
"byteRange": {
@@ -865,6 +877,13 @@
"description": "Unique identifier for this collab tool call.",
"type": "string"
},
"model": {
"description": "Model requested for the spawned agent, when applicable.",
"type": [
"string",
"null"
]
},
"prompt": {
"description": "Prompt text sent as part of the collab tool call, when available.",
"type": [
@@ -872,6 +891,17 @@
"null"
]
},
"reasoningEffort": {
"anyOf": [
{
"$ref": "#/definitions/ReasoningEffort"
},
{
"type": "null"
}
],
"description": "Reasoning effort requested for the spawned agent, when applicable."
},
"receiverThreadIds": {
"description": "Thread ID of the receiving agent, when applicable. In case of spawn operation, this corresponds to the newly spawned agent.",
"items": {

View File

@@ -488,6 +488,18 @@
}
]
},
"ReasoningEffort": {
"description": "See https://platform.openai.com/docs/guides/reasoning?api-mode=responses#get-started-with-reasoning",
"enum": [
"none",
"minimal",
"low",
"medium",
"high",
"xhigh"
],
"type": "string"
},
"TextElement": {
"properties": {
"byteRange": {
@@ -865,6 +877,13 @@
"description": "Unique identifier for this collab tool call.",
"type": "string"
},
"model": {
"description": "Model requested for the spawned agent, when applicable.",
"type": [
"string",
"null"
]
},
"prompt": {
"description": "Prompt text sent as part of the collab tool call, when available.",
"type": [
@@ -872,6 +891,17 @@
"null"
]
},
"reasoningEffort": {
"anyOf": [
{
"$ref": "#/definitions/ReasoningEffort"
},
{
"type": "null"
}
],
"description": "Reasoning effort requested for the spawned agent, when applicable."
},
"receiverThreadIds": {
"description": "Thread ID of the receiving agent, when applicable. In case of spawn operation, this corresponds to the newly spawned agent.",
"items": {

View File

@@ -2,6 +2,7 @@
// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.
import type { AgentStatus } from "./AgentStatus";
import type { ReasoningEffort } from "./ReasoningEffort";
import type { ThreadId } from "./ThreadId";
export type CollabAgentSpawnEndEvent = {
@@ -30,6 +31,14 @@ new_agent_role?: string | null,
* beginning.
*/
prompt: string,
/**
* Model requested for the spawned agent.
*/
model: string,
/**
* Reasoning effort requested for the spawned agent.
*/
reasoning_effort: ReasoningEffort,
/**
* Last known status of the new agent reported to the sender agent.
*/

View File

@@ -2,6 +2,7 @@
// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.
import type { MessagePhase } from "../MessagePhase";
import type { ReasoningEffort } from "../ReasoningEffort";
import type { JsonValue } from "../serde_json/JsonValue";
import type { CollabAgentState } from "./CollabAgentState";
import type { CollabAgentTool } from "./CollabAgentTool";
@@ -82,6 +83,14 @@ receiverThreadIds: Array<string>,
* Prompt text sent as part of the collab tool call, when available.
*/
prompt: string | null,
/**
* Model requested for the spawned agent, when applicable.
*/
model: string | null,
/**
* Reasoning effort requested for the spawned agent, when applicable.
*/
reasoningEffort: ReasoningEffort | null,
/**
* Last known status of the target agents, when available.
*/

View File

@@ -554,6 +554,8 @@ impl ThreadHistoryBuilder {
sender_thread_id: payload.sender_thread_id.to_string(),
receiver_thread_ids: Vec::new(),
prompt: Some(payload.prompt.clone()),
model: None,
reasoning_effort: None,
agents_states: HashMap::new(),
};
self.upsert_item_in_current_turn(item);
@@ -587,6 +589,8 @@ impl ThreadHistoryBuilder {
sender_thread_id: payload.sender_thread_id.to_string(),
receiver_thread_ids,
prompt: Some(payload.prompt.clone()),
model: Some(payload.model.clone()),
reasoning_effort: Some(payload.reasoning_effort),
agents_states,
});
}
@@ -602,6 +606,8 @@ impl ThreadHistoryBuilder {
sender_thread_id: payload.sender_thread_id.to_string(),
receiver_thread_ids: vec![payload.receiver_thread_id.to_string()],
prompt: Some(payload.prompt.clone()),
model: None,
reasoning_effort: None,
agents_states: HashMap::new(),
};
self.upsert_item_in_current_turn(item);
@@ -624,6 +630,8 @@ impl ThreadHistoryBuilder {
sender_thread_id: payload.sender_thread_id.to_string(),
receiver_thread_ids: vec![receiver_id.clone()],
prompt: Some(payload.prompt.clone()),
model: None,
reasoning_effort: None,
agents_states: [(receiver_id, received_status)].into_iter().collect(),
});
}
@@ -643,6 +651,8 @@ impl ThreadHistoryBuilder {
.map(ToString::to_string)
.collect(),
prompt: None,
model: None,
reasoning_effort: None,
agents_states: HashMap::new(),
};
self.upsert_item_in_current_turn(item);
@@ -676,6 +686,8 @@ impl ThreadHistoryBuilder {
sender_thread_id: payload.sender_thread_id.to_string(),
receiver_thread_ids,
prompt: None,
model: None,
reasoning_effort: None,
agents_states,
});
}
@@ -691,6 +703,8 @@ impl ThreadHistoryBuilder {
sender_thread_id: payload.sender_thread_id.to_string(),
receiver_thread_ids: vec![payload.receiver_thread_id.to_string()],
prompt: None,
model: None,
reasoning_effort: None,
agents_states: HashMap::new(),
};
self.upsert_item_in_current_turn(item);
@@ -715,6 +729,8 @@ impl ThreadHistoryBuilder {
sender_thread_id: payload.sender_thread_id.to_string(),
receiver_thread_ids: vec![receiver_id],
prompt: None,
model: payload.model.clone(),
reasoning_effort: payload.reasoning_effort,
agents_states,
});
}
@@ -730,6 +746,8 @@ impl ThreadHistoryBuilder {
sender_thread_id: payload.sender_thread_id.to_string(),
receiver_thread_ids: vec![payload.receiver_thread_id.to_string()],
prompt: None,
model: None,
reasoning_effort: None,
agents_states: HashMap::new(),
};
self.upsert_item_in_current_turn(item);
@@ -757,6 +775,8 @@ impl ThreadHistoryBuilder {
sender_thread_id: payload.sender_thread_id.to_string(),
receiver_thread_ids: vec![receiver_id],
prompt: None,
model: payload.model.clone(),
reasoning_effort: payload.reasoning_effort,
agents_states,
});
}
@@ -2305,6 +2325,8 @@ mod tests {
.expect("valid receiver thread id"),
receiver_agent_nickname: None,
receiver_agent_role: None,
model: Some("gpt-5.4-mini".into()),
reasoning_effort: Some(codex_protocol::openai_models::ReasoningEffort::High),
status: AgentStatus::Completed(None),
}),
];
@@ -2325,6 +2347,8 @@ mod tests {
sender_thread_id: "00000000-0000-0000-0000-000000000001".into(),
receiver_thread_ids: vec!["00000000-0000-0000-0000-000000000002".into()],
prompt: None,
model: Some("gpt-5.4-mini".into()),
reasoning_effort: Some(codex_protocol::openai_models::ReasoningEffort::High),
agents_states: [(
"00000000-0000-0000-0000-000000000002".into(),
CollabAgentState {
@@ -2338,6 +2362,63 @@ mod tests {
);
}
#[test]
fn reconstructs_collab_spawn_end_item_with_model_metadata() {
let sender_thread_id = ThreadId::try_from("00000000-0000-0000-0000-000000000001")
.expect("valid sender thread id");
let spawned_thread_id = ThreadId::try_from("00000000-0000-0000-0000-000000000002")
.expect("valid receiver thread id");
let events = vec![
EventMsg::UserMessage(UserMessageEvent {
message: "spawn agent".into(),
images: None,
text_elements: Vec::new(),
local_images: Vec::new(),
}),
EventMsg::CollabAgentSpawnEnd(codex_protocol::protocol::CollabAgentSpawnEndEvent {
call_id: "spawn-1".into(),
sender_thread_id,
new_thread_id: Some(spawned_thread_id),
new_agent_nickname: Some("Scout".into()),
new_agent_role: Some("explorer".into()),
prompt: "inspect the repo".into(),
model: "gpt-5.4-mini".into(),
reasoning_effort: codex_protocol::openai_models::ReasoningEffort::Medium,
status: AgentStatus::Running,
}),
];
let items = events
.into_iter()
.map(RolloutItem::EventMsg)
.collect::<Vec<_>>();
let turns = build_turns_from_rollout_items(&items);
assert_eq!(turns.len(), 1);
assert_eq!(turns[0].items.len(), 2);
assert_eq!(
turns[0].items[1],
ThreadItem::CollabAgentToolCall {
id: "spawn-1".into(),
tool: CollabAgentTool::SpawnAgent,
status: CollabAgentToolCallStatus::Completed,
sender_thread_id: "00000000-0000-0000-0000-000000000001".into(),
receiver_thread_ids: vec!["00000000-0000-0000-0000-000000000002".into()],
prompt: Some("inspect the repo".into()),
model: Some("gpt-5.4-mini".into()),
reasoning_effort: Some(codex_protocol::openai_models::ReasoningEffort::Medium),
agents_states: [(
"00000000-0000-0000-0000-000000000002".into(),
CollabAgentState {
status: crate::protocol::v2::CollabAgentStatus::Running,
message: None,
},
)]
.into_iter()
.collect(),
}
);
}
#[test]
fn rollback_failed_error_does_not_mark_turn_failed() {
let events = vec![

View File

@@ -3946,6 +3946,10 @@ pub enum ThreadItem {
receiver_thread_ids: Vec<String>,
/// Prompt text sent as part of the collab tool call, when available.
prompt: Option<String>,
/// Model requested for the spawned agent, when applicable.
model: Option<String>,
/// Reasoning effort requested for the spawned agent, when applicable.
reasoning_effort: Option<ReasoningEffort>,
/// Last known status of the target agents, when available.
agents_states: HashMap<String, CollabAgentState>,
},

View File

@@ -862,6 +862,8 @@ pub(crate) async fn apply_bespoke_event_handling(
sender_thread_id: begin_event.sender_thread_id.to_string(),
receiver_thread_ids: Vec::new(),
prompt: Some(begin_event.prompt),
model: None,
reasoning_effort: None,
agents_states: HashMap::new(),
};
let notification = ItemStartedNotification {
@@ -899,6 +901,8 @@ pub(crate) async fn apply_bespoke_event_handling(
sender_thread_id: end_event.sender_thread_id.to_string(),
receiver_thread_ids,
prompt: Some(end_event.prompt),
model: Some(end_event.model),
reasoning_effort: Some(end_event.reasoning_effort),
agents_states,
};
let notification = ItemCompletedNotification {
@@ -919,6 +923,8 @@ pub(crate) async fn apply_bespoke_event_handling(
sender_thread_id: begin_event.sender_thread_id.to_string(),
receiver_thread_ids,
prompt: Some(begin_event.prompt),
model: None,
reasoning_effort: None,
agents_states: HashMap::new(),
};
let notification = ItemStartedNotification {
@@ -945,6 +951,8 @@ pub(crate) async fn apply_bespoke_event_handling(
sender_thread_id: end_event.sender_thread_id.to_string(),
receiver_thread_ids: vec![receiver_id.clone()],
prompt: Some(end_event.prompt),
model: None,
reasoning_effort: None,
agents_states: [(receiver_id, received_status)].into_iter().collect(),
};
let notification = ItemCompletedNotification {
@@ -969,6 +977,8 @@ pub(crate) async fn apply_bespoke_event_handling(
sender_thread_id: begin_event.sender_thread_id.to_string(),
receiver_thread_ids,
prompt: None,
model: None,
reasoning_effort: None,
agents_states: HashMap::new(),
};
let notification = ItemStartedNotification {
@@ -1005,6 +1015,8 @@ pub(crate) async fn apply_bespoke_event_handling(
sender_thread_id: end_event.sender_thread_id.to_string(),
receiver_thread_ids,
prompt: None,
model: None,
reasoning_effort: None,
agents_states,
};
let notification = ItemCompletedNotification {
@@ -1024,6 +1036,8 @@ pub(crate) async fn apply_bespoke_event_handling(
sender_thread_id: begin_event.sender_thread_id.to_string(),
receiver_thread_ids: vec![begin_event.receiver_thread_id.to_string()],
prompt: None,
model: None,
reasoning_effort: None,
agents_states: HashMap::new(),
};
let notification = ItemStartedNotification {
@@ -1064,6 +1078,8 @@ pub(crate) async fn apply_bespoke_event_handling(
sender_thread_id: end_event.sender_thread_id.to_string(),
receiver_thread_ids: vec![receiver_id],
prompt: None,
model: end_event.model,
reasoning_effort: end_event.reasoning_effort,
agents_states,
};
let notification = ItemCompletedNotification {
@@ -2515,6 +2531,8 @@ fn collab_resume_begin_item(
sender_thread_id: begin_event.sender_thread_id.to_string(),
receiver_thread_ids: vec![begin_event.receiver_thread_id.to_string()],
prompt: None,
model: None,
reasoning_effort: None,
agents_states: HashMap::new(),
}
}
@@ -2539,6 +2557,8 @@ fn collab_resume_end_item(end_event: codex_protocol::protocol::CollabResumeEndEv
sender_thread_id: end_event.sender_thread_id.to_string(),
receiver_thread_ids: vec![receiver_id],
prompt: None,
model: end_event.model,
reasoning_effort: end_event.reasoning_effort,
agents_states,
}
}
@@ -2911,6 +2931,8 @@ mod tests {
sender_thread_id: event.sender_thread_id.to_string(),
receiver_thread_ids: vec![event.receiver_thread_id.to_string()],
prompt: None,
model: None,
reasoning_effort: None,
agents_states: HashMap::new(),
};
assert_eq!(item, expected);
@@ -2924,6 +2946,8 @@ mod tests {
receiver_thread_id: ThreadId::new(),
receiver_agent_nickname: None,
receiver_agent_role: None,
model: Some("gpt-5.4-mini".to_string()),
reasoning_effort: Some(codex_protocol::openai_models::ReasoningEffort::High),
status: codex_protocol::protocol::AgentStatus::NotFound,
};
@@ -2936,6 +2960,8 @@ mod tests {
sender_thread_id: event.sender_thread_id.to_string(),
receiver_thread_ids: vec![receiver_id.clone()],
prompt: None,
model: Some("gpt-5.4-mini".to_string()),
reasoning_effort: Some(codex_protocol::openai_models::ReasoningEffort::High),
agents_states: [(
receiver_id,
V2CollabAgentStatus::from(codex_protocol::protocol::AgentStatus::NotFound),

View File

@@ -13,6 +13,10 @@ use codex_app_server::INPUT_TOO_LARGE_ERROR_CODE;
use codex_app_server::INVALID_PARAMS_ERROR_CODE;
use codex_app_server_protocol::ByteRange;
use codex_app_server_protocol::ClientInfo;
use codex_app_server_protocol::CollabAgentState;
use codex_app_server_protocol::CollabAgentStatus;
use codex_app_server_protocol::CollabAgentTool;
use codex_app_server_protocol::CollabAgentToolCallStatus;
use codex_app_server_protocol::CommandExecutionApprovalDecision;
use codex_app_server_protocol::CommandExecutionRequestApprovalResponse;
use codex_app_server_protocol::CommandExecutionStatus;
@@ -68,6 +72,12 @@ const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs
const TEST_ORIGINATOR: &str = "codex_vscode";
const LOCAL_PRAGMATIC_TEMPLATE: &str = "You are a deeply pragmatic, effective software engineer.";
fn body_contains(req: &wiremock::Request, text: &str) -> bool {
String::from_utf8(req.body.clone())
.ok()
.is_some_and(|body| body.contains(text))
}
#[tokio::test]
async fn turn_start_sends_originator_header() -> Result<()> {
let responses = vec![create_final_assistant_message_sse_response("Done")?];
@@ -1658,6 +1668,198 @@ async fn turn_start_file_change_approval_v2() -> Result<()> {
Ok(())
}
#[tokio::test]
async fn turn_start_omits_spawn_agent_model_metadata_until_completion_v2() -> Result<()> {
skip_if_no_network!(Ok(()));
const CHILD_PROMPT: &str = "child: do work";
const PARENT_PROMPT: &str = "spawn a child and continue";
const SPAWN_CALL_ID: &str = "spawn-call-1";
const REQUESTED_MODEL: &str = "gpt-5.1";
const REQUESTED_REASONING_EFFORT: ReasoningEffort = ReasoningEffort::Low;
let server = responses::start_mock_server().await;
let spawn_args = serde_json::to_string(&json!({
"message": CHILD_PROMPT,
"model": REQUESTED_MODEL,
"reasoning_effort": REQUESTED_REASONING_EFFORT,
}))?;
let _parent_turn = responses::mount_sse_once_match(
&server,
|req: &wiremock::Request| body_contains(req, PARENT_PROMPT),
responses::sse(vec![
responses::ev_response_created("resp-turn1-1"),
responses::ev_function_call(SPAWN_CALL_ID, "spawn_agent", &spawn_args),
responses::ev_completed("resp-turn1-1"),
]),
)
.await;
let _child_turn = responses::mount_sse_once_match(
&server,
|req: &wiremock::Request| {
body_contains(req, CHILD_PROMPT) && !body_contains(req, SPAWN_CALL_ID)
},
responses::sse(vec![
responses::ev_response_created("resp-child-1"),
responses::ev_assistant_message("msg-child-1", "child done"),
responses::ev_completed("resp-child-1"),
]),
)
.await;
let _parent_follow_up = responses::mount_sse_once_match(
&server,
|req: &wiremock::Request| body_contains(req, SPAWN_CALL_ID),
responses::sse(vec![
responses::ev_response_created("resp-turn1-2"),
responses::ev_assistant_message("msg-turn1-2", "parent done"),
responses::ev_completed("resp-turn1-2"),
]),
)
.await;
let codex_home = TempDir::new()?;
create_config_toml(
codex_home.path(),
&server.uri(),
"never",
&BTreeMap::from([(Feature::Collab, true)]),
)?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let thread_req = mcp
.send_thread_start_request(ThreadStartParams {
model: Some("gpt-5.2-codex".to_string()),
..Default::default()
})
.await?;
let thread_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(thread_req)),
)
.await??;
let ThreadStartResponse { thread, .. } = to_response::<ThreadStartResponse>(thread_resp)?;
let turn_req = mcp
.send_turn_start_request(TurnStartParams {
thread_id: thread.id.clone(),
input: vec![V2UserInput::Text {
text: PARENT_PROMPT.to_string(),
text_elements: Vec::new(),
}],
..Default::default()
})
.await?;
let turn_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(turn_req)),
)
.await??;
let turn: TurnStartResponse = to_response::<TurnStartResponse>(turn_resp)?;
let spawn_started = timeout(DEFAULT_READ_TIMEOUT, async {
loop {
let started_notif = mcp
.read_stream_until_notification_message("item/started")
.await?;
let started: ItemStartedNotification =
serde_json::from_value(started_notif.params.expect("item/started params"))?;
if let ThreadItem::CollabAgentToolCall { id, .. } = &started.item
&& id == SPAWN_CALL_ID
{
return Ok::<ThreadItem, anyhow::Error>(started.item);
}
}
})
.await??;
assert_eq!(
spawn_started,
ThreadItem::CollabAgentToolCall {
id: SPAWN_CALL_ID.to_string(),
tool: CollabAgentTool::SpawnAgent,
status: CollabAgentToolCallStatus::InProgress,
sender_thread_id: thread.id.clone(),
receiver_thread_ids: Vec::new(),
prompt: Some(CHILD_PROMPT.to_string()),
model: None,
reasoning_effort: None,
agents_states: HashMap::new(),
}
);
let spawn_completed = timeout(DEFAULT_READ_TIMEOUT, async {
loop {
let completed_notif = mcp
.read_stream_until_notification_message("item/completed")
.await?;
let completed: ItemCompletedNotification =
serde_json::from_value(completed_notif.params.expect("item/completed params"))?;
if let ThreadItem::CollabAgentToolCall { id, .. } = &completed.item
&& id == SPAWN_CALL_ID
{
return Ok::<ThreadItem, anyhow::Error>(completed.item);
}
}
})
.await??;
let ThreadItem::CollabAgentToolCall {
id,
tool,
status,
sender_thread_id,
receiver_thread_ids,
prompt,
model,
reasoning_effort,
agents_states,
} = spawn_completed
else {
unreachable!("loop ensures we break on collab agent tool call items");
};
let receiver_thread_id = receiver_thread_ids
.first()
.cloned()
.expect("spawn completion should include child thread id");
assert_eq!(id, SPAWN_CALL_ID);
assert_eq!(tool, CollabAgentTool::SpawnAgent);
assert_eq!(status, CollabAgentToolCallStatus::Completed);
assert_eq!(sender_thread_id, thread.id);
assert_eq!(receiver_thread_ids, vec![receiver_thread_id.clone()]);
assert_eq!(prompt, Some(CHILD_PROMPT.to_string()));
assert_eq!(model, Some(REQUESTED_MODEL.to_string()));
assert_eq!(reasoning_effort, Some(REQUESTED_REASONING_EFFORT));
assert_eq!(
agents_states,
HashMap::from([(
receiver_thread_id,
CollabAgentState {
status: CollabAgentStatus::PendingInit,
message: None,
},
)])
);
let turn_completed = timeout(DEFAULT_READ_TIMEOUT, async {
loop {
let turn_completed_notif = mcp
.read_stream_until_notification_message("turn/completed")
.await?;
let turn_completed: TurnCompletedNotification = serde_json::from_value(
turn_completed_notif.params.expect("turn/completed params"),
)?;
if turn_completed.thread_id == thread.id && turn_completed.turn.id == turn.turn.id {
return Ok::<TurnCompletedNotification, anyhow::Error>(turn_completed);
}
}
})
.await??;
assert_eq!(turn_completed.thread_id, thread.id);
assert_eq!(turn_completed.turn.id, turn.turn.id);
Ok(())
}
#[tokio::test]
async fn turn_start_file_change_approval_accept_for_session_persists_v2() -> Result<()> {
skip_if_no_network!(Ok(()));

View File

@@ -74,6 +74,14 @@ pub(crate) struct AgentControl {
state: Arc<Guards>,
}
#[derive(Debug, Clone)]
pub(crate) struct AgentMetadata {
pub(crate) nickname: Option<String>,
pub(crate) role: Option<String>,
pub(crate) model: String,
pub(crate) reasoning_effort: Option<codex_protocol::openai_models::ReasoningEffort>,
}
impl AgentControl {
/// Construct a new `AgentControl` that can spawn/message agents via the given manager state.
pub(crate) fn new(manager: Weak<ThreadManagerState>) -> Self {
@@ -347,17 +355,26 @@ impl AgentControl {
&self,
agent_id: ThreadId,
) -> Option<(Option<String>, Option<String>)> {
self.get_agent_metadata(agent_id)
.await
.map(|metadata| (metadata.nickname, metadata.role))
}
pub(crate) async fn get_agent_metadata(&self, agent_id: ThreadId) -> Option<AgentMetadata> {
let Ok(state) = self.upgrade() else {
return None;
};
let Ok(thread) = state.get_thread(agent_id).await else {
return None;
};
let session_source = thread.config_snapshot().await.session_source;
Some((
session_source.get_nickname(),
session_source.get_agent_role(),
))
let snapshot = thread.config_snapshot().await;
let session_source = snapshot.session_source;
Some(AgentMetadata {
nickname: session_source.get_nickname(),
role: session_source.get_agent_role(),
model: snapshot.model,
reasoning_effort: snapshot.reasoning_effort,
})
}
/// Subscribe to status updates for `agent_id`, yielding the latest value and changes.
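
The spawn handler in the next file prefers this live metadata and falls back to the values resolved from config when the thread is already gone, which is how spawn metadata stays accurate on completion. A self-contained toy of that fallback pattern; AgentMetadata is abridged from the struct above and the lookup is stubbed:

#[derive(Clone)]
struct AgentMetadata {
    model: String,
    reasoning_effort: Option<String>, // real type is ReasoningEffort
}

// Prefer the spawned agent's own metadata; otherwise report what was configured.
fn resolve(
    live: Option<AgentMetadata>,
    configured_model: String,
    configured_effort: String,
) -> (String, String) {
    match live {
        Some(meta) => (meta.model, meta.reasoning_effort.unwrap_or(configured_effort)),
        None => (configured_model, configured_effort),
    }
}

fn main() {
    // Thread already gone: the event still reports the configured values.
    let (model, effort) = resolve(None, "gpt-5.2-codex".into(), "medium".into());
    assert_eq!((model.as_str(), effort.as_str()), ("gpt-5.2-codex", "medium"));
}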

View File

@@ -157,8 +157,6 @@ mod spawn {
call_id: call_id.clone(),
sender_thread_id: session.conversation_id,
prompt: prompt.clone(),
model: args.model.clone().unwrap_or_default(),
reasoning_effort: args.reasoning_effort.unwrap_or_default(),
}
.into(),
)
@@ -178,6 +176,8 @@ mod spawn {
.map_err(FunctionCallError::RespondToModel)?;
apply_spawn_agent_runtime_overrides(&mut config, turn.as_ref())?;
apply_spawn_agent_overrides(&mut config, child_depth);
let configured_model = config.model.clone().unwrap_or_default();
let configured_reasoning_effort = config.model_reasoning_effort.unwrap_or_default();
let result = session
.services
@@ -203,15 +203,30 @@ mod spawn {
),
Err(_) => (None, AgentStatus::NotFound),
};
let (new_agent_nickname, new_agent_role) = match new_thread_id {
Some(thread_id) => session
.services
.agent_control
.get_agent_nickname_and_role(thread_id)
.await
.unwrap_or((None, None)),
None => (None, None),
let agent_metadata = match new_thread_id {
Some(thread_id) => {
session
.services
.agent_control
.get_agent_metadata(thread_id)
.await
}
None => None,
};
let new_agent_nickname = agent_metadata
.as_ref()
.and_then(|metadata| metadata.nickname.clone());
let new_agent_role = agent_metadata
.as_ref()
.and_then(|metadata| metadata.role.clone());
let spawned_model = agent_metadata
.as_ref()
.map(|metadata| metadata.model.clone())
.unwrap_or(configured_model);
let spawned_reasoning_effort = agent_metadata
.as_ref()
.and_then(|metadata| metadata.reasoning_effort)
.unwrap_or(configured_reasoning_effort);
let nickname = new_agent_nickname.clone();
session
.send_event(
@@ -223,6 +238,8 @@ mod spawn {
new_agent_nickname,
new_agent_role,
prompt,
model: spawned_model,
reasoning_effort: spawned_reasoning_effort,
status,
}
.into(),
@@ -358,12 +375,25 @@ mod resume_agent {
) -> Result<FunctionToolOutput, FunctionCallError> {
let args: ResumeAgentArgs = parse_arguments(&arguments)?;
let receiver_thread_id = agent_id(&args.id)?;
let (receiver_agent_nickname, receiver_agent_role) = session
let receiver_agent_metadata = session
.services
.agent_control
.get_agent_nickname_and_role(receiver_thread_id)
.get_agent_metadata(receiver_thread_id)
.await
.unwrap_or((None, None));
.map(|metadata| {
let nickname = metadata.nickname;
let role = metadata.role;
let model = Some(metadata.model);
let reasoning_effort = metadata.reasoning_effort;
(nickname, role, model, reasoning_effort)
})
.unwrap_or((None, None, None, None));
let (
receiver_agent_nickname,
receiver_agent_role,
receiver_model,
receiver_reasoning_effort,
) = receiver_agent_metadata;
let child_depth = next_thread_spawn_depth(&turn.session_source);
let max_depth = turn.config.agent_max_depth;
if exceeds_thread_spawn_depth_limit(child_depth, max_depth) {
@@ -411,12 +441,30 @@ mod resume_agent {
None
};
let (receiver_agent_nickname, receiver_agent_role) = session
let (
receiver_agent_nickname,
receiver_agent_role,
receiver_model,
receiver_reasoning_effort,
) = session
.services
.agent_control
.get_agent_nickname_and_role(receiver_thread_id)
.get_agent_metadata(receiver_thread_id)
.await
.unwrap_or((receiver_agent_nickname, receiver_agent_role));
.map(|metadata| {
(
metadata.nickname,
metadata.role,
Some(metadata.model),
metadata.reasoning_effort,
)
})
.unwrap_or((
receiver_agent_nickname,
receiver_agent_role,
receiver_model,
receiver_reasoning_effort,
));
session
.send_event(
&turn,
@@ -426,6 +474,8 @@ mod resume_agent {
receiver_thread_id,
receiver_agent_nickname,
receiver_agent_role,
model: receiver_model,
reasoning_effort: receiver_reasoning_effort,
status: status.clone(),
}
.into(),
@@ -695,12 +745,25 @@ pub mod close_agent {
) -> Result<FunctionToolOutput, FunctionCallError> {
let args: CloseAgentArgs = parse_arguments(&arguments)?;
let agent_id = agent_id(&args.id)?;
let (receiver_agent_nickname, receiver_agent_role) = session
let (
receiver_agent_nickname,
receiver_agent_role,
receiver_model,
receiver_reasoning_effort,
) = session
.services
.agent_control
.get_agent_nickname_and_role(agent_id)
.get_agent_metadata(agent_id)
.await
.unwrap_or((None, None));
.map(|metadata| {
(
metadata.nickname,
metadata.role,
Some(metadata.model),
metadata.reasoning_effort,
)
})
.unwrap_or((None, None, None, None));
session
.send_event(
&turn,
@@ -730,6 +793,8 @@ pub mod close_agent {
receiver_thread_id: agent_id,
receiver_agent_nickname: receiver_agent_nickname.clone(),
receiver_agent_role: receiver_agent_role.clone(),
model: receiver_model.clone(),
reasoning_effort: receiver_reasoning_effort,
status,
}
.into(),
@@ -758,6 +823,8 @@ pub mod close_agent {
receiver_thread_id: agent_id,
receiver_agent_nickname,
receiver_agent_role,
model: receiver_model,
reasoning_effort: receiver_reasoning_effort,
status: status.clone(),
}
.into(),

View File

@@ -579,6 +579,8 @@ fn collab_spawn_begin_and_end_emit_item_events() {
new_agent_nickname: None,
new_agent_role: None,
prompt: prompt.clone(),
model: "gpt-5".to_string(),
reasoning_effort: ReasoningEffortConfig::default(),
status: AgentStatus::Running,
}),
);

View File

@@ -3132,8 +3132,6 @@ pub struct CollabAgentSpawnBeginEvent {
/// Initial prompt sent to the agent. Can be empty to prevent CoT leaking at the
/// beginning.
pub prompt: String,
pub model: String,
pub reasoning_effort: ReasoningEffortConfig,
}
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq, JsonSchema, TS)]
@@ -3179,6 +3177,10 @@ pub struct CollabAgentSpawnEndEvent {
/// Initial prompt sent to the agent. Can be empty to prevent CoT leaking at the
/// beginning.
pub prompt: String,
/// Model requested for the spawned agent.
pub model: String,
/// Reasoning effort requested for the spawned agent.
pub reasoning_effort: ReasoningEffortConfig,
/// Last known status of the new agent reported to the sender agent.
pub status: AgentStatus,
}
@@ -3267,6 +3269,12 @@ pub struct CollabCloseEndEvent {
/// Optional role assigned to the receiver agent.
#[serde(default, skip_serializing_if = "Option::is_none")]
pub receiver_agent_role: Option<String>,
/// Model configured for the receiver agent when available.
#[serde(default, skip_serializing_if = "Option::is_none")]
pub model: Option<String>,
/// Reasoning effort configured for the receiver agent when available.
#[serde(default, skip_serializing_if = "Option::is_none")]
pub reasoning_effort: Option<ReasoningEffortConfig>,
/// Last known status of the receiver agent reported to the sender agent before
/// the close.
pub status: AgentStatus,
@@ -3302,6 +3310,12 @@ pub struct CollabResumeEndEvent {
/// Optional role assigned to the receiver agent.
#[serde(default, skip_serializing_if = "Option::is_none")]
pub receiver_agent_role: Option<String>,
/// Model configured for the receiver agent when available.
#[serde(default, skip_serializing_if = "Option::is_none")]
pub model: Option<String>,
/// Reasoning effort configured for the receiver agent when available.
#[serde(default, skip_serializing_if = "Option::is_none")]
pub reasoning_effort: Option<ReasoningEffortConfig>,
/// Last known status of the receiver agent reported to the sender agent after
/// resume.
pub status: AgentStatus,
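
The close and resume end events make the new fields optional with #[serde(default, skip_serializing_if = "Option::is_none")], so rollouts recorded before this change still deserialize. A self-contained toy of that pattern; the struct and field set are illustrative, not the real event:

use serde::{Deserialize, Serialize};

#[derive(Debug, Serialize, Deserialize, PartialEq)]
struct CloseEndLike {
    call_id: String,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    model: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    reasoning_effort: Option<String>,
}

fn main() {
    // A payload recorded before the fields existed still parses...
    let old: CloseEndLike = serde_json::from_str(r#"{"call_id":"close-1"}"#).unwrap();
    assert_eq!(old.model, None);
    // ...and None fields stay off the wire, so old readers are unaffected.
    assert_eq!(serde_json::to_string(&old).unwrap(), r#"{"call_id":"close-1"}"#);
}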

View File

@@ -2040,6 +2040,8 @@ async fn collab_spawn_end_shows_requested_model_and_effort() {
new_agent_nickname: Some("Robie".to_string()),
new_agent_role: Some("explorer".to_string()),
prompt: "Explore the repo".to_string(),
model: "gpt-5".to_string(),
reasoning_effort: ReasoningEffortConfig::High,
status: AgentStatus::PendingInit,
}),
});

View File

@@ -183,6 +183,7 @@ pub(crate) fn spawn_end(
new_agent_role,
prompt,
status: _,
..
} = ev;
let title = match new_thread_id {
@@ -601,6 +602,8 @@ mod tests {
new_agent_nickname: Some("Robie".to_string()),
new_agent_role: Some("explorer".to_string()),
prompt: "Compute 11! and reply with just the integer result.".to_string(),
model: "gpt-5".to_string(),
reasoning_effort: ReasoningEffortConfig::High,
status: AgentStatus::PendingInit,
},
Some(&SpawnRequestSummary {
@@ -737,6 +740,8 @@ mod tests {
new_agent_nickname: Some("Robie".to_string()),
new_agent_role: Some("explorer".to_string()),
prompt: String::new(),
model: "gpt-5".to_string(),
reasoning_effort: ReasoningEffortConfig::High,
status: AgentStatus::PendingInit,
},
Some(&SpawnRequestSummary {