Compare commits

...

17 Commits

Author SHA1 Message Date
jif-oai
46fca9fc15 Merge remote-tracking branch 'origin/main' into jif/multi-agent-1 2025-12-16 17:05:15 +00:00
jif-oai
4cc89bb57d tool id 2025-12-16 17:05:09 +00:00
jif-oai
72aceb06ab Some fixes and stuff 2025-12-16 16:50:20 +00:00
jif-oai
29604a01b1 Some fixes 2025-12-16 15:56:38 +00:00
jif-oai
308b21dc09 Align specs 2025-12-16 15:31:09 +00:00
jif-oai
5cfb225f99 Add back regular tasks 2025-12-16 14:54:07 +00:00
jif-oai
6b006b8672 Merge remote-tracking branch 'origin/main' into jif/multi-agent-1
# Conflicts:
#	codex-rs/core/src/codex.rs
#	codex-rs/core/src/tasks/regular.rs
2025-12-16 14:48:13 +00:00
jif-oai
ec5b0ca07c Some improvements 2025-12-11 18:28:44 +00:00
jif-oai
d139b99535 Move on 2025-12-11 17:56:09 +00:00
jif-oai
7513b9a38b Dummy fix 2025-12-11 14:48:58 +00:00
jif-oai
8e271b665c fix merge 2025-12-11 14:46:37 +00:00
jif-oai
9e1e0e37aa Merge remote-tracking branch 'origin/main' into jif/multi-agent-1
# Conflicts:
#	codex-rs/core/src/codex.rs
#	codex-rs/tui/src/chatwidget/tests.rs
2025-12-11 14:30:00 +00:00
jif-oai
3ade391490 V6 2025-12-03 15:27:20 +00:00
jif-oai
0c1b6071a2 V5 2025-12-03 15:22:30 +00:00
jif-oai
5d988396d4 V4 2025-12-03 15:13:52 +00:00
jif-oai
42f7f5734f V2 2025-12-03 13:16:40 +00:00
jif-oai
2b1956f0ef V1 2025-12-03 12:44:00 +00:00
47 changed files with 3331 additions and 69 deletions

View File

@@ -87,6 +87,7 @@ pub(crate) async fn apply_bespoke_event_handling(
) {
let Event {
id: event_turn_id,
agent_idx: _,
msg,
} = event;
match msg {

View File

@@ -25,7 +25,7 @@ When using the planning tool:
- Do not make single-step plans.
- Once you have made a plan, update it after completing each of the sub-tasks you listed in the plan.
## Codex CLI harness, sandboxing, and approvals
## Codex CLI harness, sandboxing, and approvals
The Codex CLI harness supports several different configurations for sandboxing and escalation approvals that the user can choose from.

View File

@@ -0,0 +1,23 @@
mod orchestrator;
mod q_and_a;
mod reviewer;
mod worker;
use std::collections::HashMap;
use crate::agents::AgentDefinition;
/// Build the map of built-in agents, keyed by each agent's declared name.
pub(super) fn builtin_agents() -> HashMap<String, AgentDefinition> {
    [
        orchestrator::definition(),
        worker::definition(),
        reviewer::definition(),
        q_and_a::definition(),
    ]
    .into_iter()
    .map(|agent| (agent.name.clone(), agent))
    .collect()
}

View File

@@ -0,0 +1,13 @@
use crate::agents::AgentDefinition;
const PROMPT: &str = include_str!("../../../templates/agents/orchestrator.md");
/// Built-in orchestrator agent: read-only, delegates to the other built-ins.
pub(super) fn definition() -> AgentDefinition {
    let sub_agents = vec![
        "worker".to_string(),
        "reviewer".to_string(),
        "q_and_a".to_string(),
    ];
    AgentDefinition {
        name: String::from("orchestrator"),
        instructions: Some(String::from(PROMPT)),
        sub_agents,
        read_only: true,
        ..Default::default()
    }
}

View File

@@ -0,0 +1,15 @@
use codex_protocol::openai_models::ReasoningEffort;
use crate::agents::AgentDefinition;
const PROMPT: &str = include_str!("../../../templates/agents/q_and_a.md");
/// Built-in question-answering agent: read-only, pinned to a specific model
/// with high reasoning effort.
pub(super) fn definition() -> AgentDefinition {
    let instructions = Some(String::from(PROMPT));
    AgentDefinition {
        name: String::from("q_and_a"),
        instructions,
        read_only: true,
        model: Some(String::from("gpt-5.2")),
        reasoning_effort: Some(ReasoningEffort::High),
        ..Default::default()
    }
}

View File

@@ -0,0 +1,15 @@
use codex_protocol::openai_models::ReasoningEffort;
use crate::agents::AgentDefinition;
const PROMPT: &str = include_str!("../../../templates/agents/reviewer.md");
/// Built-in reviewer agent: read-only, pinned to a specific model with high
/// reasoning effort.
pub(super) fn definition() -> AgentDefinition {
    let instructions = Some(String::from(PROMPT));
    AgentDefinition {
        name: String::from("reviewer"),
        instructions,
        read_only: true,
        model: Some(String::from("gpt-5.2")),
        reasoning_effort: Some(ReasoningEffort::High),
        ..Default::default()
    }
}

View File

@@ -0,0 +1,11 @@
use crate::agents::AgentDefinition;
const PROMPT: &str = include_str!("../../../gpt-5.1-codex-max_prompt.md");
/// Built-in worker agent: default settings plus the codex-max base prompt.
pub(super) fn definition() -> AgentDefinition {
    let instructions = Some(String::from(PROMPT));
    AgentDefinition {
        name: String::from("worker"),
        instructions,
        ..Default::default()
    }
}

View File

@@ -0,0 +1,154 @@
mod builtins;
use std::collections::HashMap;
use std::path::Path;
use codex_protocol::openai_models::ReasoningEffort;
use serde::Deserialize;
use tracing::warn;
use builtins::builtin_agents;
/// A single agent definition, either built-in or user-supplied via
/// `agents.toml`.
#[derive(Debug, Clone, Default)]
pub(crate) struct AgentDefinition {
    /// Unique agent name; also used as the lookup key in `AgentsConfig`.
    pub(crate) name: String,
    /// Instructions injected for this agent, when present.
    pub(crate) instructions: Option<String>,
    /// Names of agents this agent is allowed to delegate to.
    pub(crate) sub_agents: Vec<String>,
    /// When true, the agent runs under a read-only sandbox policy.
    pub(crate) read_only: bool,
    /// Optional model override; `None` inherits the session default.
    pub(crate) model: Option<String>,
    /// Optional reasoning-effort override; `None` inherits the session default.
    pub(crate) reasoning_effort: Option<ReasoningEffort>,
}
/// The full set of agents available to a session, keyed by agent name.
/// Built by `AgentsConfig::try_load`; validated to always contain an
/// `orchestrator` entry.
#[derive(Debug, Clone)]
pub(crate) struct AgentsConfig {
    agents: HashMap<String, AgentDefinition>,
}
/// Serde shape of one agent table in `agents.toml`, before normalization
/// into `AgentDefinition` (which also carries the agent's name).
#[derive(Debug, Deserialize)]
struct RawAgentDefinition {
    /// Instructions text; `prompt` is accepted as an alias key.
    #[serde(default, alias = "prompt")]
    instructions: Option<String>,
    #[serde(default)]
    sub_agents: Vec<String>,
    #[serde(default)]
    read_only: bool,
    #[serde(default)]
    model: Option<String>,
    #[serde(default)]
    reasoning_effort: Option<ReasoningEffort>,
}
impl AgentsConfig {
    /// File name (relative to `codex_home`) holding user-defined agents.
    pub(crate) const FILE_NAME: &'static str = "agents.toml";

    /// Load the agents configuration: built-in agents, optionally extended by
    /// user-defined agents from `codex_home/agents.toml`.
    ///
    /// Custom agents may add new definitions but never override built-ins
    /// (duplicate names are logged and ignored). If the merged set fails
    /// validation we fall back to built-ins only; `None` is returned only if
    /// even the built-in set is invalid.
    pub(crate) async fn try_load(codex_home: &Path) -> Option<Self> {
        let mut agents = builtin_agents();
        let path = codex_home.join(Self::FILE_NAME);
        // A missing file just means "no custom agents"; other read errors are
        // logged and treated the same way.
        let content = match tokio::fs::read_to_string(&path).await {
            Ok(content) => Some(content),
            Err(err) if err.kind() == std::io::ErrorKind::NotFound => None,
            Err(err) => {
                warn!("failed to read {}: {err}", path.display());
                None
            }
        };
        if let Some(content) = content {
            match Self::from_toml_str(&content) {
                Ok(custom_agents) => {
                    for (name, agent) in custom_agents {
                        // Built-ins win: a custom definition reusing an
                        // existing name is ignored, not merged.
                        if agents.contains_key(&name) {
                            warn!(
                                "duplicate agent definition {name} in {} ignored",
                                path.display()
                            );
                            continue;
                        }
                        agents.insert(name, agent);
                    }
                }
                Err(err) => {
                    warn!("failed to parse {}: {err}", path.display());
                }
            }
        }
        // Validate the merged set once. Only when it is invalid do we fall
        // back to the built-in set and re-validate that; validating the
        // builtins unconditionally would be redundant, since an unchanged
        // set that just passed validation cannot fail it.
        if let Err(err) = Self::validate_agents(&agents) {
            warn!("failed to validate {}: {err}", path.display());
            agents = builtin_agents();
            if let Err(err) = Self::validate_agents(&agents) {
                warn!("invalid built-in agents config: {err}");
                return None;
            }
        }
        Some(Self { agents })
    }

    /// Parse a TOML document mapping agent names to raw definitions.
    ///
    /// Rejects a `model` that is set but blank; blank `instructions` are
    /// normalized to `None` rather than rejected.
    fn from_toml_str(contents: &str) -> Result<HashMap<String, AgentDefinition>, String> {
        let raw: HashMap<String, RawAgentDefinition> =
            toml::from_str(contents).map_err(|err| format!("invalid toml: {err}"))?;
        let mut agents = HashMap::new();
        for (name, agent) in raw {
            if let Some(model) = agent.model.as_deref()
                && model.trim().is_empty()
            {
                return Err(format!("agent {name}: model must be non-empty when set"));
            }
            // Treat whitespace-only instructions as absent so downstream code
            // can rely on `Some` meaning "has real instructions".
            let instructions = agent.instructions.and_then(|instructions| {
                if instructions.trim().is_empty() {
                    None
                } else {
                    Some(instructions)
                }
            });
            agents.insert(
                name.clone(),
                AgentDefinition {
                    name,
                    instructions,
                    sub_agents: agent.sub_agents,
                    read_only: agent.read_only,
                    model: agent.model,
                    reasoning_effort: agent.reasoning_effort,
                },
            );
        }
        Ok(agents)
    }

    /// Validate structural invariants: an `orchestrator` agent must exist and
    /// every `sub_agents` entry must name a known agent.
    fn validate_agents(agents: &HashMap<String, AgentDefinition>) -> Result<(), String> {
        if !agents.contains_key("orchestrator") {
            return Err("missing required agent: orchestrator".to_string());
        }
        for agent in agents.values() {
            for sub in &agent.sub_agents {
                if !agents.contains_key(sub) {
                    return Err(format!(
                        "agent {name}: unknown sub_agent {sub}",
                        name = agent.name.as_str()
                    ));
                }
            }
        }
        Ok(())
    }

    /// Look up an agent definition by name.
    pub(crate) fn agent(&self, name: &str) -> Option<&AgentDefinition> {
        self.agents.get(name)
    }

    /// The entry-point agent. Presence is guaranteed by `validate_agents`.
    pub(crate) fn main(&self) -> &AgentDefinition {
        self.agents
            .get("orchestrator")
            .expect("agents config validated orchestrator agent")
    }
}

View File

@@ -126,8 +126,13 @@ use crate::skills::SkillMetadata;
use crate::skills::SkillsManager;
use crate::skills::build_skill_injections;
use crate::state::ActiveTurn;
use crate::state::AgentId;
use crate::state::AgentState;
use crate::state::CollaborationLimits;
use crate::state::CollaborationState;
use crate::state::SessionServices;
use crate::state::SessionState;
use crate::tasks::CollaborationSupervisor;
use crate::tasks::GhostSnapshotTask;
use crate::tasks::ReviewTask;
use crate::tasks::SessionTask;
@@ -195,6 +200,7 @@ fn maybe_push_chat_wire_api_deprecation(
post_session_configured_events.push(Event {
id: INITIAL_SUBMIT_ID.to_owned(),
agent_idx: None,
msg: EventMsg::DeprecationNotice(DeprecationNoticeEvent {
summary: CHAT_WIRE_API_DEPRECATION_SUMMARY.to_string(),
details: None,
@@ -345,6 +351,10 @@ pub(crate) struct Session {
features: Features,
pub(crate) active_turn: Mutex<Option<ActiveTurn>>,
pub(crate) services: SessionServices,
agents_config: Option<Arc<crate::agents::AgentsConfig>>,
default_sandbox_policy: SandboxPolicy,
collaboration: Mutex<CollaborationState>,
collaboration_supervisor: Mutex<Option<CollaborationSupervisor>>,
next_internal_sub_id: AtomicU64,
}
@@ -371,6 +381,7 @@ pub(crate) struct TurnContext {
pub(crate) tool_call_gate: Arc<ReadinessFlag>,
pub(crate) exec_policy: Arc<RwLock<ExecPolicy>>,
pub(crate) truncation_policy: TruncationPolicy,
pub(crate) collaboration_agent: AgentId,
}
impl TurnContext {
@@ -385,9 +396,13 @@ impl TurnContext {
.as_deref()
.unwrap_or(compact::SUMMARIZATION_PROMPT)
}
pub(crate) fn collaboration_agent(&self) -> AgentId {
self.collaboration_agent.clone()
}
}
#[derive(Clone)]
#[derive(Clone, Debug)]
pub(crate) struct SessionConfiguration {
/// Provider identifier ("openai", "openrouter", ...).
provider: ModelProviderInfo,
@@ -456,6 +471,40 @@ impl SessionConfiguration {
}
next_configuration
}
pub(crate) fn developer_instructions(&self) -> Option<String> {
self.developer_instructions.clone()
}
pub(crate) fn user_instructions(&self) -> Option<String> {
self.user_instructions.clone()
}
pub(crate) fn with_instructions(
mut self,
developer_instructions: Option<String>,
user_instructions: Option<String>,
) -> Self {
self.developer_instructions = developer_instructions;
self.user_instructions = user_instructions;
self
}
pub(crate) fn approval_policy(&self) -> AskForApproval {
self.approval_policy
}
pub(crate) fn sandbox_policy(&self) -> SandboxPolicy {
self.sandbox_policy.clone()
}
pub(crate) fn cwd(&self) -> &PathBuf {
&self.cwd
}
pub(crate) fn model(&self) -> &str {
self.model.as_str()
}
}
#[derive(Default, Clone)]
@@ -491,6 +540,7 @@ impl Session {
model_family: ModelFamily,
conversation_id: ConversationId,
sub_id: String,
agent_id: AgentId,
) -> TurnContext {
let otel_manager = otel_manager.clone().with_model(
session_configuration.model.as_str(),
@@ -536,9 +586,43 @@ impl Session {
per_turn_config.as_ref(),
model_family.truncation_policy,
),
collaboration_agent: agent_id,
}
}
/// Build a per-turn context for a collaboration (sub-)agent turn.
///
/// The model family and per-turn config are derived from the agent's own
/// configuration rather than the session defaults. If this agent has a
/// non-empty `sub_agents` allowlist in the agents config, the turn's
/// tools are restricted to spawning only those sub-agents.
pub(crate) async fn make_collaboration_turn_context(
    &self,
    agent: &AgentState,
    sub_id: String,
) -> Arc<TurnContext> {
    let per_turn_config = Self::build_per_turn_config(&agent.config);
    let model_family = self
        .services
        .models_manager
        .construct_model_family(agent.config.model.as_str(), &per_turn_config)
        .await;
    let mut turn_context = Self::make_turn_context(
        Some(Arc::clone(&self.services.auth_manager)),
        &self.services.otel_manager,
        agent.config.provider.clone(),
        &agent.config,
        per_turn_config,
        model_family,
        self.conversation_id,
        sub_id,
        agent.id.clone(),
    );
    // Only set the allowlist when non-empty; an empty list leaves
    // `collaboration_agent_allowlist` unset (its default).
    if let Some(agents_config) = self.agents_config()
        && let Some(agent_config) = agents_config.agent(agent.name.as_str())
    {
        let allowlist = agent_config.sub_agents.clone();
        if !allowlist.is_empty() {
            turn_context.tools_config.collaboration_agent_allowlist = Some(allowlist);
        }
    }
    Arc::new(turn_context)
}
#[allow(clippy::too_many_arguments)]
async fn new(
session_configuration: SessionConfiguration,
@@ -550,6 +634,25 @@ impl Session {
session_source: SessionSource,
skills_manager: Arc<SkillsManager>,
) -> anyhow::Result<Arc<Self>> {
let agents_config = crate::agents::AgentsConfig::try_load(&config.codex_home).await;
let default_sandbox_policy = session_configuration.sandbox_policy.clone();
let mut session_configuration = session_configuration;
if let Some(agents_config) = agents_config.as_ref() {
let main = agents_config.main();
session_configuration.developer_instructions = main.instructions.clone();
session_configuration.user_instructions = None;
if let Some(model) = main.model.clone() {
session_configuration.model = model;
}
if let Some(effort) = main.reasoning_effort {
session_configuration.model_reasoning_effort = Some(effort);
}
if main.read_only {
session_configuration.sandbox_policy = SandboxPolicy::ReadOnly;
}
}
debug!(
"Configuring session: model={}; provider={:?}",
session_configuration.model, session_configuration.provider
@@ -616,6 +719,7 @@ impl Session {
};
post_session_configured_events.push(Event {
id: INITIAL_SUBMIT_ID.to_owned(),
agent_idx: Some(0),
msg: EventMsg::DeprecationNotice(DeprecationNoticeEvent { summary, details }),
});
}
@@ -655,6 +759,7 @@ impl Session {
.map(Arc::new);
}
let state = SessionState::new(session_configuration.clone());
let collaboration = CollaborationState::new(CollaborationLimits::default());
let services = SessionServices {
mcp_connection_manager: Arc::new(RwLock::new(McpConnectionManager::default())),
@@ -678,6 +783,10 @@ impl Session {
features: config.features.clone(),
active_turn: Mutex::new(None),
services,
agents_config: agents_config.map(Arc::new),
default_sandbox_policy,
collaboration: Mutex::new(collaboration),
collaboration_supervisor: Mutex::new(None),
next_internal_sub_id: AtomicU64::new(0),
});
@@ -686,6 +795,7 @@ impl Session {
let initial_messages = initial_history.get_event_msgs();
let events = std::iter::once(Event {
id: INITIAL_SUBMIT_ID.to_owned(),
agent_idx: Some(0),
msg: EventMsg::SessionConfigured(SessionConfiguredEvent {
session_id: conversation_id,
model: session_configuration.model.clone(),
@@ -736,6 +846,30 @@ impl Session {
self.tx_event.clone()
}
pub(crate) fn collaboration_state(&self) -> &Mutex<CollaborationState> {
&self.collaboration
}
pub(crate) fn agents_config(&self) -> Option<&crate::agents::AgentsConfig> {
self.agents_config.as_deref()
}
pub(crate) fn default_sandbox_policy(&self) -> &SandboxPolicy {
&self.default_sandbox_policy
}
pub(crate) async fn ensure_collaboration_supervisor(
self: &Arc<Self>,
) -> CollaborationSupervisor {
let mut guard = self.collaboration_supervisor.lock().await;
if let Some(existing) = guard.clone() {
return existing;
}
let handle = CollaborationSupervisor::spawn(Arc::clone(self));
*guard = Some(handle.clone());
handle
}
/// Ensure all rollout writes are durably flushed.
pub(crate) async fn flush_rollout(&self) {
let recorder = {
@@ -870,6 +1004,7 @@ impl Session {
.models_manager
.construct_model_family(session_configuration.model.as_str(), &per_turn_config)
.await;
let root_id = AgentId::root();
let mut turn_context: TurnContext = Self::make_turn_context(
Some(Arc::clone(&self.services.auth_manager)),
&self.services.otel_manager,
@@ -879,13 +1014,37 @@ impl Session {
model_family,
self.conversation_id,
sub_id,
root_id.clone(),
);
if let Some(final_schema) = updates.final_output_json_schema {
turn_context.final_output_json_schema = final_schema;
}
if let Some(agents_config) = self.agents_config() {
let allowlist = agents_config.main().sub_agents.clone();
if !allowlist.is_empty() {
turn_context.tools_config.collaboration_agent_allowlist = Some(allowlist);
}
}
let session_history = self.clone_history().await;
let mut collab = self.collaboration.lock().await;
collab.ensure_root_agent(&session_configuration, &session_history);
drop(collab);
self.register_sub_id(&root_id, turn_context.sub_id.clone())
.await;
Arc::new(turn_context)
}
/// Record the mapping from a collaboration agent to its submission id.
pub(crate) async fn register_sub_id(&self, agent: &AgentId, sub_id: String) {
    self.collaboration
        .lock()
        .await
        .register_sub_id(agent, sub_id);
}
/// Snapshot the session configuration currently held in session state.
pub(crate) async fn current_session_configuration(&self) -> SessionConfiguration {
    self.state.lock().await.session_configuration.clone()
}
fn build_environment_update_item(
&self,
previous: Option<&Arc<TurnContext>>,
@@ -908,9 +1067,21 @@ impl Session {
/// Persist the event to rollout and send it to clients.
pub(crate) async fn send_event(&self, turn_context: &TurnContext, msg: EventMsg) {
let agent = turn_context.collaboration_agent();
let is_root = agent.is_root();
if !Self::should_emit_event_for_agent(is_root, &msg) {
return;
}
let legacy_source = msg.clone();
let agent_idx = if is_root {
Some(0)
} else {
let collab = self.collaboration.lock().await;
collab.agent_index(&agent)
};
let event = Event {
id: turn_context.sub_id.clone(),
agent_idx,
msg,
};
self.send_event_raw(event).await;
@@ -919,13 +1090,42 @@ impl Session {
for legacy in legacy_source.as_legacy_events(show_raw_agent_reasoning) {
let legacy_event = Event {
id: turn_context.sub_id.clone(),
agent_idx,
msg: legacy,
};
self.send_event_raw(legacy_event).await;
}
}
/// Decide whether an event from the given agent should be emitted.
///
/// Root-agent events always pass. For sub-agents, message/reasoning stream
/// events (and a few related kinds) are suppressed; everything else passes.
fn should_emit_event_for_agent(is_root: bool, msg: &EventMsg) -> bool {
    if is_root {
        return true;
    }
    match msg {
        EventMsg::AgentMessage(_)
        | EventMsg::AgentMessageDelta(_)
        | EventMsg::AgentMessageContentDelta(_)
        | EventMsg::AgentReasoning(_)
        | EventMsg::AgentReasoningDelta(_)
        | EventMsg::AgentReasoningRawContent(_)
        | EventMsg::AgentReasoningRawContentDelta(_)
        | EventMsg::AgentReasoningSectionBreak(_)
        | EventMsg::ReasoningContentDelta(_)
        | EventMsg::ReasoningRawContentDelta(_)
        | EventMsg::RawResponseItem(_)
        | EventMsg::UserMessage(_)
        | EventMsg::TurnDiff(_) => false,
        _ => true,
    }
}
pub(crate) async fn send_event_raw(&self, event: Event) {
if let Some(agent_idx) = event.agent_idx
&& agent_idx != 0
&& !Self::should_emit_event_for_agent(false, &event.msg)
{
return;
}
// Persist the event into rollout (recorder filters as needed)
let rollout_items = vec![RolloutItem::EventMsg(event.msg.clone())];
self.persist_rollout_items(&rollout_items).await;
@@ -1168,8 +1368,33 @@ impl Session {
items: &[ResponseItem],
turn_context: &TurnContext,
) {
let mut state = self.state.lock().await;
state.record_items(items.iter(), turn_context.truncation_policy);
let agent = turn_context.collaboration_agent();
if agent.is_root() {
let (history, token_info, config) = {
let mut state = self.state.lock().await;
state.record_items(items.iter(), turn_context.truncation_policy);
(
state.clone_history(),
state.token_info(),
state.session_configuration.clone(),
)
};
let mut collab = self.collaboration.lock().await;
collab.ensure_root_agent(&config, &history);
let root_id = AgentId::root();
if let Some(root) = collab.agent_mut(&root_id) {
root.history = history;
root.history.set_token_info(token_info);
}
} else {
let mut collab = self.collaboration.lock().await;
if let Some(agent_state) = collab.agent_mut(&agent) {
agent_state
.history
.record_items(items.iter(), turn_context.truncation_policy);
}
}
}
pub(crate) async fn record_model_warning(&self, message: impl Into<String>, ctx: &TurnContext) {
@@ -1189,8 +1414,8 @@ impl Session {
}
pub(crate) async fn replace_history(&self, items: Vec<ResponseItem>) {
let mut state = self.state.lock().await;
state.replace_history(items);
let root_id = AgentId::root();
self.set_history_for_agent(&root_id, items, None).await;
}
async fn persist_rollout_response_items(&self, items: &[ResponseItem]) {
@@ -1257,8 +1482,35 @@ impl Session {
}
pub(crate) async fn clone_history(&self) -> ContextManager {
let state = self.state.lock().await;
state.clone_history()
let root_id = AgentId::root();
self.clone_history_for_agent(&root_id).await
}
/// Clone the conversation history owned by `agent`.
///
/// The root agent's history lives in session state; sub-agent histories live
/// in collaboration state. An unknown agent yields a fresh, empty history.
pub(crate) async fn clone_history_for_agent(&self, agent: &AgentId) -> ContextManager {
    if agent.is_root() {
        return self.state.lock().await.clone_history();
    }
    match self.collaboration.lock().await.clone_agent_history(agent) {
        Some(history) => history,
        None => ContextManager::new(),
    }
}
/// Replace `agent`'s history and (optionally) its token info.
///
/// For the root agent the canonical copy in session state is updated first;
/// the collaboration-state copy is then updated in all cases so both views
/// stay in sync. The result of `set_agent_history` is discarded —
/// NOTE(review): looks like a deliberate best-effort write; confirm that an
/// unknown agent id here is acceptable to ignore.
pub(crate) async fn set_history_for_agent(
    &self,
    agent: &AgentId,
    items: Vec<ResponseItem>,
    token_info: Option<TokenUsageInfo>,
) {
    if agent.is_root() {
        let mut state = self.state.lock().await;
        state.replace_history(items.clone());
        state.set_token_info(token_info.clone());
    }
    let mut collab = self.collaboration.lock().await;
    let _ = collab.set_agent_history(agent, items, token_info);
}
pub(crate) async fn update_token_usage_info(
@@ -1266,49 +1518,94 @@ impl Session {
turn_context: &TurnContext,
token_usage: Option<&TokenUsage>,
) {
{
let mut state = self.state.lock().await;
if let Some(token_usage) = token_usage {
state.update_token_info_from_usage(
token_usage,
turn_context.client.get_model_context_window(),
);
let agent = turn_context.collaboration_agent();
if agent == AgentId::root() {
let token_info = {
let mut state = self.state.lock().await;
if let Some(token_usage) = token_usage {
state.update_token_info_from_usage(
token_usage,
turn_context.client.get_model_context_window(),
);
}
state.token_info()
};
{
let mut collab = self.collaboration.lock().await;
if let Some(root) = collab.agent_mut(AgentId::root()) {
root.history.set_token_info(token_info);
}
}
self.send_token_count_event(turn_context).await;
} else if let Some(token_usage) = token_usage {
let mut collab = self.collaboration.lock().await;
if let Some(agent_state) = collab.agent_mut(agent) {
agent_state
.history
.update_token_info(token_usage, turn_context.client.get_model_context_window());
}
}
self.send_token_count_event(turn_context).await;
}
pub(crate) async fn recompute_token_usage(&self, turn_context: &TurnContext) {
let Some(estimated_total_tokens) = self
.clone_history()
.await
.estimate_token_count(turn_context)
else {
let agent = turn_context.collaboration_agent();
let history = self.clone_history_for_agent(&agent).await;
let Some(estimated_total_tokens) = history.estimate_token_count(turn_context) else {
return;
};
{
let mut state = self.state.lock().await;
let mut info = state.token_info().unwrap_or(TokenUsageInfo {
total_token_usage: TokenUsage::default(),
last_token_usage: TokenUsage::default(),
model_context_window: None,
});
info.last_token_usage = TokenUsage {
input_tokens: 0,
cached_input_tokens: 0,
output_tokens: 0,
reasoning_output_tokens: 0,
total_tokens: estimated_total_tokens.max(0),
if agent == AgentId::root() {
let token_info = {
let mut state = self.state.lock().await;
let mut info = state.token_info().unwrap_or(TokenUsageInfo {
total_token_usage: TokenUsage::default(),
last_token_usage: TokenUsage::default(),
model_context_window: None,
});
info.last_token_usage = TokenUsage {
input_tokens: 0,
cached_input_tokens: 0,
output_tokens: 0,
reasoning_output_tokens: 0,
total_tokens: estimated_total_tokens.max(0),
};
if info.model_context_window.is_none() {
info.model_context_window = turn_context.client.get_model_context_window();
}
state.set_token_info(Some(info.clone()));
info
};
if info.model_context_window.is_none() {
info.model_context_window = turn_context.client.get_model_context_window();
{
let mut collab = self.collaboration.lock().await;
if let Some(root) = collab.agent_mut(AgentId::root()) {
root.history.set_token_info(Some(token_info));
}
}
self.send_token_count_event(turn_context).await;
} else {
let mut collab = self.collaboration.lock().await;
if let Some(agent_state) = collab.agent_mut(agent) {
let mut info = agent_state.history.token_info().unwrap_or(TokenUsageInfo {
total_token_usage: TokenUsage::default(),
last_token_usage: TokenUsage::default(),
model_context_window: None,
});
info.last_token_usage = TokenUsage {
input_tokens: 0,
cached_input_tokens: 0,
output_tokens: 0,
reasoning_output_tokens: 0,
total_tokens: estimated_total_tokens.max(0),
};
if info.model_context_window.is_none() {
info.model_context_window = turn_context.client.get_model_context_window();
}
agent_state.history.set_token_info(Some(info));
}
state.set_token_info(Some(info));
}
self.send_token_count_event(turn_context).await;
}
pub(crate) async fn update_rate_limits(
@@ -1333,13 +1630,28 @@ impl Session {
}
pub(crate) async fn set_total_tokens_full(&self, turn_context: &TurnContext) {
let agent = turn_context.collaboration_agent();
let context_window = turn_context.client.get_model_context_window();
if let Some(context_window) = context_window {
{
let mut state = self.state.lock().await;
state.set_token_usage_full(context_window);
if agent == AgentId::root() {
let token_info = {
let mut state = self.state.lock().await;
state.set_token_usage_full(context_window);
state.token_info()
};
{
let mut collab = self.collaboration.lock().await;
if let Some(root) = collab.agent_mut(AgentId::root()) {
root.history.set_token_info(token_info);
}
}
self.send_token_count_event(turn_context).await;
} else {
let mut collab = self.collaboration.lock().await;
if let Some(agent_state) = collab.agent_mut(agent) {
agent_state.history.set_token_usage_full(context_window);
}
}
self.send_token_count_event(turn_context).await;
}
}
@@ -1635,6 +1947,7 @@ mod handlers {
use crate::mcp::auth::compute_auth_statuses;
use crate::mcp::collect_mcp_snapshot_from_manager;
use crate::review_prompts::resolve_review_request;
use crate::tasks::CollaborationTask;
use crate::tasks::CompactTask;
use crate::tasks::RegularTask;
use crate::tasks::UndoTask;
@@ -1717,8 +2030,13 @@ mod handlers {
.await;
}
sess.spawn_task(Arc::clone(&current_context), items, RegularTask)
.await;
if sess.enabled(Feature::MultiAgents) {
sess.spawn_task(Arc::clone(&current_context), items, CollaborationTask)
.await;
} else {
sess.spawn_task(Arc::clone(&current_context), items, RegularTask)
.await;
}
*previous_context = Some(current_context);
}
}
@@ -1782,6 +2100,7 @@ mod handlers {
let warning = EventMsg::Warning(WarningEvent { message });
sess.send_event_raw(Event {
id: id.clone(),
agent_idx: Some(0),
msg: warning,
})
.await;
@@ -1833,6 +2152,7 @@ mod handlers {
let event = Event {
id: sub_id,
agent_idx: Some(0),
msg: EventMsg::GetHistoryEntryResponse(
crate::protocol::GetHistoryEntryResponseEvent {
offset,
@@ -1863,6 +2183,7 @@ mod handlers {
.await;
let event = Event {
id: sub_id,
agent_idx: Some(0),
msg: EventMsg::McpListToolsResponse(snapshot),
};
sess.send_event_raw(event).await;
@@ -1878,6 +2199,7 @@ mod handlers {
let event = Event {
id: sub_id,
agent_idx: Some(0),
msg: EventMsg::ListCustomPromptsResponse(ListCustomPromptsResponseEvent {
custom_prompts,
}),
@@ -1917,6 +2239,7 @@ mod handlers {
};
let event = Event {
id: sub_id,
agent_idx: None,
msg: EventMsg::ListSkillsResponse(ListSkillsResponseEvent { skills }),
};
sess.send_event_raw(event).await;
@@ -1965,6 +2288,7 @@ mod handlers {
warn!("failed to shutdown rollout recorder: {e}");
let event = Event {
id: sub_id.clone(),
agent_idx: Some(0),
msg: EventMsg::Error(ErrorEvent {
message: "Failed to shutdown rollout recorder".to_string(),
codex_error_info: Some(CodexErrorInfo::Other),
@@ -1975,6 +2299,7 @@ mod handlers {
let event = Event {
id: sub_id,
agent_idx: Some(0),
msg: EventMsg::ShutdownComplete,
};
sess.send_event_raw(event).await;
@@ -2004,6 +2329,7 @@ mod handlers {
Err(err) => {
let event = Event {
id: sub_id,
agent_idx: Some(0),
msg: EventMsg::Error(ErrorEvent {
message: err.to_string(),
codex_error_info: Some(CodexErrorInfo::Other),
@@ -2087,6 +2413,7 @@ async fn spawn_review_thread(
tool_call_gate: Arc::new(ReadinessFlag::new()),
exec_policy: parent_turn_context.exec_policy.clone(),
truncation_policy: TruncationPolicy::new(&per_turn_config, model_family.truncation_policy),
collaboration_agent: AgentId::root(),
};
// Seed the child task with the review prompt as the initial user message.
@@ -2411,6 +2738,31 @@ async fn run_turn(
}
}
/// Run one collaboration turn.
///
/// Thin adapter over `run_turn` that flattens the `TurnRunResult` into a
/// `(needs_follow_up, last_agent_message)` tuple.
pub(crate) async fn run_collaboration_turn(
    sess: Arc<Session>,
    turn_context: Arc<TurnContext>,
    turn_diff_tracker: SharedTurnDiffTracker,
    input: Vec<ResponseItem>,
    cancellation_token: CancellationToken,
) -> CodexResult<(bool, Option<String>)> {
    let result = run_turn(
        sess,
        turn_context,
        turn_diff_tracker,
        input,
        cancellation_token,
    )
    .await?;
    Ok((result.needs_follow_up, result.last_agent_message))
}
/// When the model is prompted, it returns a stream of events. Some of these
/// events map to a `ResponseItem`. A `ResponseItem` may need to be
/// "handled" such that it produces a `ResponseInputItem` that needs to be
/// sent back to the model on the next turn.
#[derive(Debug)]
struct TurnRunResult {
needs_follow_up: bool,
@@ -2647,7 +2999,7 @@ async fn try_run_turn(
outcome
}
pub(super) fn get_last_assistant_message_from_turn(responses: &[ResponseItem]) -> Option<String> {
pub(crate) fn get_last_assistant_message_from_turn(responses: &[ResponseItem]) -> Option<String> {
responses.iter().rev().find_map(|item| {
if let ResponseItem::Message { role, content, .. } = item {
if role == "assistant" {
@@ -3076,6 +3428,7 @@ mod tests {
let state = SessionState::new(session_configuration.clone());
let skills_manager = Arc::new(SkillsManager::new(config.codex_home.clone()));
let collaboration = CollaborationState::new(CollaborationLimits::default());
let services = SessionServices {
mcp_connection_manager: Arc::new(RwLock::new(McpConnectionManager::default())),
@@ -3101,6 +3454,7 @@ mod tests {
model_family,
conversation_id,
"turn_id".to_string(),
AgentId::root(),
);
let session = Session {
@@ -3110,6 +3464,10 @@ mod tests {
features: config.features.clone(),
active_turn: Mutex::new(None),
services,
agents_config: None,
default_sandbox_policy: session_configuration.sandbox_policy,
collaboration: Mutex::new(collaboration),
collaboration_supervisor: Mutex::new(None),
next_internal_sub_id: AtomicU64::new(0),
};
@@ -3167,6 +3525,7 @@ mod tests {
let state = SessionState::new(session_configuration.clone());
let skills_manager = Arc::new(SkillsManager::new(config.codex_home.clone()));
let collaboration = CollaborationState::new(CollaborationLimits::default());
let services = SessionServices {
mcp_connection_manager: Arc::new(RwLock::new(McpConnectionManager::default())),
@@ -3192,6 +3551,7 @@ mod tests {
model_family,
conversation_id,
"turn_id".to_string(),
AgentId::root(),
));
let session = Arc::new(Session {
@@ -3201,6 +3561,10 @@ mod tests {
features: config.features.clone(),
active_turn: Mutex::new(None),
services,
agents_config: None,
default_sandbox_policy: session_configuration.sandbox_policy,
collaboration: Mutex::new(collaboration),
collaboration_supervisor: Mutex::new(None),
next_internal_sub_id: AtomicU64::new(0),
});

View File

@@ -182,14 +182,17 @@ async fn forward_events(
// ignore all legacy delta events
Event {
id: _,
agent_idx: _,
msg: EventMsg::AgentMessageDelta(_) | EventMsg::AgentReasoningDelta(_),
} => {}
Event {
id: _,
agent_idx: _,
msg: EventMsg::SessionConfigured(_),
} => {}
Event {
id,
agent_idx: _,
msg: EventMsg::ExecApprovalRequest(event),
} => {
// Initiate approval via parent session; do not surface to consumer.
@@ -205,6 +208,7 @@ async fn forward_events(
}
Event {
id,
agent_idx: _,
msg: EventMsg::ApplyPatchApprovalRequest(event),
} => {
handle_patch_approval(
@@ -372,6 +376,7 @@ mod tests {
tx_out
.send(Event {
id: "full".to_string(),
agent_idx: Some(0),
msg: EventMsg::TurnAborted(TurnAbortedEvent {
reason: TurnAbortReason::Interrupted,
}),
@@ -391,6 +396,7 @@ mod tests {
tx_events
.send(Event {
id: "evt".to_string(),
agent_idx: Some(0),
msg: EventMsg::RawResponseItem(RawResponseItemEvent {
item: ResponseItem::CustomToolCall {
id: None,

View File

@@ -145,6 +145,7 @@ impl ConversationManager {
let session_configured = match event {
Event {
id,
agent_idx: _,
msg: EventMsg::SessionConfigured(session_configured),
} if id == INITIAL_SUBMIT_ID => session_configured,
_ => {

View File

@@ -710,6 +710,7 @@ async fn read_capped<R: AsyncRead + Unpin + Send + 'static>(
});
let event = Event {
id: stream.sub_id.clone(),
agent_idx: None,
msg,
};
#[allow(clippy::let_unit_value)]

View File

@@ -64,6 +64,8 @@ pub enum Feature {
ShellSnapshot,
/// Experimental TUI v2 (viewport) implementation.
Tui2,
/// Enable multi-agent collaboration workflows.
MultiAgents,
}
impl Feature {
@@ -353,6 +355,12 @@ pub const FEATURES: &[FeatureSpec] = &[
stage: Stage::Experimental,
default_enabled: false,
},
FeatureSpec {
id: Feature::MultiAgents,
key: "multi_agents",
stage: Stage::Experimental,
default_enabled: false,
},
FeatureSpec {
id: Feature::ShellSnapshot,
key: "shell_snapshot",

View File

@@ -5,6 +5,7 @@
// the TUI or the tracing stack).
#![deny(clippy::print_stdout, clippy::print_stderr)]
mod agents;
pub mod api_bridge;
mod apply_patch;
pub mod auth;

View File

@@ -159,6 +159,7 @@ impl ElicitationRequestManager {
let _ = tx_event
.send(Event {
id: "mcp_elicitation_request".to_string(),
agent_idx: Some(0),
msg: EventMsg::ElicitationRequest(ElicitationRequestEvent {
server_name,
id,
@@ -370,6 +371,7 @@ impl McpConnectionManager {
let _ = tx_event
.send(Event {
id: INITIAL_SUBMIT_ID.to_owned(),
agent_idx: Some(0),
msg: EventMsg::McpStartupComplete(summary),
})
.await;
@@ -661,6 +663,7 @@ async fn emit_update(
tx_event
.send(Event {
id: INITIAL_SUBMIT_ID.to_owned(),
agent_idx: Some(0),
msg: EventMsg::McpStartupUpdate(update),
})
.await

View File

@@ -301,6 +301,9 @@ pub(super) fn find_family_for_model(slug: &str) -> ModelFamily {
reasoning_summary_format: ReasoningSummaryFormat::Experimental,
base_instructions: GPT_5_1_CODEX_MAX_INSTRUCTIONS.to_string(),
apply_patch_tool_type: Some(ApplyPatchToolType::Freeform),
experimental_supported_tools: vec![
"collaboration".to_string(),
],
shell_type: ConfigShellToolType::ShellCommand,
supports_parallel_tool_calls: false,
support_verbosity: false,
@@ -317,6 +320,12 @@ pub(super) fn find_family_for_model(slug: &str) -> ModelFamily {
reasoning_summary_format: ReasoningSummaryFormat::Experimental,
base_instructions: GPT_5_CODEX_INSTRUCTIONS.to_string(),
apply_patch_tool_type: Some(ApplyPatchToolType::Freeform),
experimental_supported_tools: vec![
"grep_files".to_string(),
"list_dir".to_string(),
"read_file".to_string(),
"test_sync_tool".to_string(),
],
shell_type: ConfigShellToolType::ShellCommand,
supports_parallel_tool_calls: false,
support_verbosity: false,

View File

@@ -0,0 +1,354 @@
//! Session-scoped collaboration state for multi-agent flows.
use std::collections::HashMap;
use std::fmt;
use codex_protocol::models::ContentItem;
use codex_protocol::models::ResponseItem;
use serde::Deserialize;
use serde::Serialize;
use crate::codex::SessionConfiguration;
use crate::context_manager::ContextManager;
use crate::protocol::TokenUsageInfo;
use crate::truncate::TruncationPolicy;
use tracing::warn;
/// Render a response item as plain text for tracing logs.
///
/// Message items are flattened to one line per content entry (input text
/// verbatim, anything else as a `<non-text content>` placeholder); every
/// other item kind renders as `<non-message item>`.
fn content_for_log(message: &ResponseItem) -> String {
    let ResponseItem::Message { content, .. } = message else {
        return "<non-message item>".to_string();
    };
    content
        .iter()
        .map(|item| match item {
            ContentItem::InputText { text } => text.as_str(),
            _ => "<non-text content>",
        })
        .collect::<Vec<_>>()
        .join("\n")
}
/// Stable identifier for a collaboration agent within a session.
///
/// The orchestrating agent always uses the well-known id `"root"`; child
/// agents get a random UUID string.
#[derive(Clone, Debug, Eq, PartialEq, Hash)]
pub(crate) struct AgentId(pub String);

/// Well-known id of the root (orchestrator) agent. Kept as a single const so
/// `root()` and `is_root()` cannot drift apart.
const ROOT_AGENT_ID: &str = "root";

impl AgentId {
    /// Id of the root (orchestrator) agent.
    pub fn root() -> Self {
        Self(ROOT_AGENT_ID.to_string())
    }

    /// Fresh, collision-resistant id for a child agent.
    pub fn random() -> Self {
        Self(uuid::Uuid::new_v4().to_string())
    }

    /// True when this id denotes the root agent.
    pub fn is_root(&self) -> bool {
        self.0 == ROOT_AGENT_ID
    }
}

impl fmt::Display for AgentId {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        self.0.fmt(f)
    }
}
/// Lifecycle of a collaboration agent as tracked by the session.
#[allow(dead_code)]
#[derive(Clone, Debug)]
pub(crate) enum AgentLifecycleState {
    /// Not currently running; carries the final message of the last
    /// completed turn, when there was one.
    Idle { last_agent_message: Option<String> },
    /// A turn is executing right now.
    Running,
    /// Stopped because the token budget ran out while work remained
    /// (set by `run_agent_turns`).
    Exhausted,
    /// A turn failed; `error` is the rendered failure message.
    Error { error: String },
    /// Shut down and no longer runnable.
    Closed,
    /// Blocked on a human approval described by `request`.
    WaitingForApproval { request: String },
}
/// How a newly spawned agent's conversation history is seeded.
///
/// Uses `#[derive(Default)]` with the `#[default]` variant attribute instead
/// of a hand-written `Default` impl (the file already targets modern Rust).
#[derive(Clone, Debug, Default, Deserialize, Serialize)]
#[serde(rename_all = "snake_case")]
pub(crate) enum ContextStrategy {
    /// Start from an empty history (the default).
    #[default]
    New,
    /// Copy the parent agent's history.
    Fork,
    /// Start from the given, explicitly supplied items.
    Replace(Vec<ResponseItem>),
}
/// Per-agent bookkeeping: identity, placement in the agent tree,
/// configuration, and the agent's private conversation history.
#[derive(Clone, Debug)]
pub(crate) struct AgentState {
    pub(crate) id: AgentId,
    pub(crate) name: String,
    pub(crate) parent: Option<AgentId>,
    pub(crate) depth: i32,
    pub(crate) config: SessionConfiguration,
    pub(crate) instructions: Option<String>,
    pub(crate) status: AgentLifecycleState,
    pub(crate) history: ContextManager,
}

impl AgentState {
    /// Build the root (orchestrator) agent: no parent, depth 0, starts idle.
    pub(crate) fn new_root(
        name: String,
        config: SessionConfiguration,
        history: ContextManager,
        instructions: Option<String>,
    ) -> Self {
        let idle = AgentLifecycleState::Idle {
            last_agent_message: None,
        };
        Self {
            id: AgentId::root(),
            parent: None,
            depth: 0,
            status: idle,
            name,
            config,
            instructions,
            history,
        }
    }

    /// Build a child agent attached to `parent` at `depth`, starting idle.
    pub(crate) fn new_child(
        id: AgentId,
        name: String,
        parent: AgentId,
        depth: i32,
        config: SessionConfiguration,
        instructions: Option<String>,
        history: ContextManager,
    ) -> Self {
        let idle = AgentLifecycleState::Idle {
            last_agent_message: None,
        };
        Self {
            parent: Some(parent),
            status: idle,
            id,
            name,
            depth,
            config,
            instructions,
            history,
        }
    }
}
/// Hard caps on the collaboration agent tree.
#[derive(Clone, Copy, Debug)]
pub(crate) struct CollaborationLimits {
    /// Maximum number of live agents in a session (root included).
    pub(crate) max_agents: i32,
    /// Maximum depth of the agent tree (the root agent is depth 0).
    pub(crate) max_depth: i32,
}

impl CollaborationLimits {
    // Named defaults instead of bare magic numbers in the Default impl.
    const DEFAULT_MAX_AGENTS: i32 = 8;
    const DEFAULT_MAX_DEPTH: i32 = 4;
}

impl Default for CollaborationLimits {
    fn default() -> Self {
        Self {
            max_agents: Self::DEFAULT_MAX_AGENTS,
            max_depth: Self::DEFAULT_MAX_DEPTH,
        }
    }
}
/// Session-wide registry of collaboration agents and their relationships.
pub(crate) struct CollaborationState {
    // Agent records in creation order; index 0 is always the root agent
    // (see `ensure_root_agent`).
    agents: Vec<AgentState>,
    // Parent id -> ids of its direct children.
    children: HashMap<AgentId, Vec<AgentId>>,
    // Caps on agent count and tree depth.
    limits: CollaborationLimits,
    // Monotonic counter used to mint unique submission ids.
    next_sub_id: i64,
    // Submission id -> agent that owns that submission.
    sub_ids: HashMap<String, AgentId>,
    // Agent id -> index into `agents` for O(1) lookup.
    agent_indices: HashMap<AgentId, usize>,
}
impl CollaborationState {
    /// Empty registry with the given limits; the root agent is created
    /// lazily by [`Self::ensure_root_agent`].
    pub(crate) fn new(limits: CollaborationLimits) -> Self {
        Self {
            agents: Vec::new(),
            children: HashMap::new(),
            limits,
            next_sub_id: 0,
            sub_ids: HashMap::new(),
            agent_indices: HashMap::new(),
        }
    }

    /// Configured agent-count/depth caps.
    pub(crate) fn limits(&self) -> &CollaborationLimits {
        &self.limits
    }

    /// Create the root ("orchestrator") agent at index 0 if it does not
    /// exist yet, otherwise resync its config/history with the session.
    /// Always returns the root id.
    pub(crate) fn ensure_root_agent(
        &mut self,
        session_configuration: &SessionConfiguration,
        session_history: &ContextManager,
    ) -> AgentId {
        if self.agents.is_empty() {
            let root = AgentState::new_root(
                "orchestrator".to_string(),
                session_configuration.clone(),
                session_history.clone(),
                // Prefer developer instructions, falling back to user ones.
                session_configuration
                    .developer_instructions()
                    .or_else(|| session_configuration.user_instructions()),
            );
            self.agents.push(root);
            self.agent_indices
                .insert(self.agents[0].id.clone(), 0);
        } else if let Some(root) = self.agents.get_mut(0) {
            // Root already exists: refresh it from the latest session state,
            // but never clobber instructions that were already set.
            root.config = session_configuration.clone();
            root.history = session_history.clone();
            if root.instructions.is_none() {
                root.instructions = session_configuration
                    .developer_instructions()
                    .or_else(|| session_configuration.user_instructions());
            }
            self.agent_indices.insert(root.id.clone(), 0);
        }
        AgentId::root()
    }

    /// All agents in creation order (root first).
    pub(crate) fn agents(&self) -> &[AgentState] {
        &self.agents
    }

    /// Look up an agent by id.
    pub(crate) fn agent(&self, id: &AgentId) -> Option<&AgentState> {
        self.index_for(id).and_then(|idx| self.agents.get(idx))
    }

    /// Mutable lookup by id.
    pub(crate) fn agent_mut(&mut self, id: &AgentId) -> Option<&mut AgentState> {
        let index = self.index_for(id)?;
        self.agents.get_mut(index)
    }

    /// Deep copy of an agent's conversation history, if the agent exists.
    pub(crate) fn clone_agent_history(&self, id: &AgentId) -> Option<ContextManager> {
        self.agent(id).map(|agent| agent.history.clone())
    }

    /// Replace an agent's history wholesale (items and token accounting).
    ///
    /// Errors with a message when `id` is unknown.
    pub(crate) fn set_agent_history(
        &mut self,
        id: &AgentId,
        items: Vec<ResponseItem>,
        token_info: Option<TokenUsageInfo>,
    ) -> Result<(), String> {
        let agent = self
            .agent_mut(id)
            .ok_or_else(|| format!("unknown agent {id}"))?;
        agent.history.replace(items);
        agent.history.set_token_info(token_info);
        Ok(())
    }

    /// Append one message to an agent's history, logging the delivery.
    /// Messages for unknown recipients are logged and dropped.
    pub(crate) fn record_message_for_agent(&mut self, id: &AgentId, message: ResponseItem) {
        let role = match &message {
            ResponseItem::Message { role, .. } => role.as_str(),
            _ => "other",
        };
        let content = content_for_log(&message);
        if let Some(agent) = self.agent_mut(id) {
            warn!(
                agent_idx = %id,
                agent_name = agent.name.as_str(),
                role,
                content,
                "collaboration: agent received message"
            );
            // Cap each recorded item at 10 kB to bound history growth.
            agent
                .history
                .record_items([message].iter(), TruncationPolicy::Bytes(10_000));
        } else {
            warn!(
                agent_idx = %id,
                agent_name = "<unknown>",
                role,
                content,
                "collaboration: message delivered to unknown agent"
            );
        }
    }

    /// Append several items to an agent's history under the given truncation
    /// policy; silently a no-op for unknown agents.
    #[allow(dead_code)]
    pub(crate) fn record_items_for_agent(
        &mut self,
        id: &AgentId,
        items: &[ResponseItem],
        policy: TruncationPolicy,
    ) {
        if let Some(agent) = self.agent_mut(id) {
            agent.history.record_items(items.iter(), policy);
        }
    }

    /// Register a new child agent, enforcing the count/depth limits and id
    /// uniqueness, and linking it under its parent. Returns the child's id.
    pub(crate) fn add_child(&mut self, mut agent: AgentState) -> Result<AgentId, String> {
        if self.agents.len() as i32 >= self.limits.max_agents {
            return Err("max agent count reached".to_string());
        }
        if agent.depth > self.limits.max_depth {
            return Err("max collaboration depth reached".to_string());
        }
        let id = agent.id.clone();
        if self.agent_indices.contains_key(&id) {
            return Err(format!("duplicate agent id {id}"));
        }
        if let Some(parent) = agent.parent.as_ref() {
            self.children.entry(parent.clone()).or_default().push(id.clone());
        }
        let index = self.agents.len();
        self.agents.push(agent);
        self.agent_indices.insert(id.clone(), index);
        Ok(id)
    }

    /// True when `child` is an immediate child of `parent`.
    pub(crate) fn is_direct_child(&self, parent: &AgentId, child: &AgentId) -> bool {
        self.children
            .get(parent)
            .map(|kids| kids.contains(&child))
            .unwrap_or(false)
    }

    /// The given `roots` together with all their transitive descendants,
    /// collected via an iterative depth-first walk.
    pub(crate) fn descendants(&self, roots: &[AgentId]) -> Vec<AgentId> {
        let mut result = Vec::new();
        let mut stack: Vec<AgentId> = roots.to_vec();
        while let Some(id) = stack.pop() {
            if let Some(children) = self.children.get(&id) {
                for child in children {
                    stack.push(child.clone());
                }
            }
            result.push(id);
        }
        result
    }

    /// Mint an id for a prospective child agent.
    pub(crate) fn next_agent_id(&self) -> AgentId {
        AgentId::random()
    }

    /// Mint a unique submission id scoped to `agent`.
    ///
    /// NOTE(review): this does not call `register_sub_id`; callers that need
    /// `agent_for_sub_id` lookups must register the mapping themselves (as
    /// `run_agent_turns` does via the session).
    pub(crate) fn next_sub_id(&mut self, agent: &AgentId) -> String {
        let next_sub_id = self.next_sub_id;
        let sub_id = format!("collab-agent-{agent}-{next_sub_id}");
        self.next_sub_id += 1;
        sub_id
    }

    /// Position of `id` in the agents list as an i32 (root is 0), when it
    /// exists and fits in i32.
    pub(crate) fn agent_index(&self, id: &AgentId) -> Option<i32> {
        self.index_for(id).and_then(|idx| i32::try_from(idx).ok())
    }

    // Internal: index of `id` in `agents`.
    fn index_for(&self, id: &AgentId) -> Option<usize> {
        self.agent_indices.get(id).copied()
    }

    /// Remember that `sub_id` belongs to `agent`.
    pub(crate) fn register_sub_id(&mut self, agent: &AgentId, sub_id: String) {
        self.sub_ids.insert(sub_id, agent.clone());
    }

    /// Owner of `sub_id`, if it was registered.
    pub(crate) fn agent_for_sub_id(&self, sub_id: &str) -> Option<AgentId> {
        self.sub_ids.get(sub_id).cloned()
    }
}

View File

@@ -1,7 +1,14 @@
mod collaboration;
mod service;
mod session;
mod turn;
pub(crate) use collaboration::AgentId;
pub(crate) use collaboration::AgentLifecycleState;
pub(crate) use collaboration::AgentState;
pub(crate) use collaboration::CollaborationLimits;
pub(crate) use collaboration::CollaborationState;
pub(crate) use collaboration::ContextStrategy;
pub(crate) use service::SessionServices;
pub(crate) use session::SessionState;
pub(crate) use turn::ActiveTurn;

View File

@@ -0,0 +1,328 @@
use std::collections::HashMap;
use std::sync::Arc;
use async_trait::async_trait;
use tokio::sync::broadcast;
use tokio::sync::mpsc;
use tokio_util::sync::CancellationToken;
use crate::codex::Session;
use crate::codex::TurnContext;
use crate::codex::run_collaboration_turn;
use crate::state::AgentId;
use crate::state::AgentLifecycleState;
use crate::state::TaskKind;
use crate::tools::context::SharedTurnDiffTracker;
use crate::turn_diff_tracker::TurnDiffTracker;
use codex_protocol::user_input::UserInput;
use super::SessionTask;
use super::SessionTaskContext;
/// An async supervisor that drives child collaboration agents.
///
/// Each agent gets a runner task that executes turns on demand and emits
/// `AgentRunResult` events on a broadcast channel. History swaps are serialized
/// via a shared lock so runners can progress while the main agent continues.
#[derive(Clone)]
pub(crate) struct CollaborationSupervisor {
    // Command channel into the supervisor's dispatch loop (see `spawn`).
    tx: mpsc::Sender<SupervisorCommand>,
    // Fan-out channel for per-turn agent status updates.
    events: broadcast::Sender<AgentRunResult>,
}
/// Status update emitted after each turn an agent runner executes.
#[derive(Debug, Clone)]
pub(crate) struct AgentRunResult {
    // Agent the update refers to.
    pub(crate) agent: AgentId,
    // Lifecycle state the agent ended the turn in.
    pub(crate) status: AgentLifecycleState,
}
/// Commands delivered to a single agent's runner task.
#[derive(Debug)]
enum AgentCommand {
    // Execute turns until idle or the budget is spent. `max_duration` is
    // consumed by token usage (see `run_agent_turns`), not wall-clock time.
    Run { max_duration: i32 },
    // Shut the runner down.
    Close,
}
/// Commands handled by the supervisor's dispatch loop.
enum SupervisorCommand {
    // Start (or continue) runs for each target agent, all sharing the same
    // per-agent budget.
    RunAgents {
        targets: Vec<AgentId>,
        max_duration: i32,
    },
    // Tear down the runners for the target agents.
    CloseAgents {
        targets: Vec<AgentId>,
    },
}
impl CollaborationSupervisor {
pub(crate) fn spawn(session: Arc<Session>) -> Self {
let (tx, mut rx) = mpsc::channel::<SupervisorCommand>(8);
let (events, _rx) = broadcast::channel::<AgentRunResult>(64);
let mut runners: HashMap<AgentId, mpsc::Sender<AgentCommand>> = HashMap::new();
let events_tx = events.clone();
tokio::spawn(async move {
while let Some(cmd) = rx.recv().await {
match cmd {
SupervisorCommand::RunAgents {
targets,
max_duration,
} => {
for agent in &targets {
ensure_runner(
*agent,
&mut runners,
Arc::clone(&session),
events_tx.clone(),
);
}
for target in targets {
let tx = runners.get(&target).cloned();
if let Some(tx) = tx {
match tx.try_send(AgentCommand::Run { max_duration }) {
Ok(()) | Err(mpsc::error::TrySendError::Full(_)) => {}
Err(mpsc::error::TrySendError::Closed(_)) => {
runners.remove(&target);
}
}
}
}
}
SupervisorCommand::CloseAgents { targets } => {
for agent in targets {
if let Some(tx) = runners.remove(&agent) {
let _ = tx.send(AgentCommand::Close).await;
}
}
}
}
}
});
Self { tx, events }
}
pub(crate) fn subscribe(&self) -> broadcast::Receiver<AgentRunResult> {
self.events.subscribe()
}
pub(crate) async fn start_agents(
&self,
targets: Vec<AgentId>,
max_duration: i32,
) -> Result<(), String> {
let cmd = SupervisorCommand::RunAgents {
targets,
max_duration,
};
self.tx
.send(cmd)
.await
.map_err(|err| format!("collaboration supervisor unavailable: {err}"))
}
pub(crate) async fn close_agents(&self, targets: Vec<AgentId>) {
let _ = self
.tx
.send(SupervisorCommand::CloseAgents { targets })
.await;
}
}
/// Lazily create the runner task for `agent`, registering its command
/// channel in `runners`. No-op when a runner already exists.
///
/// The runner loop waits for `Run` commands, executes turns via
/// `run_agent_turns`, and publishes each resulting status on `events`. The
/// channel has capacity 1, so at most one extra run request queues up;
/// further requests coalesce (the caller ignores `try_send` Full).
fn ensure_runner(
    agent: AgentId,
    runners: &mut HashMap<AgentId, mpsc::Sender<AgentCommand>>,
    session: Arc<Session>,
    events: broadcast::Sender<AgentRunResult>,
) {
    if runners.contains_key(&agent) {
        return;
    }
    let (tx, mut rx) = mpsc::channel::<AgentCommand>(1);
    // AgentId is not Copy: clone the map key here and move the original into
    // the runner task below (the previous code moved `agent` into the map and
    // then again into the closure, which cannot compile).
    runners.insert(agent.clone(), tx);
    tokio::spawn(async move {
        let mut pending_run = false;
        let mut next_budget = 0;
        loop {
            if !pending_run {
                match rx.recv().await {
                    Some(AgentCommand::Run { max_duration }) => {
                        pending_run = true;
                        next_budget = max_duration;
                    }
                    Some(AgentCommand::Close) | None => break,
                }
            }
            if !pending_run {
                // Defensive: recv above either queued a run or broke out.
                continue;
            }
            let budget = next_budget;
            pending_run = false;
            // A self-initiated follow-up run (keep_running below) gets an
            // unlimited budget unless a new Run command overwrites it.
            next_budget = i32::MAX;
            match run_agent_turns(Arc::clone(&session), agent.clone(), budget).await {
                Ok((results, keep_running)) => {
                    for result in results {
                        // Receiver lag/closure is not fatal for the runner.
                        let _ = events.send(result);
                    }
                    pending_run = keep_running;
                }
                Err(err) => {
                    let _ = events.send(AgentRunResult {
                        agent: agent.clone(),
                        status: AgentLifecycleState::Error { error: err },
                    });
                }
            }
        }
    });
}
/// Drive `target` through consecutive turns until it goes idle, fails, or
/// exhausts `max_duration`.
///
/// Despite the name, `max_duration` is a token budget: it is decremented by
/// the tokens each turn consumes, not by wall-clock time.
///
/// Returns the per-turn status updates plus a flag telling the caller the
/// agent still wants to run (budget allowing). The only `Err` case is an
/// unknown `target`.
///
/// NOTE(review): `CollaborationState::agent`/`agent_mut` take `&AgentId` in
/// this branch, yet `target` is passed by value and reused across loop
/// iterations here — this assumes `AgentId: Copy`, which the current
/// String-backed definition is not. Confirm which definition is intended.
async fn run_agent_turns(
    session: Arc<Session>,
    target: AgentId,
    max_duration: i32,
) -> Result<(Vec<AgentRunResult>, bool), String> {
    let mut remaining_budget = max_duration;
    let mut results = Vec::new();
    let mut keep_running = false;
    while remaining_budget > 0 {
        // Snapshot the agent under the lock, then release it so other
        // runners and the main agent can progress during the turn.
        let agent_snapshot = {
            let collab = session.collaboration_state().lock().await;
            collab.agent(target).cloned()
        };
        let Some(agent_snapshot) = agent_snapshot else {
            return Err(format!("unknown agent {}", target.0));
        };
        // Terminal states: report the status once and stop.
        if matches!(
            agent_snapshot.status,
            AgentLifecycleState::Closed
                | AgentLifecycleState::Exhausted
                | AgentLifecycleState::Error { .. }
        ) {
            results.push(AgentRunResult {
                agent: target,
                status: agent_snapshot.status,
            });
            break;
        }
        // Mark the agent Running for observers while the turn executes.
        {
            let mut collab = session.collaboration_state().lock().await;
            if let Some(agent) = collab.agent_mut(target) {
                agent.status = AgentLifecycleState::Running;
            }
        }
        let mut agent_history = agent_snapshot.history.clone();
        let sub_id = {
            let mut collab = session.collaboration_state().lock().await;
            collab.next_sub_id(target)
        };
        let turn_context = session
            .make_collaboration_turn_context(&agent_snapshot, sub_id.clone())
            .await;
        // Map the submission id back to this agent for event routing.
        session.register_sub_id(target, sub_id.clone()).await;
        let tracker: SharedTurnDiffTracker =
            Arc::new(tokio::sync::Mutex::new(TurnDiffTracker::new()));
        let mut agent_status = AgentLifecycleState::Running;
        // Token usage before/after the turn determines budget consumption.
        let before_tokens = agent_history.get_total_token_usage();
        let run_result = run_collaboration_turn(
            Arc::clone(&session),
            Arc::clone(&turn_context),
            tracker,
            agent_history.get_history_for_prompt(),
            CancellationToken::new(),
        )
        .await;
        let (delta_tokens, continue_running) = match run_result {
            Ok((needs_follow_up, last)) => {
                let new_history = session.clone_history_for_agent(target).await;
                let after_tokens = new_history.get_total_token_usage();
                // Clamp to non-negative i32 so the budget math stays sane.
                let delta_tokens = after_tokens
                    .saturating_sub(before_tokens)
                    .clamp(0, i32::MAX as i64) as i32;
                {
                    let mut collab = session.collaboration_state().lock().await;
                    if let Some(agent) = collab.agent_mut(target) {
                        if needs_follow_up {
                            agent_status = AgentLifecycleState::Running;
                        } else {
                            agent_status = AgentLifecycleState::Idle {
                                last_agent_message: last.clone(),
                            };
                        }
                        agent.status = agent_status.clone();
                        agent.history = new_history.clone();
                    }
                }
                (delta_tokens, needs_follow_up)
            }
            Err(err) => {
                // Record the failure on the agent; charge no tokens.
                {
                    let mut collab = session.collaboration_state().lock().await;
                    if let Some(agent) = collab.agent_mut(target) {
                        agent_status = AgentLifecycleState::Error {
                            error: err.to_string(),
                        };
                        agent.status = agent_status.clone();
                    }
                }
                (0, false)
            }
        };
        remaining_budget = remaining_budget.saturating_sub(delta_tokens);
        // Re-read the status under the lock; if the agent still wants to run
        // but the budget is gone, mark it Exhausted. Fall back to the local
        // status if the agent vanished meanwhile.
        let final_status = {
            let mut collab = session.collaboration_state().lock().await;
            if continue_running
                && remaining_budget <= 0
                && let Some(agent) = collab.agent_mut(target)
            {
                agent.status = AgentLifecycleState::Exhausted;
            }
            collab.agent(target).map(|a| a.status.clone())
        }
        .unwrap_or(agent_status.clone());
        results.push(AgentRunResult {
            agent: target,
            status: final_status,
        });
        keep_running |= continue_running && remaining_budget > 0;
        if !continue_running || remaining_budget <= 0 {
            break;
        }
    }
    Ok((results, keep_running))
}
/// Task wrapper that routes a collaboration run through the regular task
/// pipeline.
#[allow(dead_code)]
#[derive(Clone, Copy, Default)]
pub(crate) struct CollaborationTask;

#[async_trait]
impl SessionTask for CollaborationTask {
    // NOTE(review): reports TaskKind::Regular — confirm a dedicated
    // collaboration kind is not needed by the task registry.
    fn kind(&self) -> TaskKind {
        TaskKind::Regular
    }

    /// Delegate to the shared `run_task` driver on the underlying session.
    async fn run(
        self: Arc<Self>,
        session: Arc<SessionTaskContext>,
        ctx: Arc<TurnContext>,
        input: Vec<UserInput>,
        cancellation_token: CancellationToken,
    ) -> Option<String> {
        crate::codex::run_task(session.clone_session(), ctx, input, cancellation_token).await
    }
}

View File

@@ -1,3 +1,4 @@
mod collaboration;
mod compact;
mod ghost_snapshot;
mod regular;
@@ -29,6 +30,8 @@ use crate::state::RunningTask;
use crate::state::TaskKind;
use codex_protocol::user_input::UserInput;
pub(crate) use collaboration::CollaborationSupervisor;
pub(crate) use collaboration::CollaborationTask;
pub(crate) use compact::CompactTask;
pub(crate) use ghost_snapshot::GhostSnapshotTask;
pub(crate) use regular::RegularTask;

View File

@@ -1,14 +1,15 @@
use std::sync::Arc;
use crate::codex::TurnContext;
use crate::codex::run_task;
use crate::state::TaskKind;
use async_trait::async_trait;
use codex_protocol::user_input::UserInput;
use tokio_util::sync::CancellationToken;
use tracing::Instrument;
use tracing::trace_span;
use crate::codex::TurnContext;
use crate::codex::run_task;
use crate::state::TaskKind;
use super::SessionTask;
use super::SessionTaskContext;

View File

@@ -260,6 +260,7 @@ pub(crate) fn create_apply_patch_json_tool() -> ToolSpec {
"input".to_string(),
JsonSchema::String {
description: Some(r#"The entire contents of the apply_patch command"#.to_string()),
enum_values: None,
},
);

File diff suppressed because it is too large Load Diff

View File

@@ -1,4 +1,5 @@
pub mod apply_patch;
pub mod collaboration;
mod grep_files;
mod list_dir;
mod mcp;
@@ -13,6 +14,7 @@ mod view_image;
pub use plan::PLAN_TOOL;
pub use apply_patch::ApplyPatchHandler;
pub use collaboration::CollaborationHandler;
pub use grep_files::GrepFilesHandler;
pub use list_dir::ListDirHandler;
pub use mcp::McpHandler;

View File

@@ -19,11 +19,18 @@ pub struct PlanHandler;
pub static PLAN_TOOL: LazyLock<ToolSpec> = LazyLock::new(|| {
let mut plan_item_props = BTreeMap::new();
plan_item_props.insert("step".to_string(), JsonSchema::String { description: None });
plan_item_props.insert(
"step".to_string(),
JsonSchema::String {
description: None,
enum_values: None,
},
);
plan_item_props.insert(
"status".to_string(),
JsonSchema::String {
description: Some("One of: pending, in_progress, completed".to_string()),
enum_values: None,
},
);
@@ -39,7 +46,10 @@ pub static PLAN_TOOL: LazyLock<ToolSpec> = LazyLock::new(|| {
let mut properties = BTreeMap::new();
properties.insert(
"explanation".to_string(),
JsonSchema::String { description: None },
JsonSchema::String {
description: None,
enum_values: None,
},
);
properties.insert("plan".to_string(), plan_items_schema);

View File

@@ -35,8 +35,7 @@ impl ToolRouter {
config: &ToolsConfig,
mcp_tools: Option<HashMap<String, mcp_types::Tool>>,
) -> Self {
let builder = build_specs(config, mcp_tools);
let (specs, registry) = builder.build();
let (specs, registry) = build_specs(config, mcp_tools).build();
Self { registry, specs }
}
@@ -139,6 +138,27 @@ impl ToolRouter {
tracker: SharedTurnDiffTracker,
call: ToolCall,
) -> Result<ResponseInputItem, FunctionCallError> {
let session_configuration = session.current_session_configuration().await;
let session_history = session.clone_history().await;
let mut caller_agent = turn.collaboration_agent();
{
let mut collab = session.collaboration_state().lock().await;
collab.ensure_root_agent(&session_configuration, &session_history);
if let Some(mapped) = collab.agent_for_sub_id(&turn.sub_id) {
caller_agent = mapped;
}
if collab.agent(caller_agent).is_none() {
return Ok(Self::failure_response(
call.call_id,
matches!(call.payload, ToolPayload::Custom { .. }),
FunctionCallError::RespondToModel(format!(
"unknown collaboration agent {}",
caller_agent.0
)),
));
}
}
let ToolCall {
tool_name,
call_id,
@@ -190,3 +210,21 @@ impl ToolRouter {
}
}
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::codex::make_session_and_context_with_rx;

    /// A router built from a default session's tool config must expose the
    /// always-registered `update_plan` tool, i.e. construction does not
    /// filter specs out.
    #[test]
    fn builds_specs_without_filtering() {
        let (_session, turn, _rx) = make_session_and_context_with_rx();
        let router = ToolRouter::from_config(&turn.tools_config, None);
        let has_update_plan = router
            .specs()
            .into_iter()
            .map(|spec| spec.name().to_string())
            .any(|name| name == "update_plan");
        assert!(has_update_plan);
    }
}

View File

@@ -6,6 +6,11 @@ use crate::openai_models::model_family::ModelFamily;
use crate::tools::handlers::PLAN_TOOL;
use crate::tools::handlers::apply_patch::create_apply_patch_freeform_tool;
use crate::tools::handlers::apply_patch::create_apply_patch_json_tool;
use crate::tools::handlers::collaboration::create_collaboration_close_tool;
use crate::tools::handlers::collaboration::create_collaboration_get_state_tool;
use crate::tools::handlers::collaboration::create_collaboration_init_agent_tool;
use crate::tools::handlers::collaboration::create_collaboration_send_tool;
use crate::tools::handlers::collaboration::create_collaboration_wait_tool;
use crate::tools::registry::ToolRegistryBuilder;
use codex_protocol::openai_models::ApplyPatchToolType;
use codex_protocol::openai_models::ConfigShellToolType;
@@ -23,6 +28,7 @@ pub(crate) struct ToolsConfig {
pub web_search_request: bool,
pub include_view_image_tool: bool,
pub experimental_supported_tools: Vec<String>,
pub collaboration_agent_allowlist: Option<Vec<String>>,
}
pub(crate) struct ToolsConfigParams<'a> {
@@ -66,6 +72,7 @@ impl ToolsConfig {
web_search_request: include_web_search_request,
include_view_image_tool,
experimental_supported_tools: model_family.experimental_supported_tools.clone(),
collaboration_agent_allowlist: None,
}
}
}
@@ -81,6 +88,8 @@ pub(crate) enum JsonSchema {
String {
#[serde(skip_serializing_if = "Option::is_none")]
description: Option<String>,
#[serde(rename = "enum", skip_serializing_if = "Option::is_none")]
enum_values: Option<Vec<String>>,
},
/// MCP schema allows "number" | "integer" for Number
#[serde(alias = "integer")]
@@ -132,6 +141,7 @@ fn create_exec_command_tool() -> ToolSpec {
"cmd".to_string(),
JsonSchema::String {
description: Some("Shell command to execute.".to_string()),
enum_values: None,
},
);
properties.insert(
@@ -141,12 +151,14 @@ fn create_exec_command_tool() -> ToolSpec {
"Optional working directory to run the command in; defaults to the turn cwd."
.to_string(),
),
enum_values: None,
},
);
properties.insert(
"shell".to_string(),
JsonSchema::String {
description: Some("Shell binary to launch. Defaults to /bin/bash.".to_string()),
enum_values: None,
},
);
properties.insert(
@@ -181,6 +193,7 @@ fn create_exec_command_tool() -> ToolSpec {
"Sandbox permissions for the command. Set to \"require_escalated\" to request running without sandbox restrictions; defaults to \"use_default\"."
.to_string(),
),
enum_values: None,
},
);
properties.insert(
@@ -190,6 +203,7 @@ fn create_exec_command_tool() -> ToolSpec {
"Only set if sandbox_permissions is \"require_escalated\". 1-sentence explanation of why we want to run this command."
.to_string(),
),
enum_values: None,
},
);
@@ -219,6 +233,7 @@ fn create_write_stdin_tool() -> ToolSpec {
"chars".to_string(),
JsonSchema::String {
description: Some("Bytes to write to stdin (may be empty to poll).".to_string()),
enum_values: None,
},
);
properties.insert(
@@ -257,7 +272,10 @@ fn create_shell_tool() -> ToolSpec {
properties.insert(
"command".to_string(),
JsonSchema::Array {
items: Box::new(JsonSchema::String { description: None }),
items: Box::new(JsonSchema::String {
description: None,
enum_values: None,
}),
description: Some("The command to execute".to_string()),
},
);
@@ -265,6 +283,7 @@ fn create_shell_tool() -> ToolSpec {
"workdir".to_string(),
JsonSchema::String {
description: Some("The working directory to execute the command in".to_string()),
enum_values: None,
},
);
properties.insert(
@@ -278,12 +297,14 @@ fn create_shell_tool() -> ToolSpec {
"sandbox_permissions".to_string(),
JsonSchema::String {
description: Some("Sandbox permissions for the command. Set to \"require_escalated\" to request running without sandbox restrictions; defaults to \"use_default\".".to_string()),
enum_values: None,
},
);
properties.insert(
"justification".to_string(),
JsonSchema::String {
description: Some("Only set if sandbox_permissions is \"require_escalated\". 1-sentence explanation of why we want to run this command.".to_string()),
enum_values: None,
},
);
@@ -324,12 +345,14 @@ fn create_shell_command_tool() -> ToolSpec {
description: Some(
"The shell script to execute in the user's default shell".to_string(),
),
enum_values: None,
},
);
properties.insert(
"workdir".to_string(),
JsonSchema::String {
description: Some("The working directory to execute the command in".to_string()),
enum_values: None,
},
);
properties.insert(
@@ -351,12 +374,14 @@ fn create_shell_command_tool() -> ToolSpec {
"sandbox_permissions".to_string(),
JsonSchema::String {
description: Some("Sandbox permissions for the command. Set to \"require_escalated\" to request running without sandbox restrictions; defaults to \"use_default\".".to_string()),
enum_values: None,
},
);
properties.insert(
"justification".to_string(),
JsonSchema::String {
description: Some("Only set if sandbox_permissions is \"require_escalated\". 1-sentence explanation of why we want to run this command.".to_string()),
enum_values: None,
},
);
@@ -395,6 +420,7 @@ fn create_view_image_tool() -> ToolSpec {
"path".to_string(),
JsonSchema::String {
description: Some("Local filesystem path to an image file".to_string()),
enum_values: None,
},
);
@@ -436,6 +462,7 @@ fn create_test_sync_tool() -> ToolSpec {
description: Some(
"Identifier shared by concurrent calls that should rendezvous".to_string(),
),
enum_values: None,
},
);
barrier_properties.insert(
@@ -480,6 +507,7 @@ fn create_grep_files_tool() -> ToolSpec {
"pattern".to_string(),
JsonSchema::String {
description: Some("Regular expression pattern to search for.".to_string()),
enum_values: None,
},
);
properties.insert(
@@ -490,6 +518,7 @@ fn create_grep_files_tool() -> ToolSpec {
\"*.{ts,tsx}\")."
.to_string(),
),
enum_values: None,
},
);
properties.insert(
@@ -499,6 +528,7 @@ fn create_grep_files_tool() -> ToolSpec {
"Directory or file path to search. Defaults to the session's working directory."
.to_string(),
),
enum_values: None,
},
);
properties.insert(
@@ -530,6 +560,7 @@ fn create_read_file_tool() -> ToolSpec {
"file_path".to_string(),
JsonSchema::String {
description: Some("Absolute path to the file".to_string()),
enum_values: None,
},
);
properties.insert(
@@ -554,6 +585,7 @@ fn create_read_file_tool() -> ToolSpec {
to expand around an anchor line."
.to_string(),
),
enum_values: None,
},
);
@@ -628,6 +660,7 @@ fn create_list_dir_tool() -> ToolSpec {
"dir_path".to_string(),
JsonSchema::String {
description: Some("Absolute path to the directory to list.".to_string()),
enum_values: None,
},
);
properties.insert(
@@ -676,6 +709,7 @@ fn create_list_mcp_resources_tool() -> ToolSpec {
"Optional MCP server name. When omitted, lists resources from every configured server."
.to_string(),
),
enum_values: None,
},
);
properties.insert(
@@ -685,6 +719,7 @@ fn create_list_mcp_resources_tool() -> ToolSpec {
"Opaque cursor returned by a previous list_mcp_resources call for the same server."
.to_string(),
),
enum_values: None,
},
);
@@ -709,6 +744,7 @@ fn create_list_mcp_resource_templates_tool() -> ToolSpec {
"Optional MCP server name. When omitted, lists resource templates from all configured servers."
.to_string(),
),
enum_values: None,
},
);
properties.insert(
@@ -718,6 +754,7 @@ fn create_list_mcp_resource_templates_tool() -> ToolSpec {
"Opaque cursor returned by a previous list_mcp_resource_templates call for the same server."
.to_string(),
),
enum_values: None,
},
);
@@ -742,6 +779,7 @@ fn create_read_mcp_resource_tool() -> ToolSpec {
"MCP server name exactly as configured. Must match the 'server' field returned by list_mcp_resources."
.to_string(),
),
enum_values: None,
},
);
properties.insert(
@@ -751,6 +789,7 @@ fn create_read_mcp_resource_tool() -> ToolSpec {
"Resource URI to read. Must be one of the URIs returned by list_mcp_resources."
.to_string(),
),
enum_values: None,
},
);
@@ -977,6 +1016,7 @@ pub(crate) fn build_specs(
mcp_tools: Option<HashMap<String, mcp_types::Tool>>,
) -> ToolRegistryBuilder {
use crate::tools::handlers::ApplyPatchHandler;
use crate::tools::handlers::CollaborationHandler;
use crate::tools::handlers::GrepFilesHandler;
use crate::tools::handlers::ListDirHandler;
use crate::tools::handlers::McpHandler;
@@ -997,6 +1037,7 @@ pub(crate) fn build_specs(
let plan_handler = Arc::new(PlanHandler);
let apply_patch_handler = Arc::new(ApplyPatchHandler);
let view_image_handler = Arc::new(ViewImageHandler);
let collaboration_handler = Arc::new(CollaborationHandler);
let mcp_handler = Arc::new(McpHandler);
let mcp_resource_handler = Arc::new(McpResourceHandler);
let shell_command_handler = Arc::new(ShellCommandHandler);
@@ -1040,6 +1081,32 @@ pub(crate) fn build_specs(
builder.push_spec(PLAN_TOOL.clone());
builder.register_handler("update_plan", plan_handler);
if config
.experimental_supported_tools
.contains(&"collaboration".to_string())
&& config
.collaboration_agent_allowlist
.as_ref()
.is_some_and(|allowlist| !allowlist.is_empty())
{
builder.push_spec(create_collaboration_init_agent_tool(
config
.collaboration_agent_allowlist
.as_deref()
.unwrap_or_default(),
));
builder.push_spec(create_collaboration_send_tool());
builder.push_spec(create_collaboration_wait_tool());
builder.push_spec(create_collaboration_get_state_tool());
builder.push_spec(create_collaboration_close_tool());
builder.register_handler("collaboration_init_agent", collaboration_handler.clone());
builder.register_handler("collaboration_send", collaboration_handler.clone());
builder.register_handler("collaboration_wait", collaboration_handler.clone());
builder.register_handler("collaboration_get_state", collaboration_handler.clone());
builder.register_handler("collaboration_close", collaboration_handler);
}
if let Some(apply_patch_tool_type) = &config.apply_patch_tool_type {
match apply_patch_tool_type {
ApplyPatchToolType::Freeform => {
@@ -1183,7 +1250,10 @@ mod tests {
fn strip_descriptions_schema(schema: &mut JsonSchema) {
match schema {
JsonSchema::Boolean { description }
| JsonSchema::String { description }
| JsonSchema::String {
description,
enum_values: _,
}
| JsonSchema::Number { description } => {
*description = None;
}
@@ -1255,6 +1325,10 @@ mod tests {
create_read_mcp_resource_tool(),
PLAN_TOOL.clone(),
create_apply_patch_freeform_tool(),
create_grep_files_tool(),
create_read_file_tool(),
create_list_dir_tool(),
create_test_sync_tool(),
ToolSpec::WebSearch {},
create_view_image_tool(),
] {
@@ -1300,6 +1374,10 @@ mod tests {
"read_mcp_resource",
"update_plan",
"apply_patch",
"grep_files",
"read_file",
"list_dir",
"test_sync_tool",
"view_image",
],
);
@@ -1317,6 +1395,10 @@ mod tests {
"read_mcp_resource",
"update_plan",
"apply_patch",
"grep_files",
"read_file",
"list_dir",
"test_sync_tool",
"view_image",
],
);
@@ -1337,6 +1419,10 @@ mod tests {
"read_mcp_resource",
"update_plan",
"apply_patch",
"grep_files",
"read_file",
"list_dir",
"test_sync_tool",
"web_search",
"view_image",
],
@@ -1358,12 +1444,42 @@ mod tests {
"read_mcp_resource",
"update_plan",
"apply_patch",
"grep_files",
"read_file",
"list_dir",
"test_sync_tool",
"web_search",
"view_image",
],
);
}
#[test]
fn test_build_specs_gpt51_codex_max_default() {
    // With default features, gpt-5.1-codex-max is expected to expose the
    // collaboration tool family (init/send/wait/get_state/close) in addition
    // to the common tool set; the list order mirrors registration order.
    assert_model_tools(
        "gpt-5.1-codex-max",
        &Features::with_defaults(),
        &[
            "shell_command",
            "list_mcp_resources",
            "list_mcp_resource_templates",
            "read_mcp_resource",
            "update_plan",
            "collaboration_init_agent",
            "collaboration_send",
            "collaboration_wait",
            "collaboration_get_state",
            "collaboration_close",
            "apply_patch",
            "grep_files",
            "read_file",
            "list_dir",
            "test_sync_tool",
            "view_image",
        ],
    );
}
#[test]
fn test_codex_mini_defaults() {
assert_model_tools(
@@ -1392,6 +1508,10 @@ mod tests {
"read_mcp_resource",
"update_plan",
"apply_patch",
"grep_files",
"read_file",
"list_dir",
"test_sync_tool",
"view_image",
],
);
@@ -1600,7 +1720,10 @@ mod tests {
properties: BTreeMap::from([
(
"string_argument".to_string(),
JsonSchema::String { description: None }
JsonSchema::String {
description: None,
enum_values: None,
}
),
(
"number_argument".to_string(),
@@ -1612,7 +1735,10 @@ mod tests {
properties: BTreeMap::from([
(
"string_property".to_string(),
JsonSchema::String { description: None }
JsonSchema::String {
description: None,
enum_values: None,
}
),
(
"number_property".to_string(),
@@ -1757,7 +1883,8 @@ mod tests {
properties: BTreeMap::from([(
"query".to_string(),
JsonSchema::String {
description: Some("search query".to_string())
description: Some("search query".to_string()),
enum_values: None,
}
)]),
required: None,
@@ -1866,7 +1993,10 @@ mod tests {
properties: BTreeMap::from([(
"tags".to_string(),
JsonSchema::Array {
items: Box::new(JsonSchema::String { description: None }),
items: Box::new(JsonSchema::String {
description: None,
enum_values: None,
}),
description: None
}
)]),
@@ -1921,7 +2051,10 @@ mod tests {
parameters: JsonSchema::Object {
properties: BTreeMap::from([(
"value".to_string(),
JsonSchema::String { description: None }
JsonSchema::String {
description: None,
enum_values: None,
}
)]),
required: None,
additional_properties: None,
@@ -2059,7 +2192,10 @@ Examples of valid command strings:
properties: BTreeMap::from([
(
"string_argument".to_string(),
JsonSchema::String { description: None }
JsonSchema::String {
description: None,
enum_values: None,
}
),
(
"number_argument".to_string(),
@@ -2071,7 +2207,10 @@ Examples of valid command strings:
properties: BTreeMap::from([
(
"string_property".to_string(),
JsonSchema::String { description: None }
JsonSchema::String {
description: None,
enum_values: None,
}
),
(
"number_property".to_string(),
@@ -2086,7 +2225,10 @@ Examples of valid command strings:
JsonSchema::Object {
properties: BTreeMap::from([(
"addtl_prop".to_string(),
JsonSchema::String { description: None }
JsonSchema::String {
description: None,
enum_values: None,
}
),]),
required: Some(vec!["addtl_prop".to_string(),]),
additional_properties: Some(false.into()),
@@ -2108,7 +2250,13 @@ Examples of valid command strings:
#[test]
fn chat_tools_include_top_level_name() {
let mut properties = BTreeMap::new();
properties.insert("foo".to_string(), JsonSchema::String { description: None });
properties.insert(
"foo".to_string(),
JsonSchema::String {
description: None,
enum_values: None,
},
);
let tools = vec![ToolSpec::Function(ResponsesApiTool {
name: "demo".to_string(),
description: "A demo tool".to_string(),

View File

@@ -0,0 +1,84 @@
You are a Codex Orchestrator, based on GPT-5. You are running as a coding agent in the Codex CLI on a user's computer.
## Role
Your role is not to solve a task but to use other agents to solve it. For this, you can use the collaboration tools to start and communicate with sub-agents.
A part of your role is to make sure that the task is properly done. For this:
* Always ask a reviewer to review the task. If the reviewer finds some issue, iterate with your workers and the reviewer to have something perfect.
* If an agent stops working but is not fully done, it is your role to ask the same agent or a new one to finish the task.
## Agents
* `worker`: this agent is the actual worker that can code and complete tasks. If a task is large or has different scopes, you can split the work between multiple workers.
* `reviewer`: this agent reviews task completion. You must *always* spawn new reviewers (do not re-use old reviewers) and state the goal of the task when asking for a review.
* `q_and_a`: this agent is good at answering questions about the codebase. You can use it for your understanding or to answer questions from other agents. Do not reuse the same q_and_a agent for totally different questions.
## Collaboration
You can spawn and coordinate child agents using these tools:
- `collaboration_init_agent`: create a direct child by agent profile name. `agent` defaults to the caller's agent type; `context_strategy` and `message` are optional. If you pass a non-empty `message`, the child starts immediately; otherwise follow with `collaboration_send`.
- `collaboration_send`: send a user-message to your direct children by id (string). You can only send messages to previously initialized agents using `collaboration_init_agent`. If the target child is already running, the call fails; `wait` first.
- `collaboration_wait`: wait up to `max_duration` milliseconds (wall time) for running children to finish and surface their latest state. You can only wait on direct child agents (optionally specify `agent_idx`).
- `collaboration_get_state`: see the calling agent's direct children (or a provided `agent_idx` list), their statuses, and latest messages via `state`.
- `collaboration_close`: close specific children (and their descendants). Use `return_states` if you want the pre-close states.
If you did not include a `message` in `collaboration_init_agent`, follow with `collaboration_send` to start the child agent working.
## Plan tool
When using the planning tool:
- Skip using the planning tool for straightforward tasks (roughly the easiest 25%).
- Do not make single-step plans.
- When you made a plan, update it after having performed one of the sub-tasks that you shared on the plan.
## Special user requests
- If the user makes a simple request (such as asking for the time) which you can fulfill by running a terminal command (such as `date`), you should do so.
- If the user asks for a "review", default to a code review mindset: prioritise identifying bugs, risks, behavioural regressions, and missing tests. Findings must be the primary focus of the response - keep summaries or overviews brief and only after enumerating the issues. Present findings first (ordered by severity with file/line references), follow with open questions or assumptions, and offer a change-summary only as a secondary detail. If no findings are discovered, state that explicitly and mention any residual risks or testing gaps.
## Frontend tasks
When doing frontend design tasks, avoid collapsing into "AI slop" or safe, average-looking layouts.
Aim for interfaces that feel intentional, bold, and a bit surprising.
- Typography: Use expressive, purposeful fonts and avoid default stacks (Inter, Roboto, Arial, system).
- Color & Look: Choose a clear visual direction; define CSS variables; avoid purple-on-white defaults. No purple bias or dark mode bias.
- Motion: Use a few meaningful animations (page-load, staggered reveals) instead of generic micro-motions.
- Background: Don't rely on flat, single-color backgrounds; use gradients, shapes, or subtle patterns to build atmosphere.
- Overall: Avoid boilerplate layouts and interchangeable UI patterns. Vary themes, type families, and visual languages across outputs.
- Ensure the page loads properly on both desktop and mobile
Exception: If working within an existing website or design system, preserve the established patterns, structure, and visual language.
## Presenting your work and final message
You are producing plain text that will later be styled by the CLI. Follow these rules exactly. Formatting should make results easy to scan, but not feel mechanical. Use judgment to decide how much structure adds value.
- Default: be very concise; friendly coding teammate tone.
- Ask only when needed; suggest ideas; mirror the user's style.
- For substantial work, summarize clearly; follow final-answer formatting.
- Skip heavy formatting for simple confirmations.
- Don't dump large files you've written; reference paths only.
- No "save/copy this file" - User is on the same machine.
- Offer logical next steps (tests, commits, build) briefly; add verify steps if you couldn't do something.
- For code changes:
* Lead with a quick explanation of the change, and then give more details on the context covering where and why a change was made. Do not start this explanation with "summary", just jump right in.
* If there are natural next steps the user may want to take, suggest them at the end of your response. Do not make suggestions if there are no natural next steps.
* When suggesting multiple options, use numeric lists for the suggestions so the user can quickly respond with a single number.
- The user does not see command execution outputs. When asked to show the output of a command (e.g. `git show`), relay the important details in your answer or summarize the key lines so the user understands the result.
### Final answer structure and style guidelines
- Plain text; CLI handles styling. Use structure only when it helps scanability.
- Headers: optional; short Title Case (1-3 words) wrapped in **…**; no blank line before the first bullet; add only if they truly help.
- Bullets: use - ; merge related points; keep to one line when possible; 4–6 per list ordered by importance; keep phrasing consistent.
- Monospace: backticks for commands/paths/env vars/code ids and inline examples; use for literal keyword bullets; never combine with **.
- Code samples or multi-line snippets should be wrapped in fenced code blocks; include an info string as often as possible.
- Structure: group related bullets; order sections general → specific → supporting; for subsections, start with a bolded keyword bullet, then items; match complexity to the task.
- Tone: collaborative, concise, factual; present tense, active voice; self-contained; no "above/below"; parallel wording.
- Don'ts: no nested bullets/hierarchies; no ANSI codes; don't cram unrelated keywords; keep keyword lists short—wrap/reformat if long; avoid naming formatting styles in answers.
- Adaptation: code explanations → precise, structured with code refs; simple tasks → lead with outcome; big changes → logical walkthrough + rationale + next actions; casual one-offs → plain sentences, no headers/bullets.
- File References: When referencing files in your response follow the below rules:
* Use inline code to make file paths clickable.
* Each reference should have a stand alone path. Even if it's the same file.
* Accepted: absolute, workspace-relative, a/ or b/ diff prefixes, or bare filename/suffix.
* Optionally include line/column (1-based): :line[:column] or #Lline[Ccolumn] (column defaults to 1).
* Do not use URIs like file://, vscode://, or https://.
* Do not provide range of lines
* Examples: src/app.ts, src/app.ts:42, b/server/index.js#L10, C:\repo\project\main.rs:12:5

View File

@@ -0,0 +1,5 @@
You are a Q&A agent.
- Answer questions clearly and directly.
- Provide concise explanations or examples.
- Do not modify code.
- You can explore the codebase as you please as well as the git history to answer the questions.

View File

@@ -0,0 +1,87 @@
# Review guidelines:
You are acting as a reviewer for a proposed code change made by another engineer.
Below are some default guidelines for determining whether the original author would appreciate the issue being flagged.
These are not the final word in determining whether an issue is a bug. In many cases, you will encounter other, more specific guidelines. These may be present elsewhere in a developer message, a user message, a file, or even elsewhere in this system message.
Those guidelines should be considered to override these general instructions.
Here are the general guidelines for determining whether something is a bug and should be flagged.
1. It meaningfully impacts the accuracy, performance, security, or maintainability of the code.
2. The bug is discrete and actionable (i.e. not a general issue with the codebase or a combination of multiple issues).
3. Fixing the bug does not demand a level of rigor that is not present in the rest of the codebase (e.g. one doesn't need very detailed comments and input validation in a repository of one-off scripts in personal projects)
4. The bug was introduced in the commit (pre-existing bugs should not be flagged).
5. The author of the original PR would likely fix the issue if they were made aware of it.
6. The bug does not rely on unstated assumptions about the codebase or author's intent.
7. It is not enough to speculate that a change may disrupt another part of the codebase, to be considered a bug, one must identify the other parts of the code that are provably affected.
8. The bug is clearly not just an intentional change by the original author.
When flagging a bug, you will also provide an accompanying comment. Once again, these guidelines are not the final word on how to construct a comment -- defer to any subsequent guidelines that you encounter.
1. The comment should be clear about why the issue is a bug.
2. The comment should appropriately communicate the severity of the issue. It should not claim that an issue is more severe than it actually is.
3. The comment should be brief. The body should be at most 1 paragraph. It should not introduce line breaks within the natural language flow unless it is necessary for the code fragment.
4. The comment should not include any chunks of code longer than 3 lines. Any code chunks should be wrapped in markdown inline code tags or a code block.
5. The comment should clearly and explicitly communicate the scenarios, environments, or inputs that are necessary for the bug to arise. The comment should immediately indicate that the issue's severity depends on these factors.
6. The comment's tone should be matter-of-fact and not accusatory or overly positive. It should read as a helpful AI assistant suggestion without sounding too much like a human reviewer.
7. The comment should be written such that the original author can immediately grasp the idea without close reading.
8. The comment should avoid excessive flattery and comments that are not helpful to the original author. The comment should avoid phrasing like "Great job ...", "Thanks for ...".
Below are some more detailed guidelines that you should apply to this specific review.
HOW MANY FINDINGS TO RETURN:
Output all findings that the original author would fix if they knew about it. If there is no finding that a person would definitely love to see and fix, prefer outputting no findings. Do not stop at the first qualifying finding. Continue until you've listed every qualifying finding.
GUIDELINES:
- Ignore trivial style unless it obscures meaning or violates documented standards.
- Use one comment per distinct issue (or a multi-line range if necessary).
- Use ```suggestion blocks ONLY for concrete replacement code (minimal lines; no commentary inside the block).
- In every ```suggestion block, preserve the exact leading whitespace of the replaced lines (spaces vs tabs, number of spaces).
- Do NOT introduce or remove outer indentation levels unless that is the actual fix.
The comments will be presented in the code review as inline comments. You should avoid providing unnecessary location details in the comment body. Always keep the line range as short as possible for interpreting the issue. Avoid ranges longer than 5–10 lines; instead, choose the most suitable subrange that pinpoints the problem.
At the beginning of the finding title, tag the bug with priority level. For example "[P1] Un-padding slices along wrong tensor dimensions". [P0] Drop everything to fix. Blocking release, operations, or major usage. Only use for universal issues that do not depend on any assumptions about the inputs. · [P1] Urgent. Should be addressed in the next cycle · [P2] Normal. To be fixed eventually · [P3] Low. Nice to have.
Additionally, include a numeric priority field in the JSON output for each finding: set "priority" to 0 for P0, 1 for P1, 2 for P2, or 3 for P3. If a priority cannot be determined, omit the field or use null.
At the end of your findings, output an "overall correctness" verdict of whether or not the patch should be considered "correct".
Correct implies that existing code and tests will not break, and the patch is free of bugs and other blocking issues.
Ignore non-blocking issues such as style, formatting, typos, documentation, and other nits.
FORMATTING GUIDELINES:
The finding description should be one paragraph.
OUTPUT FORMAT:
## Output schema — MUST MATCH *exactly*
```json
{
"findings": [
{
"title": "<≤ 80 chars, imperative>",
"body": "<valid Markdown explaining *why* this is a problem; cite files/lines/functions>",
"confidence_score": <float 0.0-1.0>,
"priority": <int 0-3, optional>,
"code_location": {
"absolute_file_path": "<file path>",
"line_range": {"start": <int>, "end": <int>}
}
}
],
"overall_correctness": "patch is correct" | "patch is incorrect",
"overall_explanation": "<1-3 sentence explanation justifying the overall_correctness verdict>",
"overall_confidence_score": <float 0.0-1.0>
}
```
* **Do not** wrap the JSON in markdown fences or extra prose.
* The code_location field is required and must include absolute_file_path and line_range.
* Line ranges must be as short as possible for interpreting the issue (avoid ranges over 5–10 lines; pick the most suitable subrange).
* The code_location should overlap with the diff.
* Do not generate a PR fix.

View File

@@ -73,6 +73,10 @@ async fn model_selects_expected_tools() {
"read_mcp_resource".to_string(),
"update_plan".to_string(),
"apply_patch".to_string(),
"grep_files".to_string(),
"read_file".to_string(),
"list_dir".to_string(),
"test_sync_tool".to_string(),
"view_image".to_string()
],
"gpt-5-codex should expose the apply_patch tool",
@@ -88,6 +92,10 @@ async fn model_selects_expected_tools() {
"read_mcp_resource".to_string(),
"update_plan".to_string(),
"apply_patch".to_string(),
"grep_files".to_string(),
"read_file".to_string(),
"list_dir".to_string(),
"test_sync_tool".to_string(),
"view_image".to_string()
],
"gpt-5.1-codex should expose the apply_patch tool",

View File

@@ -174,6 +174,10 @@ async fn prompt_tools_are_consistent_across_requests() -> anyhow::Result<()> {
"read_mcp_resource",
"update_plan",
"apply_patch",
"grep_files",
"read_file",
"list_dir",
"test_sync_tool",
"view_image",
];
let body0 = req1.single_request().body_json();

View File

@@ -160,7 +160,11 @@ impl EventProcessor for EventProcessorWithHumanOutput {
}
fn process_event(&mut self, event: Event) -> CodexStatus {
let Event { id: _, msg } = event;
let Event {
id: _,
agent_idx: _,
msg,
} = event;
match msg {
EventMsg::Error(ErrorEvent { message, .. }) => {
let prefix = "ERROR:".style(self.red);

View File

@@ -510,6 +510,7 @@ impl EventProcessor for EventProcessorWithJsonOutput {
fn print_config_summary(&mut self, _: &Config, _: &str, ev: &SessionConfiguredEvent) {
self.process_event(Event {
id: "".to_string(),
agent_idx: Some(0),
msg: EventMsg::SessionConfigured(ev.clone()),
});
}

View File

@@ -61,6 +61,7 @@ use std::time::Duration;
/// Test helper: wraps an `EventMsg` in an `Event`, using the given submission
/// `id` and fixing `agent_idx` to `Some(0)` (the root agent).
fn event(id: &str, msg: EventMsg) -> Event {
    Event {
        id: id.to_string(),
        agent_idx: Some(0),
        msg,
    }
}

View File

@@ -68,6 +68,7 @@ pub async fn run_codex_tool_session(
let session_configured_event = Event {
// Use a fake id value for now.
id: "".to_string(),
agent_idx: Some(0),
msg: EventMsg::SessionConfigured(session_configured.clone()),
};
outgoing

View File

@@ -255,6 +255,7 @@ mod tests {
let rollout_file = NamedTempFile::new()?;
let event = Event {
id: "1".to_string(),
agent_idx: Some(0),
msg: EventMsg::SessionConfigured(SessionConfiguredEvent {
session_id: conversation_id,
model: "gpt-4o".to_string(),
@@ -309,6 +310,7 @@ mod tests {
};
let event = Event {
id: "1".to_string(),
agent_idx: Some(0),
msg: EventMsg::SessionConfigured(session_configured_event.clone()),
};
let meta = OutgoingNotificationMeta {

View File

@@ -483,6 +483,9 @@ impl SandboxPolicy {
pub struct Event {
    /// Submission `id` that this event is correlated with.
    pub id: String,
    /// Collaboration agent index that emitted the event. Root agent is 0.
    /// Defaults to `None` when absent from the serialized form
    /// (per `#[serde(default)]`), keeping older payloads deserializable.
    #[serde(default)]
    pub agent_idx: Option<i32>,
    /// Payload describing what happened.
    pub msg: EventMsg,
}
@@ -1857,6 +1860,7 @@ mod tests {
let rollout_file = NamedTempFile::new()?;
let event = Event {
id: "1234".to_string(),
agent_idx: Some(0),
msg: EventMsg::SessionConfigured(SessionConfiguredEvent {
session_id: conversation_id,
model: "codex-mini-latest".to_string(),
@@ -1916,6 +1920,7 @@ mod tests {
fn serialize_mcp_startup_update_event() -> Result<()> {
let event = Event {
id: "init".to_string(),
agent_idx: Some(0),
msg: EventMsg::McpStartupUpdate(McpStartupUpdateEvent {
server: "srv".to_string(),
status: McpStartupStatus::Failed {
@@ -1936,6 +1941,7 @@ mod tests {
fn serialize_mcp_startup_complete_event() -> Result<()> {
let event = Event {
id: "init".to_string(),
agent_idx: Some(0),
msg: EventMsg::McpStartupComplete(McpStartupCompleteEvent {
ready: vec!["a".to_string()],
failed: vec![McpStartupFailure {

View File

@@ -1454,6 +1454,7 @@ mod tests {
};
app.chat_widget.handle_codex_event(Event {
agent_idx: None,
id: String::new(),
msg: EventMsg::SessionConfigured(event),
});

View File

@@ -1630,6 +1630,7 @@ impl ChatWidget {
use codex_core::protocol::FileChange;
self.app_event_tx.send(AppEvent::CodexEvent(Event {
agent_idx: Some(0),
id: "1".to_string(),
// msg: EventMsg::ExecApprovalRequest(ExecApprovalRequestEvent {
// call_id: "1".to_string(),
@@ -1796,7 +1797,11 @@ impl ChatWidget {
}
pub(crate) fn handle_codex_event(&mut self, event: Event) {
let Event { id, msg } = event;
let Event {
id,
agent_idx: _,
msg,
} = event;
self.dispatch_event_msg(Some(id), msg, false);
}

View File

@@ -36,6 +36,7 @@ pub(crate) fn spawn_agent(
eprintln!("{message}");
app_event_tx_clone.send(AppEvent::CodexEvent(Event {
id: "".to_string(),
agent_idx: Some(0),
msg: EventMsg::Error(err.to_error_event(None)),
}));
app_event_tx_clone.send(AppEvent::ExitRequest);
@@ -48,6 +49,7 @@ pub(crate) fn spawn_agent(
let ev = codex_core::protocol::Event {
// The `id` does not matter for rendering, so we can use a fake value.
id: "".to_string(),
agent_idx: Some(0),
msg: codex_core::protocol::EventMsg::SessionConfigured(session_configured),
};
app_event_tx_clone.send(AppEvent::CodexEvent(ev));
@@ -85,6 +87,7 @@ pub(crate) fn spawn_agent_from_existing(
// Forward the captured `SessionConfigured` event so it can be rendered in the UI.
let ev = codex_core::protocol::Event {
id: "".to_string(),
agent_idx: Some(0),
msg: codex_core::protocol::EventMsg::SessionConfigured(session_configured),
};
app_event_tx_clone.send(AppEvent::CodexEvent(ev));

View File

@@ -129,6 +129,7 @@ fn resumed_initial_messages_render_history() {
};
chat.handle_codex_event(Event {
agent_idx: None,
id: "initial".into(),
msg: EventMsg::SessionConfigured(configured),
});
@@ -161,6 +162,7 @@ fn entered_review_mode_uses_request_hint() {
let (mut chat, mut rx, _ops) = make_chatwidget_manual(None);
chat.handle_codex_event(Event {
agent_idx: None,
id: "review-start".into(),
msg: EventMsg::EnteredReviewMode(ReviewRequest {
target: ReviewTarget::BaseBranch {
@@ -182,6 +184,7 @@ fn entered_review_mode_defaults_to_current_changes_banner() {
let (mut chat, mut rx, _ops) = make_chatwidget_manual(None);
chat.handle_codex_event(Event {
agent_idx: None,
id: "review-start".into(),
msg: EventMsg::EnteredReviewMode(ReviewRequest {
target: ReviewTarget::UncommittedChanges,
@@ -218,6 +221,7 @@ fn exited_review_mode_emits_results_and_finishes() {
};
chat.handle_codex_event(Event {
agent_idx: None,
id: "review-end".into(),
msg: EventMsg::ExitedReviewMode(ExitedReviewModeEvent {
review_output: Some(review),
@@ -240,6 +244,7 @@ fn review_restores_context_window_indicator() {
let review_tokens = 12_030; // ~97% remaining after subtracting baseline.
chat.handle_codex_event(Event {
agent_idx: None,
id: "token-before".into(),
msg: EventMsg::TokenCount(TokenCountEvent {
info: Some(make_token_info(pre_review_tokens, context_window)),
@@ -249,6 +254,7 @@ fn review_restores_context_window_indicator() {
assert_eq!(chat.bottom_pane.context_window_percent(), Some(30));
chat.handle_codex_event(Event {
agent_idx: None,
id: "review-start".into(),
msg: EventMsg::EnteredReviewMode(ReviewRequest {
target: ReviewTarget::BaseBranch {
@@ -259,6 +265,7 @@ fn review_restores_context_window_indicator() {
});
chat.handle_codex_event(Event {
agent_idx: None,
id: "token-review".into(),
msg: EventMsg::TokenCount(TokenCountEvent {
info: Some(make_token_info(review_tokens, context_window)),
@@ -268,6 +275,7 @@ fn review_restores_context_window_indicator() {
assert_eq!(chat.bottom_pane.context_window_percent(), Some(97));
chat.handle_codex_event(Event {
agent_idx: None,
id: "review-end".into(),
msg: EventMsg::ExitedReviewMode(ExitedReviewModeEvent {
review_output: None,
@@ -288,6 +296,7 @@ fn token_count_none_resets_context_indicator() {
let pre_compact_tokens = 12_700;
chat.handle_codex_event(Event {
agent_idx: None,
id: "token-before".into(),
msg: EventMsg::TokenCount(TokenCountEvent {
info: Some(make_token_info(pre_compact_tokens, context_window)),
@@ -297,6 +306,7 @@ fn token_count_none_resets_context_indicator() {
assert_eq!(chat.bottom_pane.context_window_percent(), Some(30));
chat.handle_codex_event(Event {
agent_idx: None,
id: "token-cleared".into(),
msg: EventMsg::TokenCount(TokenCountEvent {
info: None,
@@ -327,6 +337,7 @@ fn context_indicator_shows_used_tokens_when_window_unknown() {
};
chat.handle_codex_event(Event {
agent_idx: None,
id: "token-usage".into(),
msg: EventMsg::TokenCount(TokenCountEvent {
info: Some(token_info),
@@ -762,6 +773,7 @@ fn exec_approval_emits_proposed_command_and_decision_history() {
parsed_cmd: vec![],
};
chat.handle_codex_event(Event {
agent_idx: None,
id: "sub-short".into(),
msg: EventMsg::ExecApprovalRequest(ev),
});
@@ -806,6 +818,7 @@ fn exec_approval_decision_truncates_multiline_and_long_commands() {
parsed_cmd: vec![],
};
chat.handle_codex_event(Event {
agent_idx: None,
id: "sub-multi".into(),
msg: EventMsg::ExecApprovalRequest(ev_multi),
});
@@ -856,6 +869,7 @@ fn exec_approval_decision_truncates_multiline_and_long_commands() {
parsed_cmd: vec![],
};
chat.handle_codex_event(Event {
agent_idx: None,
id: "sub-long".into(),
msg: EventMsg::ExecApprovalRequest(ev_long),
});
@@ -898,6 +912,7 @@ fn begin_exec_with_source(
interaction_input,
};
chat.handle_codex_event(Event {
agent_idx: None,
id: call_id.to_string(),
msg: EventMsg::ExecCommandBegin(event.clone()),
});
@@ -931,6 +946,7 @@ fn end_exec(
process_id,
} = begin_event;
chat.handle_codex_event(Event {
agent_idx: None,
id: call_id.clone(),
msg: EventMsg::ExecCommandEnd(ExecCommandEndEvent {
call_id,
@@ -1186,6 +1202,7 @@ fn exec_end_without_begin_uses_event_command() {
let parsed_cmd = codex_core::parse_command::parse_command(&command);
let cwd = std::env::current_dir().unwrap_or_else(|_| PathBuf::from("."));
chat.handle_codex_event(Event {
agent_idx: None,
id: "call-orphan".to_string(),
msg: EventMsg::ExecCommandEnd(ExecCommandEndEvent {
call_id: "call-orphan".to_string(),
@@ -1383,6 +1400,7 @@ fn undo_success_events_render_info_messages() {
let (mut chat, mut rx, _op_rx) = make_chatwidget_manual(None);
chat.handle_codex_event(Event {
agent_idx: None,
id: "turn-1".to_string(),
msg: EventMsg::UndoStarted(UndoStartedEvent {
message: Some("Undo requested for the last turn...".to_string()),
@@ -1394,6 +1412,7 @@ fn undo_success_events_render_info_messages() {
);
chat.handle_codex_event(Event {
agent_idx: None,
id: "turn-1".to_string(),
msg: EventMsg::UndoCompleted(UndoCompletedEvent {
success: true,
@@ -1420,6 +1439,7 @@ fn undo_failure_events_render_error_message() {
let (mut chat, mut rx, _op_rx) = make_chatwidget_manual(None);
chat.handle_codex_event(Event {
agent_idx: None,
id: "turn-2".to_string(),
msg: EventMsg::UndoStarted(UndoStartedEvent { message: None }),
});
@@ -1429,6 +1449,7 @@ fn undo_failure_events_render_error_message() {
);
chat.handle_codex_event(Event {
agent_idx: None,
id: "turn-2".to_string(),
msg: EventMsg::UndoCompleted(UndoCompletedEvent {
success: false,
@@ -1455,6 +1476,7 @@ fn undo_started_hides_interrupt_hint() {
let (mut chat, _rx, _op_rx) = make_chatwidget_manual(None);
chat.handle_codex_event(Event {
agent_idx: None,
id: "turn-hint".to_string(),
msg: EventMsg::UndoStarted(UndoStartedEvent { message: None }),
});
@@ -1578,6 +1600,7 @@ fn view_image_tool_call_adds_history_cell() {
let image_path = chat.config.cwd.join("example.png");
chat.handle_codex_event(Event {
agent_idx: None,
id: "sub-image".into(),
msg: EventMsg::ViewImageToolCall(ViewImageToolCallEvent {
call_id: "call-image".into(),
@@ -1603,6 +1626,7 @@ fn interrupt_exec_marks_failed_snapshot() {
// Simulate the task being aborted (as if ESC was pressed), which should
// cause the active exec cell to be finalized as failed and flushed.
chat.handle_codex_event(Event {
agent_idx: None,
id: "call-int".into(),
msg: EventMsg::TurnAborted(codex_core::protocol::TurnAbortedEvent {
reason: TurnAbortReason::Interrupted,
@@ -1628,6 +1652,7 @@ fn interrupted_turn_error_message_snapshot() {
// Simulate an in-progress task so the widget is in a running state.
chat.handle_codex_event(Event {
agent_idx: None,
id: "task-1".into(),
msg: EventMsg::TaskStarted(TaskStartedEvent {
model_context_window: None,
@@ -1636,6 +1661,7 @@ fn interrupted_turn_error_message_snapshot() {
// Abort the turn (like pressing Esc) and drain inserted history.
chat.handle_codex_event(Event {
agent_idx: None,
id: "task-1".into(),
msg: EventMsg::TurnAborted(codex_core::protocol::TurnAbortedEvent {
reason: TurnAbortReason::Interrupted,
@@ -2106,6 +2132,7 @@ fn approval_modal_exec_snapshot() {
parsed_cmd: vec![],
};
chat.handle_codex_event(Event {
agent_idx: None,
id: "sub-approve".into(),
msg: EventMsg::ExecApprovalRequest(ev),
});
@@ -2157,6 +2184,7 @@ fn approval_modal_exec_without_reason_snapshot() {
parsed_cmd: vec![],
};
chat.handle_codex_event(Event {
agent_idx: None,
id: "sub-approve-noreason".into(),
msg: EventMsg::ExecApprovalRequest(ev),
});
@@ -2197,6 +2225,7 @@ fn approval_modal_patch_snapshot() {
grant_root: Some(PathBuf::from("/tmp")),
};
chat.handle_codex_event(Event {
agent_idx: None,
id: "sub-approve-patch".into(),
msg: EventMsg::ApplyPatchApprovalRequest(ev),
});
@@ -2231,6 +2260,7 @@ fn interrupt_restores_queued_messages_into_composer() {
// Deliver a TurnAborted event with Interrupted reason (as if Esc was pressed).
chat.handle_codex_event(Event {
agent_idx: None,
id: "turn-1".into(),
msg: EventMsg::TurnAborted(codex_core::protocol::TurnAbortedEvent {
reason: TurnAbortReason::Interrupted,
@@ -2269,6 +2299,7 @@ fn interrupt_prepends_queued_messages_before_existing_composer_text() {
chat.refresh_queued_user_messages();
chat.handle_codex_event(Event {
agent_idx: None,
id: "turn-1".into(),
msg: EventMsg::TurnAborted(codex_core::protocol::TurnAbortedEvent {
reason: TurnAbortReason::Interrupted,
@@ -2314,12 +2345,14 @@ fn ui_snapshots_small_heights_task_running() {
let (mut chat, _rx, _op_rx) = make_chatwidget_manual(None);
// Activate status line
chat.handle_codex_event(Event {
agent_idx: None,
id: "task-1".into(),
msg: EventMsg::TaskStarted(TaskStartedEvent {
model_context_window: None,
}),
});
chat.handle_codex_event(Event {
agent_idx: None,
id: "task-1".into(),
msg: EventMsg::AgentReasoningDelta(AgentReasoningDeltaEvent {
delta: "**Thinking**".into(),
@@ -2345,6 +2378,7 @@ fn status_widget_and_approval_modal_snapshot() {
let (mut chat, _rx, _op_rx) = make_chatwidget_manual(None);
// Begin a running task so the status indicator would be active.
chat.handle_codex_event(Event {
agent_idx: None,
id: "task-1".into(),
msg: EventMsg::TaskStarted(TaskStartedEvent {
model_context_window: None,
@@ -2352,6 +2386,7 @@ fn status_widget_and_approval_modal_snapshot() {
});
// Provide a deterministic header for the status line.
chat.handle_codex_event(Event {
agent_idx: None,
id: "task-1".into(),
msg: EventMsg::AgentReasoningDelta(AgentReasoningDeltaEvent {
delta: "**Analyzing**".into(),
@@ -2374,6 +2409,7 @@ fn status_widget_and_approval_modal_snapshot() {
parsed_cmd: vec![],
};
chat.handle_codex_event(Event {
agent_idx: None,
id: "sub-approve-exec".into(),
msg: EventMsg::ExecApprovalRequest(ev),
});
@@ -2397,6 +2433,7 @@ fn status_widget_active_snapshot() {
let (mut chat, _rx, _op_rx) = make_chatwidget_manual(None);
// Activate the status indicator by simulating a task start.
chat.handle_codex_event(Event {
agent_idx: None,
id: "task-1".into(),
msg: EventMsg::TaskStarted(TaskStartedEvent {
model_context_window: None,
@@ -2404,6 +2441,7 @@ fn status_widget_active_snapshot() {
});
// Provide a deterministic header via a bold reasoning chunk.
chat.handle_codex_event(Event {
agent_idx: None,
id: "task-1".into(),
msg: EventMsg::AgentReasoningDelta(AgentReasoningDeltaEvent {
delta: "**Analyzing**".into(),
@@ -2425,6 +2463,7 @@ fn mcp_startup_header_booting_snapshot() {
chat.show_welcome_banner = false;
chat.handle_codex_event(Event {
agent_idx: None,
id: "mcp-1".into(),
msg: EventMsg::McpStartupUpdate(McpStartupUpdateEvent {
server: "alpha".into(),
@@ -2446,6 +2485,7 @@ fn background_event_updates_status_header() {
let (mut chat, mut rx, _op_rx) = make_chatwidget_manual(None);
chat.handle_codex_event(Event {
agent_idx: None,
id: "bg-1".into(),
msg: EventMsg::BackgroundEvent(BackgroundEventEvent {
message: "Waiting for `vim`".to_string(),
@@ -2477,6 +2517,7 @@ fn apply_patch_events_emit_history_cells() {
grant_root: None,
};
chat.handle_codex_event(Event {
agent_idx: None,
id: "s1".into(),
msg: EventMsg::ApplyPatchApprovalRequest(ev),
});
@@ -2517,6 +2558,7 @@ fn apply_patch_events_emit_history_cells() {
changes: changes2,
};
chat.handle_codex_event(Event {
agent_idx: None,
id: "s1".into(),
msg: EventMsg::PatchApplyBegin(begin),
});
@@ -2545,6 +2587,7 @@ fn apply_patch_events_emit_history_cells() {
changes: end_changes,
};
chat.handle_codex_event(Event {
agent_idx: None,
id: "s1".into(),
msg: EventMsg::PatchApplyEnd(end),
});
@@ -2567,6 +2610,7 @@ fn apply_patch_manual_approval_adjusts_header() {
},
);
chat.handle_codex_event(Event {
agent_idx: None,
id: "s1".into(),
msg: EventMsg::ApplyPatchApprovalRequest(ApplyPatchApprovalRequestEvent {
call_id: "c1".into(),
@@ -2586,6 +2630,7 @@ fn apply_patch_manual_approval_adjusts_header() {
},
);
chat.handle_codex_event(Event {
agent_idx: None,
id: "s1".into(),
msg: EventMsg::PatchApplyBegin(PatchApplyBeginEvent {
call_id: "c1".into(),
@@ -2616,6 +2661,7 @@ fn apply_patch_manual_flow_snapshot() {
},
);
chat.handle_codex_event(Event {
agent_idx: None,
id: "s1".into(),
msg: EventMsg::ApplyPatchApprovalRequest(ApplyPatchApprovalRequestEvent {
call_id: "c1".into(),
@@ -2639,6 +2685,7 @@ fn apply_patch_manual_flow_snapshot() {
},
);
chat.handle_codex_event(Event {
agent_idx: None,
id: "s1".into(),
msg: EventMsg::PatchApplyBegin(PatchApplyBeginEvent {
call_id: "c1".into(),
@@ -2676,6 +2723,7 @@ fn apply_patch_approval_sends_op_with_submission_id() {
grant_root: None,
};
chat.handle_codex_event(Event {
agent_idx: None,
id: "sub-123".into(),
msg: EventMsg::ApplyPatchApprovalRequest(ev),
});
@@ -2707,6 +2755,7 @@ fn apply_patch_full_flow_integration_like() {
FileChange::Add { content: "".into() },
);
chat.handle_codex_event(Event {
agent_idx: None,
id: "sub-xyz".into(),
msg: EventMsg::ApplyPatchApprovalRequest(ApplyPatchApprovalRequestEvent {
call_id: "call-1".into(),
@@ -2748,6 +2797,7 @@ fn apply_patch_full_flow_integration_like() {
FileChange::Add { content: "".into() },
);
chat.handle_codex_event(Event {
agent_idx: None,
id: "sub-xyz".into(),
msg: EventMsg::PatchApplyBegin(PatchApplyBeginEvent {
call_id: "call-1".into(),
@@ -2762,6 +2812,7 @@ fn apply_patch_full_flow_integration_like() {
FileChange::Add { content: "".into() },
);
chat.handle_codex_event(Event {
agent_idx: None,
id: "sub-xyz".into(),
msg: EventMsg::PatchApplyEnd(PatchApplyEndEvent {
call_id: "call-1".into(),
@@ -2787,6 +2838,7 @@ fn apply_patch_untrusted_shows_approval_modal() {
FileChange::Add { content: "".into() },
);
chat.handle_codex_event(Event {
agent_idx: None,
id: "sub-1".into(),
msg: EventMsg::ApplyPatchApprovalRequest(ApplyPatchApprovalRequestEvent {
call_id: "call-1".into(),
@@ -2836,6 +2888,7 @@ fn apply_patch_request_shows_diff_summary() {
},
);
chat.handle_codex_event(Event {
agent_idx: None,
id: "sub-apply".into(),
msg: EventMsg::ApplyPatchApprovalRequest(ApplyPatchApprovalRequestEvent {
call_id: "call-apply".into(),
@@ -2906,6 +2959,7 @@ fn plan_update_renders_history_cell() {
],
};
chat.handle_codex_event(Event {
agent_idx: None,
id: "sub-1".into(),
msg: EventMsg::PlanUpdate(update),
});
@@ -2927,6 +2981,7 @@ fn stream_error_updates_status_indicator() {
chat.bottom_pane.set_task_running(true);
let msg = "Reconnecting... 2/5";
chat.handle_codex_event(Event {
agent_idx: None,
id: "sub-1".into(),
msg: EventMsg::StreamError(StreamErrorEvent {
message: msg.to_string(),
@@ -2950,6 +3005,7 @@ fn stream_error_updates_status_indicator() {
fn warning_event_adds_warning_history_cell() {
let (mut chat, mut rx, _op_rx) = make_chatwidget_manual(None);
chat.handle_codex_event(Event {
agent_idx: None,
id: "sub-1".into(),
msg: EventMsg::Warning(WarningEvent {
message: "test warning message".to_string(),
@@ -2969,6 +3025,7 @@ fn warning_event_adds_warning_history_cell() {
fn stream_recovery_restores_previous_status_header() {
let (mut chat, mut rx, _op_rx) = make_chatwidget_manual(None);
chat.handle_codex_event(Event {
agent_idx: None,
id: "task".into(),
msg: EventMsg::TaskStarted(TaskStartedEvent {
model_context_window: None,
@@ -2976,6 +3033,7 @@ fn stream_recovery_restores_previous_status_header() {
});
drain_insert_history(&mut rx);
chat.handle_codex_event(Event {
agent_idx: None,
id: "retry".into(),
msg: EventMsg::StreamError(StreamErrorEvent {
message: "Reconnecting... 1/5".to_string(),
@@ -2984,6 +3042,7 @@ fn stream_recovery_restores_previous_status_header() {
});
drain_insert_history(&mut rx);
chat.handle_codex_event(Event {
agent_idx: None,
id: "delta".into(),
msg: EventMsg::AgentMessageDelta(AgentMessageDeltaEvent {
delta: "hello".to_string(),
@@ -3004,6 +3063,7 @@ fn multiple_agent_messages_in_single_turn_emit_multiple_headers() {
// Begin turn
chat.handle_codex_event(Event {
agent_idx: None,
id: "s1".into(),
msg: EventMsg::TaskStarted(TaskStartedEvent {
model_context_window: None,
@@ -3012,6 +3072,7 @@ fn multiple_agent_messages_in_single_turn_emit_multiple_headers() {
// First finalized assistant message
chat.handle_codex_event(Event {
agent_idx: None,
id: "s1".into(),
msg: EventMsg::AgentMessage(AgentMessageEvent {
message: "First message".into(),
@@ -3020,6 +3081,7 @@ fn multiple_agent_messages_in_single_turn_emit_multiple_headers() {
// Second finalized assistant message in the same turn
chat.handle_codex_event(Event {
agent_idx: None,
id: "s1".into(),
msg: EventMsg::AgentMessage(AgentMessageEvent {
message: "Second message".into(),
@@ -3028,6 +3090,7 @@ fn multiple_agent_messages_in_single_turn_emit_multiple_headers() {
// End turn
chat.handle_codex_event(Event {
agent_idx: None,
id: "s1".into(),
msg: EventMsg::TaskComplete(TaskCompleteEvent {
last_agent_message: None,
@@ -3058,12 +3121,14 @@ fn final_reasoning_then_message_without_deltas_are_rendered() {
// No deltas; only final reasoning followed by final message.
chat.handle_codex_event(Event {
agent_idx: None,
id: "s1".into(),
msg: EventMsg::AgentReasoning(AgentReasoningEvent {
text: "I will first analyze the request.".into(),
}),
});
chat.handle_codex_event(Event {
agent_idx: None,
id: "s1".into(),
msg: EventMsg::AgentMessage(AgentMessageEvent {
message: "Here is the result.".into(),
@@ -3085,24 +3150,28 @@ fn deltas_then_same_final_message_are_rendered_snapshot() {
// Stream some reasoning deltas first.
chat.handle_codex_event(Event {
agent_idx: None,
id: "s1".into(),
msg: EventMsg::AgentReasoningDelta(AgentReasoningDeltaEvent {
delta: "I will ".into(),
}),
});
chat.handle_codex_event(Event {
agent_idx: None,
id: "s1".into(),
msg: EventMsg::AgentReasoningDelta(AgentReasoningDeltaEvent {
delta: "first analyze the ".into(),
}),
});
chat.handle_codex_event(Event {
agent_idx: None,
id: "s1".into(),
msg: EventMsg::AgentReasoningDelta(AgentReasoningDeltaEvent {
delta: "request.".into(),
}),
});
chat.handle_codex_event(Event {
agent_idx: None,
id: "s1".into(),
msg: EventMsg::AgentReasoning(AgentReasoningEvent {
text: "request.".into(),
@@ -3111,12 +3180,14 @@ fn deltas_then_same_final_message_are_rendered_snapshot() {
// Then stream answer deltas, followed by the exact same final message.
chat.handle_codex_event(Event {
agent_idx: None,
id: "s1".into(),
msg: EventMsg::AgentMessageDelta(AgentMessageDeltaEvent {
delta: "Here is the ".into(),
}),
});
chat.handle_codex_event(Event {
agent_idx: None,
id: "s1".into(),
msg: EventMsg::AgentMessageDelta(AgentMessageDeltaEvent {
delta: "result.".into(),
@@ -3124,6 +3195,7 @@ fn deltas_then_same_final_message_are_rendered_snapshot() {
});
chat.handle_codex_event(Event {
agent_idx: None,
id: "s1".into(),
msg: EventMsg::AgentMessage(AgentMessageEvent {
message: "Here is the result.".into(),
@@ -3147,8 +3219,11 @@ fn deltas_then_same_final_message_are_rendered_snapshot() {
fn chatwidget_exec_and_status_layout_vt100_snapshot() {
let (mut chat, mut rx, _op_rx) = make_chatwidget_manual(None);
chat.handle_codex_event(Event {
agent_idx: None,
id: "t1".into(),
msg: EventMsg::AgentMessage(AgentMessageEvent { message: "Im going to search the repo for where “Change Approved” is rendered to update that view.".into() }),
msg: EventMsg::TaskStarted(TaskStartedEvent {
model_context_window: None,
}),
});
let command = vec!["bash".into(), "-lc".into(), "rg \"Change Approved\"".into()];
@@ -3166,6 +3241,7 @@ fn chatwidget_exec_and_status_layout_vt100_snapshot() {
];
let cwd = std::env::current_dir().unwrap_or_else(|_| PathBuf::from("."));
chat.handle_codex_event(Event {
agent_idx: None,
id: "c1".into(),
msg: EventMsg::ExecCommandBegin(ExecCommandBeginEvent {
call_id: "c1".into(),
@@ -3179,6 +3255,7 @@ fn chatwidget_exec_and_status_layout_vt100_snapshot() {
}),
});
chat.handle_codex_event(Event {
agent_idx: None,
id: "c1".into(),
msg: EventMsg::ExecCommandEnd(ExecCommandEndEvent {
call_id: "c1".into(),
@@ -3198,12 +3275,14 @@ fn chatwidget_exec_and_status_layout_vt100_snapshot() {
}),
});
chat.handle_codex_event(Event {
agent_idx: None,
id: "t1".into(),
msg: EventMsg::TaskStarted(TaskStartedEvent {
model_context_window: None,
}),
});
chat.handle_codex_event(Event {
agent_idx: None,
id: "t1".into(),
msg: EventMsg::AgentReasoningDelta(AgentReasoningDeltaEvent {
delta: "**Investigating rendering code**".into(),
@@ -3242,6 +3321,7 @@ fn chatwidget_markdown_code_blocks_vt100_snapshot() {
// Simulate a final agent message via streaming deltas instead of a single message
chat.handle_codex_event(Event {
agent_idx: None,
id: "t1".into(),
msg: EventMsg::TaskStarted(TaskStartedEvent {
model_context_window: None,
@@ -3290,6 +3370,7 @@ printf 'fenced within fenced\n'
}
chat.handle_codex_event(Event {
agent_idx: None,
id: "t1".into(),
msg: EventMsg::AgentMessageDelta(AgentMessageDeltaEvent { delta }),
});
@@ -3313,6 +3394,7 @@ printf 'fenced within fenced\n'
// Finalize the stream without sending a final AgentMessage, to flush any tail.
chat.handle_codex_event(Event {
agent_idx: None,
id: "t1".into(),
msg: EventMsg::TaskComplete(TaskCompleteEvent {
last_agent_message: None,
@@ -3330,6 +3412,7 @@ printf 'fenced within fenced\n'
fn chatwidget_tall() {
let (mut chat, _rx, _op_rx) = make_chatwidget_manual(None);
chat.handle_codex_event(Event {
agent_idx: None,
id: "t1".into(),
msg: EventMsg::TaskStarted(TaskStartedEvent {
model_context_window: None,

View File

@@ -2447,6 +2447,7 @@ mod tests {
};
app.chat_widget.handle_codex_event(Event {
agent_idx: None,
id: String::new(),
msg: EventMsg::SessionConfigured(event),
});

View File

@@ -1630,6 +1630,7 @@ impl ChatWidget {
use codex_core::protocol::FileChange;
self.app_event_tx.send(AppEvent::CodexEvent(Event {
agent_idx: None,
id: "1".to_string(),
// msg: EventMsg::ExecApprovalRequest(ExecApprovalRequestEvent {
// call_id: "1".to_string(),
@@ -1796,7 +1797,7 @@ impl ChatWidget {
}
pub(crate) fn handle_codex_event(&mut self, event: Event) {
let Event { id, msg } = event;
let Event { id, msg, .. } = event;
self.dispatch_event_msg(Some(id), msg, false);
}

View File

@@ -35,6 +35,7 @@ pub(crate) fn spawn_agent(
let message = err.to_string();
eprintln!("{message}");
app_event_tx_clone.send(AppEvent::CodexEvent(Event {
agent_idx: Some(0),
id: "".to_string(),
msg: EventMsg::Error(err.to_error_event(None)),
}));
@@ -46,6 +47,7 @@ pub(crate) fn spawn_agent(
// Forward the captured `SessionConfigured` event so it can be rendered in the UI.
let ev = codex_core::protocol::Event {
agent_idx: Some(0),
// The `id` does not matter for rendering, so we can use a fake value.
id: "".to_string(),
msg: codex_core::protocol::EventMsg::SessionConfigured(session_configured),
@@ -84,6 +86,7 @@ pub(crate) fn spawn_agent_from_existing(
tokio::spawn(async move {
// Forward the captured `SessionConfigured` event so it can be rendered in the UI.
let ev = codex_core::protocol::Event {
agent_idx: Some(0),
id: "".to_string(),
msg: codex_core::protocol::EventMsg::SessionConfigured(session_configured),
};

View File

@@ -129,6 +129,7 @@ fn resumed_initial_messages_render_history() {
};
chat.handle_codex_event(Event {
agent_idx: None,
id: "initial".into(),
msg: EventMsg::SessionConfigured(configured),
});
@@ -161,6 +162,7 @@ fn entered_review_mode_uses_request_hint() {
let (mut chat, mut rx, _ops) = make_chatwidget_manual(None);
chat.handle_codex_event(Event {
agent_idx: None,
id: "review-start".into(),
msg: EventMsg::EnteredReviewMode(ReviewRequest {
target: ReviewTarget::BaseBranch {
@@ -182,6 +184,7 @@ fn entered_review_mode_defaults_to_current_changes_banner() {
let (mut chat, mut rx, _ops) = make_chatwidget_manual(None);
chat.handle_codex_event(Event {
agent_idx: None,
id: "review-start".into(),
msg: EventMsg::EnteredReviewMode(ReviewRequest {
target: ReviewTarget::UncommittedChanges,
@@ -218,6 +221,7 @@ fn exited_review_mode_emits_results_and_finishes() {
};
chat.handle_codex_event(Event {
agent_idx: None,
id: "review-end".into(),
msg: EventMsg::ExitedReviewMode(ExitedReviewModeEvent {
review_output: Some(review),
@@ -240,6 +244,7 @@ fn review_restores_context_window_indicator() {
let review_tokens = 12_030; // ~97% remaining after subtracting baseline.
chat.handle_codex_event(Event {
agent_idx: None,
id: "token-before".into(),
msg: EventMsg::TokenCount(TokenCountEvent {
info: Some(make_token_info(pre_review_tokens, context_window)),
@@ -249,6 +254,7 @@ fn review_restores_context_window_indicator() {
assert_eq!(chat.bottom_pane.context_window_percent(), Some(30));
chat.handle_codex_event(Event {
agent_idx: None,
id: "review-start".into(),
msg: EventMsg::EnteredReviewMode(ReviewRequest {
target: ReviewTarget::BaseBranch {
@@ -259,6 +265,7 @@ fn review_restores_context_window_indicator() {
});
chat.handle_codex_event(Event {
agent_idx: None,
id: "token-review".into(),
msg: EventMsg::TokenCount(TokenCountEvent {
info: Some(make_token_info(review_tokens, context_window)),
@@ -268,6 +275,7 @@ fn review_restores_context_window_indicator() {
assert_eq!(chat.bottom_pane.context_window_percent(), Some(97));
chat.handle_codex_event(Event {
agent_idx: None,
id: "review-end".into(),
msg: EventMsg::ExitedReviewMode(ExitedReviewModeEvent {
review_output: None,
@@ -288,6 +296,7 @@ fn token_count_none_resets_context_indicator() {
let pre_compact_tokens = 12_700;
chat.handle_codex_event(Event {
agent_idx: None,
id: "token-before".into(),
msg: EventMsg::TokenCount(TokenCountEvent {
info: Some(make_token_info(pre_compact_tokens, context_window)),
@@ -297,6 +306,7 @@ fn token_count_none_resets_context_indicator() {
assert_eq!(chat.bottom_pane.context_window_percent(), Some(30));
chat.handle_codex_event(Event {
agent_idx: None,
id: "token-cleared".into(),
msg: EventMsg::TokenCount(TokenCountEvent {
info: None,
@@ -327,6 +337,7 @@ fn context_indicator_shows_used_tokens_when_window_unknown() {
};
chat.handle_codex_event(Event {
agent_idx: None,
id: "token-usage".into(),
msg: EventMsg::TokenCount(TokenCountEvent {
info: Some(token_info),
@@ -762,6 +773,7 @@ fn exec_approval_emits_proposed_command_and_decision_history() {
parsed_cmd: vec![],
};
chat.handle_codex_event(Event {
agent_idx: None,
id: "sub-short".into(),
msg: EventMsg::ExecApprovalRequest(ev),
});
@@ -806,6 +818,7 @@ fn exec_approval_decision_truncates_multiline_and_long_commands() {
parsed_cmd: vec![],
};
chat.handle_codex_event(Event {
agent_idx: None,
id: "sub-multi".into(),
msg: EventMsg::ExecApprovalRequest(ev_multi),
});
@@ -856,6 +869,7 @@ fn exec_approval_decision_truncates_multiline_and_long_commands() {
parsed_cmd: vec![],
};
chat.handle_codex_event(Event {
agent_idx: None,
id: "sub-long".into(),
msg: EventMsg::ExecApprovalRequest(ev_long),
});
@@ -898,6 +912,7 @@ fn begin_exec_with_source(
interaction_input,
};
chat.handle_codex_event(Event {
agent_idx: None,
id: call_id.to_string(),
msg: EventMsg::ExecCommandBegin(event.clone()),
});
@@ -931,6 +946,7 @@ fn end_exec(
process_id,
} = begin_event;
chat.handle_codex_event(Event {
agent_idx: None,
id: call_id.clone(),
msg: EventMsg::ExecCommandEnd(ExecCommandEndEvent {
call_id,
@@ -1186,6 +1202,7 @@ fn exec_end_without_begin_uses_event_command() {
let parsed_cmd = codex_core::parse_command::parse_command(&command);
let cwd = std::env::current_dir().unwrap_or_else(|_| PathBuf::from("."));
chat.handle_codex_event(Event {
agent_idx: None,
id: "call-orphan".to_string(),
msg: EventMsg::ExecCommandEnd(ExecCommandEndEvent {
call_id: "call-orphan".to_string(),
@@ -1383,6 +1400,7 @@ fn undo_success_events_render_info_messages() {
let (mut chat, mut rx, _op_rx) = make_chatwidget_manual(None);
chat.handle_codex_event(Event {
agent_idx: None,
id: "turn-1".to_string(),
msg: EventMsg::UndoStarted(UndoStartedEvent {
message: Some("Undo requested for the last turn...".to_string()),
@@ -1394,6 +1412,7 @@ fn undo_success_events_render_info_messages() {
);
chat.handle_codex_event(Event {
agent_idx: None,
id: "turn-1".to_string(),
msg: EventMsg::UndoCompleted(UndoCompletedEvent {
success: true,
@@ -1420,6 +1439,7 @@ fn undo_failure_events_render_error_message() {
let (mut chat, mut rx, _op_rx) = make_chatwidget_manual(None);
chat.handle_codex_event(Event {
agent_idx: None,
id: "turn-2".to_string(),
msg: EventMsg::UndoStarted(UndoStartedEvent { message: None }),
});
@@ -1429,6 +1449,7 @@ fn undo_failure_events_render_error_message() {
);
chat.handle_codex_event(Event {
agent_idx: None,
id: "turn-2".to_string(),
msg: EventMsg::UndoCompleted(UndoCompletedEvent {
success: false,
@@ -1455,6 +1476,7 @@ fn undo_started_hides_interrupt_hint() {
let (mut chat, _rx, _op_rx) = make_chatwidget_manual(None);
chat.handle_codex_event(Event {
agent_idx: None,
id: "turn-hint".to_string(),
msg: EventMsg::UndoStarted(UndoStartedEvent { message: None }),
});
@@ -1578,6 +1600,7 @@ fn view_image_tool_call_adds_history_cell() {
let image_path = chat.config.cwd.join("example.png");
chat.handle_codex_event(Event {
agent_idx: None,
id: "sub-image".into(),
msg: EventMsg::ViewImageToolCall(ViewImageToolCallEvent {
call_id: "call-image".into(),
@@ -1603,6 +1626,7 @@ fn interrupt_exec_marks_failed_snapshot() {
// Simulate the task being aborted (as if ESC was pressed), which should
// cause the active exec cell to be finalized as failed and flushed.
chat.handle_codex_event(Event {
agent_idx: None,
id: "call-int".into(),
msg: EventMsg::TurnAborted(codex_core::protocol::TurnAbortedEvent {
reason: TurnAbortReason::Interrupted,
@@ -1628,6 +1652,7 @@ fn interrupted_turn_error_message_snapshot() {
// Simulate an in-progress task so the widget is in a running state.
chat.handle_codex_event(Event {
agent_idx: None,
id: "task-1".into(),
msg: EventMsg::TaskStarted(TaskStartedEvent {
model_context_window: None,
@@ -1636,6 +1661,7 @@ fn interrupted_turn_error_message_snapshot() {
// Abort the turn (like pressing Esc) and drain inserted history.
chat.handle_codex_event(Event {
agent_idx: None,
id: "task-1".into(),
msg: EventMsg::TurnAborted(codex_core::protocol::TurnAbortedEvent {
reason: TurnAbortReason::Interrupted,
@@ -2106,6 +2132,7 @@ fn approval_modal_exec_snapshot() {
parsed_cmd: vec![],
};
chat.handle_codex_event(Event {
agent_idx: None,
id: "sub-approve".into(),
msg: EventMsg::ExecApprovalRequest(ev),
});
@@ -2157,6 +2184,7 @@ fn approval_modal_exec_without_reason_snapshot() {
parsed_cmd: vec![],
};
chat.handle_codex_event(Event {
agent_idx: None,
id: "sub-approve-noreason".into(),
msg: EventMsg::ExecApprovalRequest(ev),
});
@@ -2197,6 +2225,7 @@ fn approval_modal_patch_snapshot() {
grant_root: Some(PathBuf::from("/tmp")),
};
chat.handle_codex_event(Event {
agent_idx: None,
id: "sub-approve-patch".into(),
msg: EventMsg::ApplyPatchApprovalRequest(ev),
});
@@ -2231,6 +2260,7 @@ fn interrupt_restores_queued_messages_into_composer() {
// Deliver a TurnAborted event with Interrupted reason (as if Esc was pressed).
chat.handle_codex_event(Event {
agent_idx: None,
id: "turn-1".into(),
msg: EventMsg::TurnAborted(codex_core::protocol::TurnAbortedEvent {
reason: TurnAbortReason::Interrupted,
@@ -2269,6 +2299,7 @@ fn interrupt_prepends_queued_messages_before_existing_composer_text() {
chat.refresh_queued_user_messages();
chat.handle_codex_event(Event {
agent_idx: None,
id: "turn-1".into(),
msg: EventMsg::TurnAborted(codex_core::protocol::TurnAbortedEvent {
reason: TurnAbortReason::Interrupted,
@@ -2314,12 +2345,14 @@ fn ui_snapshots_small_heights_task_running() {
let (mut chat, _rx, _op_rx) = make_chatwidget_manual(None);
// Activate status line
chat.handle_codex_event(Event {
agent_idx: None,
id: "task-1".into(),
msg: EventMsg::TaskStarted(TaskStartedEvent {
model_context_window: None,
}),
});
chat.handle_codex_event(Event {
agent_idx: None,
id: "task-1".into(),
msg: EventMsg::AgentReasoningDelta(AgentReasoningDeltaEvent {
delta: "**Thinking**".into(),
@@ -2345,6 +2378,7 @@ fn status_widget_and_approval_modal_snapshot() {
let (mut chat, _rx, _op_rx) = make_chatwidget_manual(None);
// Begin a running task so the status indicator would be active.
chat.handle_codex_event(Event {
agent_idx: None,
id: "task-1".into(),
msg: EventMsg::TaskStarted(TaskStartedEvent {
model_context_window: None,
@@ -2352,6 +2386,7 @@ fn status_widget_and_approval_modal_snapshot() {
});
// Provide a deterministic header for the status line.
chat.handle_codex_event(Event {
agent_idx: None,
id: "task-1".into(),
msg: EventMsg::AgentReasoningDelta(AgentReasoningDeltaEvent {
delta: "**Analyzing**".into(),
@@ -2374,6 +2409,7 @@ fn status_widget_and_approval_modal_snapshot() {
parsed_cmd: vec![],
};
chat.handle_codex_event(Event {
agent_idx: None,
id: "sub-approve-exec".into(),
msg: EventMsg::ExecApprovalRequest(ev),
});
@@ -2397,6 +2433,7 @@ fn status_widget_active_snapshot() {
let (mut chat, _rx, _op_rx) = make_chatwidget_manual(None);
// Activate the status indicator by simulating a task start.
chat.handle_codex_event(Event {
agent_idx: None,
id: "task-1".into(),
msg: EventMsg::TaskStarted(TaskStartedEvent {
model_context_window: None,
@@ -2404,6 +2441,7 @@ fn status_widget_active_snapshot() {
});
// Provide a deterministic header via a bold reasoning chunk.
chat.handle_codex_event(Event {
agent_idx: None,
id: "task-1".into(),
msg: EventMsg::AgentReasoningDelta(AgentReasoningDeltaEvent {
delta: "**Analyzing**".into(),
@@ -2425,6 +2463,7 @@ fn mcp_startup_header_booting_snapshot() {
chat.show_welcome_banner = false;
chat.handle_codex_event(Event {
agent_idx: None,
id: "mcp-1".into(),
msg: EventMsg::McpStartupUpdate(McpStartupUpdateEvent {
server: "alpha".into(),
@@ -2446,6 +2485,7 @@ fn background_event_updates_status_header() {
let (mut chat, mut rx, _op_rx) = make_chatwidget_manual(None);
chat.handle_codex_event(Event {
agent_idx: None,
id: "bg-1".into(),
msg: EventMsg::BackgroundEvent(BackgroundEventEvent {
message: "Waiting for `vim`".to_string(),
@@ -2477,6 +2517,7 @@ fn apply_patch_events_emit_history_cells() {
grant_root: None,
};
chat.handle_codex_event(Event {
agent_idx: None,
id: "s1".into(),
msg: EventMsg::ApplyPatchApprovalRequest(ev),
});
@@ -2517,6 +2558,7 @@ fn apply_patch_events_emit_history_cells() {
changes: changes2,
};
chat.handle_codex_event(Event {
agent_idx: None,
id: "s1".into(),
msg: EventMsg::PatchApplyBegin(begin),
});
@@ -2545,6 +2587,7 @@ fn apply_patch_events_emit_history_cells() {
changes: end_changes,
};
chat.handle_codex_event(Event {
agent_idx: None,
id: "s1".into(),
msg: EventMsg::PatchApplyEnd(end),
});
@@ -2567,6 +2610,7 @@ fn apply_patch_manual_approval_adjusts_header() {
},
);
chat.handle_codex_event(Event {
agent_idx: None,
id: "s1".into(),
msg: EventMsg::ApplyPatchApprovalRequest(ApplyPatchApprovalRequestEvent {
call_id: "c1".into(),
@@ -2586,6 +2630,7 @@ fn apply_patch_manual_approval_adjusts_header() {
},
);
chat.handle_codex_event(Event {
agent_idx: None,
id: "s1".into(),
msg: EventMsg::PatchApplyBegin(PatchApplyBeginEvent {
call_id: "c1".into(),
@@ -2616,6 +2661,7 @@ fn apply_patch_manual_flow_snapshot() {
},
);
chat.handle_codex_event(Event {
agent_idx: None,
id: "s1".into(),
msg: EventMsg::ApplyPatchApprovalRequest(ApplyPatchApprovalRequestEvent {
call_id: "c1".into(),
@@ -2639,6 +2685,7 @@ fn apply_patch_manual_flow_snapshot() {
},
);
chat.handle_codex_event(Event {
agent_idx: None,
id: "s1".into(),
msg: EventMsg::PatchApplyBegin(PatchApplyBeginEvent {
call_id: "c1".into(),
@@ -2676,6 +2723,7 @@ fn apply_patch_approval_sends_op_with_submission_id() {
grant_root: None,
};
chat.handle_codex_event(Event {
agent_idx: None,
id: "sub-123".into(),
msg: EventMsg::ApplyPatchApprovalRequest(ev),
});
@@ -2707,6 +2755,7 @@ fn apply_patch_full_flow_integration_like() {
FileChange::Add { content: "".into() },
);
chat.handle_codex_event(Event {
agent_idx: None,
id: "sub-xyz".into(),
msg: EventMsg::ApplyPatchApprovalRequest(ApplyPatchApprovalRequestEvent {
call_id: "call-1".into(),
@@ -2748,6 +2797,7 @@ fn apply_patch_full_flow_integration_like() {
FileChange::Add { content: "".into() },
);
chat.handle_codex_event(Event {
agent_idx: None,
id: "sub-xyz".into(),
msg: EventMsg::PatchApplyBegin(PatchApplyBeginEvent {
call_id: "call-1".into(),
@@ -2762,6 +2812,7 @@ fn apply_patch_full_flow_integration_like() {
FileChange::Add { content: "".into() },
);
chat.handle_codex_event(Event {
agent_idx: None,
id: "sub-xyz".into(),
msg: EventMsg::PatchApplyEnd(PatchApplyEndEvent {
call_id: "call-1".into(),
@@ -2787,6 +2838,7 @@ fn apply_patch_untrusted_shows_approval_modal() {
FileChange::Add { content: "".into() },
);
chat.handle_codex_event(Event {
agent_idx: None,
id: "sub-1".into(),
msg: EventMsg::ApplyPatchApprovalRequest(ApplyPatchApprovalRequestEvent {
call_id: "call-1".into(),
@@ -2836,6 +2888,7 @@ fn apply_patch_request_shows_diff_summary() {
},
);
chat.handle_codex_event(Event {
agent_idx: None,
id: "sub-apply".into(),
msg: EventMsg::ApplyPatchApprovalRequest(ApplyPatchApprovalRequestEvent {
call_id: "call-apply".into(),
@@ -2906,6 +2959,7 @@ fn plan_update_renders_history_cell() {
],
};
chat.handle_codex_event(Event {
agent_idx: None,
id: "sub-1".into(),
msg: EventMsg::PlanUpdate(update),
});
@@ -2927,6 +2981,7 @@ fn stream_error_updates_status_indicator() {
chat.bottom_pane.set_task_running(true);
let msg = "Reconnecting... 2/5";
chat.handle_codex_event(Event {
agent_idx: None,
id: "sub-1".into(),
msg: EventMsg::StreamError(StreamErrorEvent {
message: msg.to_string(),
@@ -2950,6 +3005,7 @@ fn stream_error_updates_status_indicator() {
fn warning_event_adds_warning_history_cell() {
let (mut chat, mut rx, _op_rx) = make_chatwidget_manual(None);
chat.handle_codex_event(Event {
agent_idx: None,
id: "sub-1".into(),
msg: EventMsg::Warning(WarningEvent {
message: "test warning message".to_string(),
@@ -2969,6 +3025,7 @@ fn warning_event_adds_warning_history_cell() {
fn stream_recovery_restores_previous_status_header() {
let (mut chat, mut rx, _op_rx) = make_chatwidget_manual(None);
chat.handle_codex_event(Event {
agent_idx: None,
id: "task".into(),
msg: EventMsg::TaskStarted(TaskStartedEvent {
model_context_window: None,
@@ -2976,6 +3033,7 @@ fn stream_recovery_restores_previous_status_header() {
});
drain_insert_history(&mut rx);
chat.handle_codex_event(Event {
agent_idx: None,
id: "retry".into(),
msg: EventMsg::StreamError(StreamErrorEvent {
message: "Reconnecting... 1/5".to_string(),
@@ -2984,6 +3042,7 @@ fn stream_recovery_restores_previous_status_header() {
});
drain_insert_history(&mut rx);
chat.handle_codex_event(Event {
agent_idx: None,
id: "delta".into(),
msg: EventMsg::AgentMessageDelta(AgentMessageDeltaEvent {
delta: "hello".to_string(),
@@ -3004,6 +3063,7 @@ fn multiple_agent_messages_in_single_turn_emit_multiple_headers() {
// Begin turn
chat.handle_codex_event(Event {
agent_idx: None,
id: "s1".into(),
msg: EventMsg::TaskStarted(TaskStartedEvent {
model_context_window: None,
@@ -3012,6 +3072,7 @@ fn multiple_agent_messages_in_single_turn_emit_multiple_headers() {
// First finalized assistant message
chat.handle_codex_event(Event {
agent_idx: None,
id: "s1".into(),
msg: EventMsg::AgentMessage(AgentMessageEvent {
message: "First message".into(),
@@ -3020,6 +3081,7 @@ fn multiple_agent_messages_in_single_turn_emit_multiple_headers() {
// Second finalized assistant message in the same turn
chat.handle_codex_event(Event {
agent_idx: None,
id: "s1".into(),
msg: EventMsg::AgentMessage(AgentMessageEvent {
message: "Second message".into(),
@@ -3028,6 +3090,7 @@ fn multiple_agent_messages_in_single_turn_emit_multiple_headers() {
// End turn
chat.handle_codex_event(Event {
agent_idx: None,
id: "s1".into(),
msg: EventMsg::TaskComplete(TaskCompleteEvent {
last_agent_message: None,
@@ -3058,12 +3121,14 @@ fn final_reasoning_then_message_without_deltas_are_rendered() {
// No deltas; only final reasoning followed by final message.
chat.handle_codex_event(Event {
agent_idx: None,
id: "s1".into(),
msg: EventMsg::AgentReasoning(AgentReasoningEvent {
text: "I will first analyze the request.".into(),
}),
});
chat.handle_codex_event(Event {
agent_idx: None,
id: "s1".into(),
msg: EventMsg::AgentMessage(AgentMessageEvent {
message: "Here is the result.".into(),
@@ -3085,24 +3150,28 @@ fn deltas_then_same_final_message_are_rendered_snapshot() {
// Stream some reasoning deltas first.
chat.handle_codex_event(Event {
agent_idx: None,
id: "s1".into(),
msg: EventMsg::AgentReasoningDelta(AgentReasoningDeltaEvent {
delta: "I will ".into(),
}),
});
chat.handle_codex_event(Event {
agent_idx: None,
id: "s1".into(),
msg: EventMsg::AgentReasoningDelta(AgentReasoningDeltaEvent {
delta: "first analyze the ".into(),
}),
});
chat.handle_codex_event(Event {
agent_idx: None,
id: "s1".into(),
msg: EventMsg::AgentReasoningDelta(AgentReasoningDeltaEvent {
delta: "request.".into(),
}),
});
chat.handle_codex_event(Event {
agent_idx: None,
id: "s1".into(),
msg: EventMsg::AgentReasoning(AgentReasoningEvent {
text: "request.".into(),
@@ -3111,12 +3180,14 @@ fn deltas_then_same_final_message_are_rendered_snapshot() {
// Then stream answer deltas, followed by the exact same final message.
chat.handle_codex_event(Event {
agent_idx: None,
id: "s1".into(),
msg: EventMsg::AgentMessageDelta(AgentMessageDeltaEvent {
delta: "Here is the ".into(),
}),
});
chat.handle_codex_event(Event {
agent_idx: None,
id: "s1".into(),
msg: EventMsg::AgentMessageDelta(AgentMessageDeltaEvent {
delta: "result.".into(),
@@ -3124,6 +3195,7 @@ fn deltas_then_same_final_message_are_rendered_snapshot() {
});
chat.handle_codex_event(Event {
agent_idx: None,
id: "s1".into(),
msg: EventMsg::AgentMessage(AgentMessageEvent {
message: "Here is the result.".into(),
@@ -3147,6 +3219,7 @@ fn deltas_then_same_final_message_are_rendered_snapshot() {
fn chatwidget_exec_and_status_layout_vt100_snapshot() {
let (mut chat, mut rx, _op_rx) = make_chatwidget_manual(None);
chat.handle_codex_event(Event {
agent_idx: None,
id: "t1".into(),
msg: EventMsg::AgentMessage(AgentMessageEvent { message: "Im going to search the repo for where “Change Approved” is rendered to update that view.".into() }),
});
@@ -3166,6 +3239,7 @@ fn chatwidget_exec_and_status_layout_vt100_snapshot() {
];
let cwd = std::env::current_dir().unwrap_or_else(|_| PathBuf::from("."));
chat.handle_codex_event(Event {
agent_idx: None,
id: "c1".into(),
msg: EventMsg::ExecCommandBegin(ExecCommandBeginEvent {
call_id: "c1".into(),
@@ -3179,6 +3253,7 @@ fn chatwidget_exec_and_status_layout_vt100_snapshot() {
}),
});
chat.handle_codex_event(Event {
agent_idx: None,
id: "c1".into(),
msg: EventMsg::ExecCommandEnd(ExecCommandEndEvent {
call_id: "c1".into(),
@@ -3198,12 +3273,14 @@ fn chatwidget_exec_and_status_layout_vt100_snapshot() {
}),
});
chat.handle_codex_event(Event {
agent_idx: None,
id: "t1".into(),
msg: EventMsg::TaskStarted(TaskStartedEvent {
model_context_window: None,
}),
});
chat.handle_codex_event(Event {
agent_idx: None,
id: "t1".into(),
msg: EventMsg::AgentReasoningDelta(AgentReasoningDeltaEvent {
delta: "**Investigating rendering code**".into(),
@@ -3242,6 +3319,7 @@ fn chatwidget_markdown_code_blocks_vt100_snapshot() {
// Simulate a final agent message via streaming deltas instead of a single message
chat.handle_codex_event(Event {
agent_idx: None,
id: "t1".into(),
msg: EventMsg::TaskStarted(TaskStartedEvent {
model_context_window: None,
@@ -3290,6 +3368,7 @@ printf 'fenced within fenced\n'
}
chat.handle_codex_event(Event {
agent_idx: None,
id: "t1".into(),
msg: EventMsg::AgentMessageDelta(AgentMessageDeltaEvent { delta }),
});
@@ -3313,6 +3392,7 @@ printf 'fenced within fenced\n'
// Finalize the stream without sending a final AgentMessage, to flush any tail.
chat.handle_codex_event(Event {
agent_idx: None,
id: "t1".into(),
msg: EventMsg::TaskComplete(TaskCompleteEvent {
last_agent_message: None,
@@ -3330,6 +3410,7 @@ printf 'fenced within fenced\n'
fn chatwidget_tall() {
let (mut chat, _rx, _op_rx) = make_chatwidget_manual(None);
chat.handle_codex_event(Event {
agent_idx: None,
id: "t1".into(),
msg: EventMsg::TaskStarted(TaskStartedEvent {
model_context_window: None,

View File

@@ -5,6 +5,7 @@ Codex configuration gives you fine-grained control over the model, execution env
## Quick navigation
- [Feature flags](#feature-flags)
- [Agent profiles](#agent-profiles)
- [Model selection](#model-selection)
- [Execution environment](#execution-environment)
- [MCP integration](#mcp-integration)
@@ -50,12 +51,41 @@ Supported features:
| `enable_experimental_windows_sandbox` | false | Experimental | Use the Windows restricted-token sandbox |
| `tui2` | false | Experimental | Use the experimental TUI v2 (viewport) implementation |
| `skills` | false | Experimental | Enable discovery and injection of skills |
| `multi_agents` | false | Experimental | Enable multi-agent collaboration workflows |
Notes:
- Omit a key to accept its default.
- Legacy booleans such as `experimental_use_exec_command_tool`, `experimental_use_unified_exec_tool`, `include_apply_patch_tool`, and similar `experimental_use_*` keys are deprecated; setting the corresponding `[features].<key>` avoids repeated warnings.
## Agent profiles
Codex can optionally load multi-agent profiles from `$CODEX_HOME/agents.toml`. When this file exists and is valid, Codex starts as the `main` profile (agent 0) and, on supported models, exposes the `collaboration_*` tools. Each profile controls which sub-agents it may spawn. The `multi_agents` feature flag must be enabled to run the collaboration workflow.
Example:
```toml
[main]
prompt = "You're the director..."
sub_agents = ["worker", "verifier"]
read_only = true
model = "gpt-5.1-codex-max"
[worker]
prompt = "You must solve a task."
read_only = false
[verifier]
prompt = "Check if a task is correctly solved."
read_only = true
```
Notes:
- `model` is optional for any profile; if omitted, Codex uses the main model.
- `prompt` is optional; if omitted, Codex uses the standard model prompt for that model family.
- `read_only = true` maps to a read-only sandbox; `read_only = false` uses the session's default sandbox policy.
## Model selection
### model