Compare commits

...

1 Commit

Author SHA1 Message Date
starr-openai
2e71ac0f4a Split codex-core into smaller crates
Move several leaf and runtime surfaces out of codex-core so small core edits compile less code. Extract JS/V8 runtime, JS REPL assets, file watching, config editing/loading, tool approval templates, message history, review helpers, memory prompts, agent runtime/control helpers, turn state, and tool spec planning into dedicated crates while keeping codex-core as the orchestration layer.

Validation on dev:

- bazel build //codex-rs/config-loader:config-loader //codex-rs/session-runtime:session-runtime //codex-rs/tool-spec:tool-spec //codex-rs/agent-runtime:agent-runtime //codex-rs/core:core //codex-rs/core:core-unit-tests-bin

- wider combined Bazel build including app-server/code-mode/js-repl/review split crates

- just bazel-lock-check

Co-authored-by: Codex <noreply@openai.com>
2026-04-17 20:03:12 -07:00
128 changed files with 6618 additions and 4137 deletions

142
codex-rs/Cargo.lock generated
View File

@@ -1375,6 +1375,17 @@ dependencies = [
"unicode-width 0.1.14",
]
[[package]]
name = "codex-agent-runtime"
version = "0.0.0"
dependencies = [
"codex-otel",
"codex-protocol",
"pretty_assertions",
"rand 0.9.3",
"tokio",
]
[[package]]
name = "codex-analytics"
version = "0.0.0"
@@ -1454,6 +1465,7 @@ dependencies = [
"codex-backend-client",
"codex-chatgpt",
"codex-cloud-requirements",
"codex-code-mode-runtime",
"codex-config",
"codex-core",
"codex-core-plugins",
@@ -1846,13 +1858,28 @@ dependencies = [
"async-channel",
"async-trait",
"codex-protocol",
"deno_core_icudata",
"pretty_assertions",
"serde",
"serde_json",
"tokio",
"tokio-util",
"tracing",
]
[[package]]
name = "codex-code-mode-runtime"
version = "0.0.0"
dependencies = [
"async-channel",
"async-trait",
"codex-code-mode",
"codex-protocol",
"deno_core_icudata",
"pretty_assertions",
"serde_json",
"tokio",
"tokio-util",
"tracing",
"v8",
]
@@ -1873,6 +1900,7 @@ dependencies = [
"codex-protocol",
"codex-utils-absolute-path",
"codex-utils-path",
"dunce",
"futures",
"multimap",
"pretty_assertions",
@@ -1890,6 +1918,28 @@ dependencies = [
"wildmatch",
]
[[package]]
name = "codex-config-loader"
version = "0.0.0"
dependencies = [
"anyhow",
"base64 0.22.1",
"codex-app-server-protocol",
"codex-config",
"codex-exec-server",
"codex-git-utils",
"codex-protocol",
"codex-utils-absolute-path",
"core-foundation 0.9.4",
"dunce",
"serde",
"tempfile",
"tokio",
"toml 0.9.11+spec-1.1.0",
"tracing",
"windows-sys 0.52.0",
]
[[package]]
name = "codex-connectors"
version = "0.0.0"
@@ -1916,6 +1966,7 @@ dependencies = [
"bm25",
"chrono",
"clap",
"codex-agent-runtime",
"codex-analytics",
"codex-api",
"codex-app-server-protocol",
@@ -1924,6 +1975,7 @@ dependencies = [
"codex-async-utils",
"codex-code-mode",
"codex-config",
"codex-config-loader",
"codex-connectors",
"codex-core-plugins",
"codex-core-skills",
@@ -1931,12 +1983,17 @@ dependencies = [
"codex-execpolicy",
"codex-features",
"codex-feedback",
"codex-file-watcher",
"codex-git-utils",
"codex-hooks",
"codex-instructions",
"codex-js-repl",
"codex-login",
"codex-mcp",
"codex-model-provider",
"codex-mcp-tool-approval",
"codex-memory-prompts",
"codex-message-history",
"codex-model-provider-info",
"codex-models-manager",
"codex-network-proxy",
@@ -1944,16 +2001,19 @@ dependencies = [
"codex-plugin",
"codex-protocol",
"codex-response-debug-context",
"codex-review",
"codex-rmcp-client",
"codex-rollout",
"codex-sandboxing",
"codex-secrets",
"codex-session-runtime",
"codex-shell-command",
"codex-shell-escalation",
"codex-state",
"codex-terminal-detection",
"codex-test-binary-support",
"codex-thread-store",
"codex-tool-spec",
"codex-tools",
"codex-utils-absolute-path",
"codex-utils-cache",
@@ -1969,7 +2029,6 @@ dependencies = [
"codex-utils-string",
"codex-utils-template",
"codex-windows-sandbox",
"core-foundation 0.9.4",
"core_test_support",
"crypto_box",
"csv",
@@ -1987,7 +2046,6 @@ dependencies = [
"insta",
"libc",
"maplit",
"notify",
"once_cell",
"openssl-sys",
"opentelemetry",
@@ -2023,7 +2081,6 @@ dependencies = [
"walkdir",
"which 8.0.0",
"whoami",
"windows-sys 0.52.0",
"wiremock",
"zip 2.4.2",
"zstd 0.13.3",
@@ -2262,6 +2319,17 @@ dependencies = [
"tokio",
]
[[package]]
name = "codex-file-watcher"
version = "0.0.0"
dependencies = [
"notify",
"pretty_assertions",
"tempfile",
"tokio",
"tracing",
]
[[package]]
name = "codex-git-utils"
version = "0.0.0"
@@ -2320,6 +2388,10 @@ dependencies = [
"serde",
]
[[package]]
name = "codex-js-repl"
version = "0.0.0"
[[package]]
name = "codex-keyring-store"
version = "0.0.0"
@@ -2482,6 +2554,35 @@ dependencies = [
"pretty_assertions",
]
[[package]]
name = "codex-mcp-tool-approval"
version = "0.0.0"
dependencies = [
"pretty_assertions",
"serde",
"serde_json",
"tracing",
]
[[package]]
name = "codex-memory-prompts"
version = "0.0.0"
[[package]]
name = "codex-message-history"
version = "0.0.0"
dependencies = [
"codex-config",
"codex-protocol",
"codex-utils-absolute-path",
"pretty_assertions",
"serde",
"serde_json",
"tempfile",
"tokio",
"tracing",
]
[[package]]
name = "codex-model-provider-info"
version = "0.0.0"
@@ -2707,6 +2808,18 @@ dependencies = [
"zeroize",
]
[[package]]
name = "codex-review"
version = "0.0.0"
dependencies = [
"anyhow",
"codex-git-utils",
"codex-protocol",
"codex-utils-absolute-path",
"codex-utils-template",
"pretty_assertions",
]
[[package]]
name = "codex-rmcp-client"
version = "0.0.0"
@@ -2808,6 +2921,17 @@ dependencies = [
"tracing",
]
[[package]]
name = "codex-session-runtime"
version = "0.0.0"
dependencies = [
"codex-protocol",
"codex-rmcp-client",
"codex-sandboxing",
"rmcp",
"tokio",
]
[[package]]
name = "codex-shell-command"
version = "0.0.0"
@@ -2932,6 +3056,15 @@ dependencies = [
"uuid",
]
[[package]]
name = "codex-tool-spec"
version = "0.0.0"
dependencies = [
"codex-mcp",
"codex-protocol",
"codex-tools",
]
[[package]]
name = "codex-tools"
version = "0.0.0"
@@ -3493,6 +3626,7 @@ dependencies = [
"assert_cmd",
"base64 0.22.1",
"codex-arg0",
"codex-code-mode-runtime",
"codex-core",
"codex-exec-server",
"codex-features",

View File

@@ -1,6 +1,7 @@
[workspace]
members = [
"analytics",
"agent-runtime",
"backend-client",
"ansi-escape",
"async-utils",
@@ -16,6 +17,8 @@ members = [
"install-context",
"codex-backend-openapi-models",
"code-mode",
"code-mode-runtime",
"config-loader",
"cloud-requirements",
"cloud-tasks",
"cloud-tasks-client",
@@ -38,11 +41,16 @@ members = [
"execpolicy",
"execpolicy-legacy",
"keyring-store",
"file-watcher",
"file-search",
"js-repl",
"linux-sandbox",
"lmstudio",
"login",
"codex-mcp",
"message-history",
"memory-prompts",
"mcp-tool-approval",
"mcp-server",
"model-provider-info",
"models-manager",
@@ -51,13 +59,16 @@ members = [
"process-hardening",
"protocol",
"realtime-webrtc",
"review",
"rollout",
"rmcp-client",
"responses-api-proxy",
"response-debug-context",
"sandboxing",
"session-runtime",
"stdio-to-uds",
"otel",
"tool-spec",
"tui",
"tools",
"v8-poc",
@@ -109,6 +120,7 @@ license = "Apache-2.0"
# Internal
app_test_support = { path = "app-server/tests/common" }
codex-analytics = { path = "analytics" }
codex-agent-runtime = { path = "agent-runtime" }
codex-ansi-escape = { path = "ansi-escape" }
codex-api = { path = "codex-api" }
codex-app-server = { path = "app-server" }
@@ -127,7 +139,9 @@ codex-cloud-requirements = { path = "cloud-requirements" }
codex-cloud-tasks-client = { path = "cloud-tasks-client" }
codex-cloud-tasks-mock-client = { path = "cloud-tasks-mock-client" }
codex-code-mode = { path = "code-mode" }
codex-code-mode-runtime = { path = "code-mode-runtime" }
codex-config = { path = "config" }
codex-config-loader = { path = "config-loader" }
codex-connectors = { path = "connectors" }
codex-core = { path = "core" }
codex-core-plugins = { path = "core-plugins" }
@@ -138,16 +152,21 @@ codex-execpolicy = { path = "execpolicy" }
codex-experimental-api-macros = { path = "codex-experimental-api-macros" }
codex-features = { path = "features" }
codex-feedback = { path = "feedback" }
codex-file-watcher = { path = "file-watcher" }
codex-install-context = { path = "install-context" }
codex-file-search = { path = "file-search" }
codex-git-utils = { path = "git-utils" }
codex-hooks = { path = "hooks" }
codex-instructions = { path = "instructions" }
codex-js-repl = { path = "js-repl" }
codex-keyring-store = { path = "keyring-store" }
codex-linux-sandbox = { path = "linux-sandbox" }
codex-lmstudio = { path = "lmstudio" }
codex-login = { path = "login" }
codex-mcp = { path = "codex-mcp" }
codex-message-history = { path = "message-history" }
codex-memory-prompts = { path = "memory-prompts" }
codex-mcp-tool-approval = { path = "mcp-tool-approval" }
codex-mcp-server = { path = "mcp-server" }
codex-model-provider-info = { path = "model-provider-info" }
codex-models-manager = { path = "models-manager" }
@@ -161,10 +180,12 @@ codex-protocol = { path = "protocol" }
codex-realtime-webrtc = { path = "realtime-webrtc" }
codex-responses-api-proxy = { path = "responses-api-proxy" }
codex-response-debug-context = { path = "response-debug-context" }
codex-review = { path = "review" }
codex-rmcp-client = { path = "rmcp-client" }
codex-rollout = { path = "rollout" }
codex-sandboxing = { path = "sandboxing" }
codex-secrets = { path = "secrets" }
codex-session-runtime = { path = "session-runtime" }
codex-shell-command = { path = "shell-command" }
codex-shell-escalation = { path = "shell-escalation" }
codex-skills = { path = "skills" }
@@ -173,6 +194,7 @@ codex-stdio-to-uds = { path = "stdio-to-uds" }
codex-terminal-detection = { path = "terminal-detection" }
codex-test-binary-support = { path = "test-binary-support" }
codex-thread-store = { path = "thread-store" }
codex-tool-spec = { path = "tool-spec" }
codex-tools = { path = "tools" }
codex-tui = { path = "tui" }
codex-utils-absolute-path = { path = "utils/absolute-path" }

View File

@@ -0,0 +1,9 @@
load("//:defs.bzl", "codex_rust_crate")
codex_rust_crate(
name = "agent-runtime",
crate_name = "codex_agent_runtime",
compile_data = [
"src/agent_names.txt",
],
)

View File

@@ -0,0 +1,23 @@
[package]
edition.workspace = true
license.workspace = true
name = "codex-agent-runtime"
version.workspace = true
[lib]
doctest = false
name = "codex_agent_runtime"
path = "src/lib.rs"
[lints]
workspace = true
[dependencies]
codex-otel = { workspace = true }
codex-protocol = { workspace = true }
rand = { workspace = true }
tokio = { workspace = true, features = ["sync"] }
[dev-dependencies]
pretty_assertions = { workspace = true }
tokio = { workspace = true, features = ["macros", "rt"] }

View File

@@ -0,0 +1,225 @@
use crate::registry::AgentMetadata;
use codex_protocol::AgentPath;
use codex_protocol::ThreadId;
use codex_protocol::models::MessagePhase;
use codex_protocol::models::ResponseItem;
use codex_protocol::protocol::AgentStatus;
use codex_protocol::protocol::Op;
use codex_protocol::protocol::RolloutItem;
use codex_protocol::protocol::SessionSource;
use codex_protocol::protocol::SubAgentSource;
use codex_protocol::user_input::UserInput;
/// How much of the parent thread's history a forked sub-agent inherits.
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum SpawnAgentForkMode {
    /// Inherit the parent's entire (filtered) history.
    FullHistory,
    /// Inherit only the last N turns of the parent's history.
    LastNTurns(usize),
}
/// Optional settings applied when spawning a sub-agent thread.
#[derive(Clone, Debug, Default)]
pub struct SpawnAgentOptions {
    // Spawn call id on the parent thread this fork originates from, if any.
    pub fork_parent_spawn_call_id: Option<String>,
    // History-fork strategy; `None` presumably means "do not fork history" —
    // TODO(review): confirm against the spawn call site.
    pub fork_mode: Option<SpawnAgentForkMode>,
}
/// Snapshot of a currently running agent: its thread, registry metadata, and
/// latest reported status.
#[derive(Clone, Debug)]
pub struct LiveAgent {
    pub thread_id: ThreadId,
    pub metadata: AgentMetadata,
    pub status: AgentStatus,
}
/// Decides whether a rollout item carries over into a forked sub-agent's
/// history.
///
/// Kept: system/developer/user messages, assistant messages that reached a
/// final answer, and contextual records (compaction markers, events, session
/// metadata, turn context). Dropped: reasoning, every tool call/output
/// variant, and in-progress assistant messages.
pub fn keep_forked_rollout_item(item: &RolloutItem) -> bool {
    match item {
        RolloutItem::ResponseItem(ResponseItem::Message { role, phase, .. }) => match role.as_str()
        {
            // Instructions and user-provided context always carry over.
            "system" | "developer" | "user" => true,
            // Only completed assistant answers survive the fork.
            "assistant" => *phase == Some(MessagePhase::FinalAnswer),
            _ => false,
        },
        // Intermediate model output and tool traffic is not forwarded.
        RolloutItem::ResponseItem(
            ResponseItem::Reasoning { .. }
            | ResponseItem::LocalShellCall { .. }
            | ResponseItem::FunctionCall { .. }
            | ResponseItem::ToolSearchCall { .. }
            | ResponseItem::FunctionCallOutput { .. }
            | ResponseItem::CustomToolCall { .. }
            | ResponseItem::CustomToolCallOutput { .. }
            | ResponseItem::ToolSearchOutput { .. }
            | ResponseItem::WebSearchCall { .. }
            | ResponseItem::ImageGenerationCall { .. }
            | ResponseItem::GhostSnapshot { .. }
            | ResponseItem::Compaction { .. }
            | ResponseItem::Other,
        ) => false,
        // Contextual records are always kept. Deliberately exhaustive so a
        // new RolloutItem variant forces a decision here.
        RolloutItem::Compacted(_)
        | RolloutItem::EventMsg(_)
        | RolloutItem::SessionMeta(_)
        | RolloutItem::TurnContext(_) => true,
    }
}
/// Returns the parent thread id recorded on a `ThreadSpawn` sub-agent
/// session source; `None` for every other kind of source.
pub fn thread_spawn_parent_thread_id(session_source: &SessionSource) -> Option<ThreadId> {
    if let SessionSource::SubAgent(SubAgentSource::ThreadSpawn {
        parent_thread_id, ..
    }) = session_source
    {
        Some(*parent_thread_id)
    } else {
        None
    }
}
/// Returns the spawn depth recorded on a `ThreadSpawn` sub-agent session
/// source; `None` for every other kind of source.
pub fn thread_spawn_depth(session_source: &SessionSource) -> Option<i32> {
    let SessionSource::SubAgent(SubAgentSource::ThreadSpawn { depth, .. }) = session_source else {
        return None;
    };
    Some(*depth)
}
/// Reports whether `agent_path` equals `prefix` or lies underneath it in the
/// agent tree. The root prefix matches everything, including `None`.
pub fn agent_matches_prefix(agent_path: Option<&AgentPath>, prefix: &AgentPath) -> bool {
    if prefix.is_root() {
        return true;
    }
    let Some(candidate) = agent_path else {
        return false;
    };
    if candidate == prefix {
        return true;
    }
    // A descendant is the prefix followed immediately by a `/` separator, so
    // `/root/work` does not match `/root/worker`.
    match candidate.as_str().strip_prefix(prefix.as_str()) {
        Some(remainder) => remainder.starts_with('/'),
        None => false,
    }
}
pub fn render_input_preview(initial_operation: &Op) -> String {
match initial_operation {
Op::UserInput { items, .. } => items
.iter()
.map(|item| match item {
UserInput::Text { text, .. } => text.clone(),
UserInput::Image { .. } => "[image]".to_string(),
UserInput::LocalImage { path } => format!("[local_image:{}]", path.display()),
UserInput::Skill { name, path } => format!("[skill:${name}]({})", path.display()),
UserInput::Mention { name, path } => format!("[mention:${name}]({path})"),
_ => "[input]".to_string(),
})
.collect::<Vec<_>>()
.join("\n"),
Op::InterAgentCommunication { communication } => communication.content.clone(),
_ => String::new(),
}
}
#[cfg(test)]
mod tests {
    use super::*;
    use codex_protocol::models::ContentItem;
    use codex_protocol::protocol::InterAgentCommunication;
    use pretty_assertions::assert_eq;
    // Builds an AgentPath from a literal, panicking on invalid input.
    fn agent_path(path: &str) -> AgentPath {
        AgentPath::try_from(path).expect("valid agent path")
    }
    // Text, image, and mention items each get their own preview line.
    #[test]
    fn render_input_preview_summarizes_user_input_items() {
        let op = Op::UserInput {
            items: vec![
                UserInput::Text {
                    text: "hello".to_string(),
                    text_elements: Vec::new(),
                },
                UserInput::Image {
                    image_url: "data:image/png;base64,abc".to_string(),
                },
                UserInput::Mention {
                    name: "doc".to_string(),
                    path: "app://doc".to_string(),
                },
            ],
            final_output_json_schema: None,
            responsesapi_client_metadata: None,
        };
        assert_eq!(
            render_input_preview(&op),
            "hello\n[image]\n[mention:$doc](app://doc)"
        );
    }
    // Inter-agent mail previews as its raw content string.
    #[test]
    fn render_input_preview_uses_inter_agent_message_content() {
        let communication = InterAgentCommunication::new(
            AgentPath::root(),
            agent_path("/root/worker"),
            Vec::new(),
            "wake up".to_string(),
            /*trigger_turn*/ true,
        );
        let op = Op::InterAgentCommunication { communication };
        assert_eq!(render_input_preview(&op), "wake up");
    }
    // Root matches everything; otherwise only exact paths and descendants
    // (with a `/` boundary) match, and `None` never matches a non-root prefix.
    #[test]
    fn agent_matches_prefix_accepts_root_exact_and_descendants() {
        let worker = agent_path("/root/worker");
        let worker_child = agent_path("/root/worker/child");
        let other = agent_path("/root/other");
        assert!(agent_matches_prefix(Some(&worker), &AgentPath::root()));
        assert!(agent_matches_prefix(Some(&worker), &worker));
        assert!(agent_matches_prefix(Some(&worker_child), &worker));
        assert!(!agent_matches_prefix(Some(&other), &worker));
        assert!(!agent_matches_prefix(/*agent_path*/ None, &worker));
    }
    // Parent id / depth accessors return Some only for ThreadSpawn sources.
    #[test]
    fn thread_spawn_parent_and_depth_only_match_thread_spawn_sources() {
        let parent_thread_id = ThreadId::new();
        let session_source = SessionSource::SubAgent(SubAgentSource::ThreadSpawn {
            parent_thread_id,
            depth: 2,
            agent_path: None,
            agent_nickname: None,
            agent_role: None,
        });
        assert_eq!(
            thread_spawn_parent_thread_id(&session_source),
            Some(parent_thread_id)
        );
        assert_eq!(thread_spawn_depth(&session_source), Some(2));
        assert_eq!(thread_spawn_parent_thread_id(&SessionSource::Cli), None);
        assert_eq!(
            thread_spawn_depth(&SessionSource::SubAgent(SubAgentSource::Review)),
            None
        );
    }
    // Final-answer assistant messages pass the fork filter; in-progress
    // assistant messages and tool/other items do not.
    #[test]
    fn forked_rollout_filter_keeps_only_contextual_items_and_final_assistant_messages() {
        let final_assistant_message = RolloutItem::ResponseItem(ResponseItem::Message {
            id: None,
            role: "assistant".to_string(),
            content: vec![ContentItem::OutputText {
                text: "done".to_string(),
            }],
            end_turn: None,
            phase: Some(MessagePhase::FinalAnswer),
        });
        let in_progress_assistant_message = RolloutItem::ResponseItem(ResponseItem::Message {
            id: None,
            role: "assistant".to_string(),
            content: vec![ContentItem::OutputText {
                text: "thinking".to_string(),
            }],
            end_turn: None,
            phase: None,
        });
        assert!(keep_forked_rollout_item(&final_assistant_message));
        assert!(!keep_forked_rollout_item(&in_progress_assistant_message));
        assert!(!keep_forked_rollout_item(&RolloutItem::ResponseItem(
            ResponseItem::Other
        )));
    }
}

View File

@@ -0,0 +1,34 @@
//! Shared runtime primitives for Codex multi-agent orchestration.
pub mod control;
pub mod mailbox;
pub mod registry;
pub mod status;
pub use codex_protocol::protocol::AgentStatus;
pub use control::LiveAgent;
pub use control::SpawnAgentForkMode;
pub use control::SpawnAgentOptions;
pub use control::agent_matches_prefix;
pub use control::keep_forked_rollout_item;
pub use control::render_input_preview;
pub use control::thread_spawn_depth;
pub use control::thread_spawn_parent_thread_id;
pub use mailbox::Mailbox;
pub use mailbox::MailboxReceiver;
pub use registry::AgentMetadata;
pub use registry::AgentRegistry;
pub use registry::SpawnReservation;
pub use registry::exceeds_thread_spawn_depth_limit;
pub use registry::next_thread_spawn_depth;
pub use status::agent_status_from_event;
// Newline-delimited nickname pool bundled into the binary at compile time.
const AGENT_NAMES: &str = include_str!("agent_names.txt");
/// Returns the built-in nickname pool: one entry per non-empty,
/// whitespace-trimmed line of `agent_names.txt`.
pub fn default_agent_nickname_list() -> Vec<&'static str> {
    let mut names = Vec::new();
    for line in AGENT_NAMES.lines() {
        let name = line.trim();
        if !name.is_empty() {
            names.push(name);
        }
    }
    names
}

View File

@@ -0,0 +1,161 @@
use codex_protocol::protocol::InterAgentCommunication;
use std::collections::VecDeque;
use std::sync::atomic::AtomicU64;
use std::sync::atomic::Ordering;
use tokio::sync::mpsc;
use tokio::sync::watch;
#[cfg(test)]
use codex_protocol::AgentPath;
/// Sending half of an inter-agent mail channel. Each sent mail is assigned a
/// monotonically increasing sequence number that waiters can observe through
/// `subscribe`.
pub struct Mailbox {
    // Unbounded queue of outgoing mail.
    tx: mpsc::UnboundedSender<InterAgentCommunication>,
    // Holds the previously issued sequence number; the next send gets +1.
    next_seq: AtomicU64,
    // Broadcasts the latest assigned sequence number to subscribers.
    seq_tx: watch::Sender<u64>,
}
/// Receiving half of the mail channel. Mail is pulled from the channel into
/// a local queue so callers can inspect pending mail without consuming it.
pub struct MailboxReceiver {
    rx: mpsc::UnboundedReceiver<InterAgentCommunication>,
    // Mail taken off the channel but not yet handed out via `drain`.
    pending_mails: VecDeque<InterAgentCommunication>,
}
impl Mailbox {
    /// Creates a connected `(Mailbox, MailboxReceiver)` pair with the
    /// sequence counter at 0, so the first mail gets sequence number 1.
    pub fn new() -> (Self, MailboxReceiver) {
        let (tx, rx) = mpsc::unbounded_channel();
        let (seq_tx, _) = watch::channel(0);
        (
            Self {
                tx,
                next_seq: AtomicU64::new(0),
                seq_tx,
            },
            MailboxReceiver {
                rx,
                pending_mails: VecDeque::new(),
            },
        )
    }
    /// Subscribes to sequence updates; the watch value is the highest
    /// sequence number published so far.
    pub fn subscribe(&self) -> watch::Receiver<u64> {
        self.seq_tx.subscribe()
    }
    /// Queues a mail and returns its 1-based sequence number.
    ///
    /// The mail is enqueued before the watch value is updated so a waiter
    /// woken by the sequence change always finds the mail in the channel. A
    /// closed receiver makes the enqueue a silent no-op.
    pub fn send(&self, communication: InterAgentCommunication) -> u64 {
        // The counter stores the previous sequence, so +1 yields this mail's.
        let seq = self.next_seq.fetch_add(1, Ordering::Relaxed) + 1;
        let _ = self.tx.send(communication);
        // Fix: publish the maximum rather than blindly replacing. With
        // `send_replace`, two concurrent `send` calls could interleave so the
        // smaller sequence is written last, making the watch value go
        // backwards and stranding a waiter polling for `*rx.borrow() >= seq`.
        self.seq_tx
            .send_modify(|latest| *latest = (*latest).max(seq));
        seq
    }
}
impl MailboxReceiver {
    /// Moves everything currently sitting in the channel into the local
    /// queue without blocking.
    fn sync_pending_mails(&mut self) {
        while let Ok(incoming) = self.rx.try_recv() {
            self.pending_mails.push_back(incoming);
        }
    }
    /// Returns true when at least one undelivered mail is queued.
    pub fn has_pending(&mut self) -> bool {
        self.sync_pending_mails();
        !self.pending_mails.is_empty()
    }
    /// Returns true when any queued mail asks for a turn to be triggered.
    pub fn has_pending_trigger_turn(&mut self) -> bool {
        self.sync_pending_mails();
        self.pending_mails
            .iter()
            .any(|pending| pending.trigger_turn)
    }
    /// Removes and returns all queued mail in delivery order.
    pub fn drain(&mut self) -> Vec<InterAgentCommunication> {
        self.sync_pending_mails();
        std::mem::take(&mut self.pending_mails).into_iter().collect()
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use pretty_assertions::assert_eq;
    // Convenience constructor for a test mail.
    fn make_mail(
        author: AgentPath,
        recipient: AgentPath,
        content: &str,
        trigger_turn: bool,
    ) -> InterAgentCommunication {
        InterAgentCommunication::new(
            author,
            recipient,
            Vec::new(),
            content.to_string(),
            trigger_turn,
        )
    }
    // Sequence numbers start at 1 and increase; the watch ends at the latest.
    #[tokio::test]
    async fn mailbox_assigns_monotonic_sequence_numbers() {
        let (mailbox, _receiver) = Mailbox::new();
        let mut seq_rx = mailbox.subscribe();
        let seq_a = mailbox.send(make_mail(
            AgentPath::root(),
            AgentPath::try_from("/root/worker").expect("agent path"),
            "one",
            /*trigger_turn*/ false,
        ));
        let seq_b = mailbox.send(make_mail(
            AgentPath::root(),
            AgentPath::try_from("/root/worker").expect("agent path"),
            "two",
            /*trigger_turn*/ false,
        ));
        seq_rx.changed().await.expect("first seq update");
        assert_eq!(*seq_rx.borrow(), seq_b);
        assert_eq!(seq_a, 1);
        assert_eq!(seq_b, 2);
    }
    // drain() yields mail in send order and empties the queue.
    #[tokio::test]
    async fn mailbox_drains_in_delivery_order() {
        let (mailbox, mut receiver) = Mailbox::new();
        let mail_one = make_mail(
            AgentPath::root(),
            AgentPath::try_from("/root/worker").expect("agent path"),
            "one",
            /*trigger_turn*/ false,
        );
        let mail_two = make_mail(
            AgentPath::try_from("/root/worker").expect("agent path"),
            AgentPath::root(),
            "two",
            /*trigger_turn*/ false,
        );
        mailbox.send(mail_one.clone());
        mailbox.send(mail_two.clone());
        assert_eq!(receiver.drain(), vec![mail_one, mail_two]);
        assert!(!receiver.has_pending());
    }
    // has_pending_trigger_turn is false until a trigger_turn mail arrives.
    #[tokio::test]
    async fn mailbox_tracks_pending_trigger_turn_mail() {
        let (mailbox, mut receiver) = Mailbox::new();
        mailbox.send(make_mail(
            AgentPath::root(),
            AgentPath::try_from("/root/worker").expect("agent path"),
            "queued",
            /*trigger_turn*/ false,
        ));
        assert!(!receiver.has_pending_trigger_turn());
        mailbox.send(make_mail(
            AgentPath::root(),
            AgentPath::try_from("/root/worker").expect("agent path"),
            "wake",
            /*trigger_turn*/ true,
        ));
        assert!(receiver.has_pending_trigger_turn());
    }
}

View File

@@ -0,0 +1,344 @@
use codex_protocol::AgentPath;
use codex_protocol::ThreadId;
use codex_protocol::error::CodexErr;
use codex_protocol::error::Result;
use codex_protocol::protocol::SessionSource;
use codex_protocol::protocol::SubAgentSource;
use rand::prelude::IndexedRandom;
use std::collections::HashMap;
use std::collections::HashSet;
use std::collections::hash_map::Entry;
use std::sync::Arc;
use std::sync::Mutex;
use std::sync::atomic::AtomicUsize;
use std::sync::atomic::Ordering;
/// Session-wide registry that bounds Codex's multi-agent capabilities. In
/// the current implementation it limits the total number of sub-agents
/// (i.e. threads) per user session.
///
/// One instance is shared by all agents in the same user session (because
/// the `AgentControl` is).
#[derive(Default)]
pub struct AgentRegistry {
    // Registered agents keyed by agent path (or "thread:<id>" fallback),
    // plus nickname bookkeeping; guarded by a mutex.
    active_agents: Mutex<ActiveAgents>,
    // Count of reserved + live non-root agents, checked against max_threads.
    total_count: AtomicUsize,
}
#[derive(Default)]
struct ActiveAgents {
    // Agent-path string -> metadata for every reserved or live agent.
    agent_tree: HashMap<String, AgentMetadata>,
    // Nicknames handed out since the last pool reset, to avoid duplicates.
    used_agent_nicknames: HashSet<String>,
    // How many times the nickname pool has been exhausted and recycled.
    nickname_reset_count: usize,
}
/// Descriptive data tracked for each agent in the registry.
#[derive(Clone, Debug, Default)]
pub struct AgentMetadata {
    // None while the slot is only reserved and no thread exists yet.
    pub agent_id: Option<ThreadId>,
    pub agent_path: Option<AgentPath>,
    pub agent_nickname: Option<String>,
    pub agent_role: Option<String>,
    // Most recent task message, used for status displays.
    pub last_task_message: Option<String>,
}
/// Formats a nickname from the shared pool. On the first pass through the
/// pool the bare name is used; after the pool has been reset N times the
/// name gets an English ordinal ("Plato the 2nd", "Plato the 3rd", ...).
fn format_agent_nickname(name: &str, nickname_reset_count: usize) -> String {
    if nickname_reset_count == 0 {
        return name.to_string();
    }
    // Reset count N means this is the (N + 1)-th pass through the pool.
    let value = nickname_reset_count + 1;
    // 11, 12, 13 (and 111, 112, ...) take "th" despite ending in 1/2/3.
    let suffix = if matches!(value % 100, 11..=13) {
        "th"
    } else {
        match value % 10 {
            1 => "st", // codespell:ignore
            2 => "nd", // codespell:ignore
            3 => "rd", // codespell:ignore
            _ => "th", // codespell:ignore
        }
    };
    format!("{name} the {value}{suffix}")
}
/// Returns the depth recorded on a `ThreadSpawn` sub-agent source; every
/// other session source (including other sub-agent kinds) counts as depth 0.
fn session_depth(session_source: &SessionSource) -> i32 {
    if let SessionSource::SubAgent(SubAgentSource::ThreadSpawn { depth, .. }) = session_source {
        *depth
    } else {
        0
    }
}
/// Depth a newly spawned child thread should record: the parent's depth plus
/// one, saturating at `i32::MAX`.
pub fn next_thread_spawn_depth(session_source: &SessionSource) -> i32 {
    session_depth(session_source).saturating_add(1)
}
/// True when `depth` is strictly beyond the configured maximum spawn depth.
pub fn exceeds_thread_spawn_depth_limit(depth: i32, max_depth: i32) -> bool {
    depth > max_depth
}
impl AgentRegistry {
    /// Reserves capacity for one new sub-agent thread.
    ///
    /// With `Some(max_threads)` the reservation fails with
    /// `CodexErr::AgentLimitReached` once the live count is at the limit;
    /// with `None` the count is still tracked but never capped. The returned
    /// `SpawnReservation` gives the slot back on drop unless `commit` is
    /// called.
    pub fn reserve_spawn_slot(
        self: &Arc<Self>,
        max_threads: Option<usize>,
    ) -> Result<SpawnReservation> {
        if let Some(max_threads) = max_threads {
            if !self.try_increment_spawned(max_threads) {
                return Err(CodexErr::AgentLimitReached { max_threads });
            }
        } else {
            // No limit: still count the thread so later releases balance.
            self.total_count.fetch_add(1, Ordering::AcqRel);
        }
        Ok(SpawnReservation {
            state: Arc::clone(self),
            active: true,
            reserved_agent_nickname: None,
            reserved_agent_path: None,
        })
    }
    /// Removes a finished thread from the tree and frees its counted slot.
    ///
    /// Root agents live in the tree but are never counted against the limit,
    /// so removing one does not decrement the total. An unknown (or already
    /// released) thread id is a no-op, which makes release idempotent.
    pub fn release_spawned_thread(&self, thread_id: ThreadId) {
        let removed_counted_agent = {
            // Recover the guarded data even if a panicking holder poisoned
            // the lock — registry state stays usable.
            let mut active_agents = self
                .active_agents
                .lock()
                .unwrap_or_else(std::sync::PoisonError::into_inner);
            // Find the tree key owned by this thread id (agents are keyed by
            // path, not id, so scan the values).
            let removed_key = active_agents
                .agent_tree
                .iter()
                .find_map(|(key, metadata)| (metadata.agent_id == Some(thread_id)).then_some(key))
                .cloned();
            removed_key
                .and_then(|key| active_agents.agent_tree.remove(key.as_str()))
                .is_some_and(|metadata| {
                    !metadata.agent_path.as_ref().is_some_and(AgentPath::is_root)
                })
        };
        if removed_counted_agent {
            self.total_count.fetch_sub(1, Ordering::AcqRel);
        }
    }
    /// Records the session's root thread under the root path. A no-op if a
    /// root entry already exists (the first registration wins).
    pub fn register_root_thread(&self, thread_id: ThreadId) {
        let mut active_agents = self
            .active_agents
            .lock()
            .unwrap_or_else(std::sync::PoisonError::into_inner);
        active_agents
            .agent_tree
            .entry(AgentPath::ROOT.to_string())
            .or_insert_with(|| AgentMetadata {
                agent_id: Some(thread_id),
                agent_path: Some(AgentPath::root()),
                ..Default::default()
            });
    }
    /// Looks up the thread id registered at `agent_path`, if any. Returns
    /// `None` for path-only reservations that have no thread yet.
    pub fn agent_id_for_path(&self, agent_path: &AgentPath) -> Option<ThreadId> {
        self.active_agents
            .lock()
            .unwrap_or_else(std::sync::PoisonError::into_inner)
            .agent_tree
            .get(agent_path.as_str())
            .and_then(|metadata| metadata.agent_id)
    }
    /// Returns a copy of the metadata for the agent running `thread_id`.
    pub fn agent_metadata_for_thread(&self, thread_id: ThreadId) -> Option<AgentMetadata> {
        self.active_agents
            .lock()
            .unwrap_or_else(std::sync::PoisonError::into_inner)
            .agent_tree
            .values()
            .find(|metadata| metadata.agent_id == Some(thread_id))
            .cloned()
    }
    /// Snapshots the metadata of every live non-root agent (entries with a
    /// thread id; path-only reservations are excluded).
    pub fn live_agents(&self) -> Vec<AgentMetadata> {
        self.active_agents
            .lock()
            .unwrap_or_else(std::sync::PoisonError::into_inner)
            .agent_tree
            .values()
            .filter(|metadata| {
                metadata.agent_id.is_some()
                    && !metadata.agent_path.as_ref().is_some_and(AgentPath::is_root)
            })
            .cloned()
            .collect()
    }
    /// Updates the stored last-task message for the agent on `thread_id`;
    /// silently ignored when the thread is not registered.
    pub fn update_last_task_message(&self, thread_id: ThreadId, last_task_message: String) {
        let mut active_agents = self
            .active_agents
            .lock()
            .unwrap_or_else(std::sync::PoisonError::into_inner);
        if let Some(metadata) = active_agents
            .agent_tree
            .values_mut()
            .find(|metadata| metadata.agent_id == Some(thread_id))
        {
            metadata.last_task_message = Some(last_task_message);
        }
    }
    /// Inserts a fully spawned agent into the tree, keyed by its agent path
    /// (or a "thread:<id>" fallback when no path was assigned). Overwrites a
    /// path-only reservation at the same key. Metadata without a thread id
    /// is rejected.
    fn register_spawned_thread(&self, agent_metadata: AgentMetadata) {
        let Some(thread_id) = agent_metadata.agent_id else {
            return;
        };
        let mut active_agents = self
            .active_agents
            .lock()
            .unwrap_or_else(std::sync::PoisonError::into_inner);
        let key = agent_metadata
            .agent_path
            .as_ref()
            .map(ToString::to_string)
            .unwrap_or_else(|| format!("thread:{thread_id}"));
        if let Some(agent_nickname) = agent_metadata.agent_nickname.clone() {
            active_agents.used_agent_nicknames.insert(agent_nickname);
        }
        active_agents.agent_tree.insert(key, agent_metadata);
    }
    /// Picks an unused nickname from `names` (or honors `preferred` without
    /// checking it against the pool) and marks it used.
    ///
    /// When every formatted name is taken, the used set is cleared, the
    /// reset counter bumps (so later names get ordinals), and a random name
    /// is drawn from the fresh pool. Returns `None` only when `names` is
    /// empty and no preference was given.
    fn reserve_agent_nickname(&self, names: &[&str], preferred: Option<&str>) -> Option<String> {
        let mut active_agents = self
            .active_agents
            .lock()
            .unwrap_or_else(std::sync::PoisonError::into_inner);
        let agent_nickname = if let Some(preferred) = preferred {
            preferred.to_string()
        } else {
            if names.is_empty() {
                return None;
            }
            let available_names: Vec<String> = names
                .iter()
                .map(|name| format_agent_nickname(name, active_agents.nickname_reset_count))
                .filter(|name| !active_agents.used_agent_nicknames.contains(name))
                .collect();
            if let Some(name) = available_names.choose(&mut rand::rng()) {
                name.clone()
            } else {
                // Pool exhausted: recycle it and record the event.
                active_agents.used_agent_nicknames.clear();
                active_agents.nickname_reset_count += 1;
                if let Some(metrics) = codex_otel::global() {
                    let _ = metrics.counter(
                        "codex.multi_agent.nickname_pool_reset",
                        /*inc*/ 1,
                        &[],
                    );
                }
                format_agent_nickname(
                    names.choose(&mut rand::rng())?,
                    active_agents.nickname_reset_count,
                )
            }
        };
        active_agents
            .used_agent_nicknames
            .insert(agent_nickname.clone());
        Some(agent_nickname)
    }
    /// Claims `agent_path` with a placeholder (no thread id) entry so no
    /// concurrent spawn can take the same path; errs if already occupied.
    fn reserve_agent_path(&self, agent_path: &AgentPath) -> Result<()> {
        let mut active_agents = self
            .active_agents
            .lock()
            .unwrap_or_else(std::sync::PoisonError::into_inner);
        match active_agents.agent_tree.entry(agent_path.to_string()) {
            Entry::Occupied(_) => Err(CodexErr::UnsupportedOperation(format!(
                "agent path `{agent_path}` already exists"
            ))),
            Entry::Vacant(entry) => {
                entry.insert(AgentMetadata {
                    agent_path: Some(agent_path.clone()),
                    ..Default::default()
                });
                Ok(())
            }
        }
    }
    /// Removes a placeholder created by `reserve_agent_path`, but only while
    /// it is still a placeholder — a committed agent (with a thread id) at
    /// the same path is left alone.
    fn release_reserved_agent_path(&self, agent_path: &AgentPath) {
        let mut active_agents = self
            .active_agents
            .lock()
            .unwrap_or_else(std::sync::PoisonError::into_inner);
        if active_agents
            .agent_tree
            .get(agent_path.as_str())
            .is_some_and(|metadata| metadata.agent_id.is_none())
        {
            active_agents.agent_tree.remove(agent_path.as_str());
        }
    }
    /// Lock-free increment of the spawn count, refusing to pass
    /// `max_threads`. Standard compare-exchange retry loop; the weak form is
    /// fine because spurious failures just retry.
    fn try_increment_spawned(&self, max_threads: usize) -> bool {
        let mut current = self.total_count.load(Ordering::Acquire);
        loop {
            if current >= max_threads {
                return false;
            }
            match self.total_count.compare_exchange_weak(
                current,
                current + 1,
                Ordering::AcqRel,
                Ordering::Acquire,
            ) {
                Ok(_) => return true,
                Err(updated) => current = updated,
            }
        }
    }
}
/// RAII guard for one reserved spawn slot. Dropping an uncommitted
/// reservation returns the slot (and any reserved path) to the registry;
/// `commit` converts it into a registered live agent.
pub struct SpawnReservation {
    state: Arc<AgentRegistry>,
    // True until commit; drives the rollback in Drop.
    active: bool,
    // Nickname claimed so far, if any. NOTE(review): not released on drop —
    // presumably acceptable until the next pool reset; confirm.
    reserved_agent_nickname: Option<String>,
    // Placeholder path claimed so far; released on drop if uncommitted.
    reserved_agent_path: Option<AgentPath>,
}
impl SpawnReservation {
    /// Claims a nickname (honoring `preferred` when given) and records it on
    /// the reservation. Errs when the pool is empty and nothing is preferred.
    pub fn reserve_agent_nickname_with_preference(
        &mut self,
        names: &[&str],
        preferred: Option<&str>,
    ) -> Result<String> {
        let agent_nickname = self
            .state
            .reserve_agent_nickname(names, preferred)
            .ok_or_else(|| {
                CodexErr::UnsupportedOperation("no available agent nicknames".to_string())
            })?;
        self.reserved_agent_nickname = Some(agent_nickname.clone());
        Ok(agent_nickname)
    }
    /// Claims `agent_path` in the registry so no concurrent spawn can take
    /// it; the claim is rolled back if the reservation is dropped.
    pub fn reserve_agent_path(&mut self, agent_path: &AgentPath) -> Result<()> {
        self.state.reserve_agent_path(agent_path)?;
        self.reserved_agent_path = Some(agent_path.clone());
        Ok(())
    }
    /// Finalizes the reservation: registers the agent and disarms the Drop
    /// rollback, so the slot stays held until `release_spawned_thread`.
    pub fn commit(mut self, agent_metadata: AgentMetadata) {
        // Clear the reserved fields first so Drop (which still runs) cannot
        // release what is now owned by the registered agent.
        self.reserved_agent_nickname = None;
        self.reserved_agent_path = None;
        self.state.register_spawned_thread(agent_metadata);
        self.active = false;
    }
}
impl Drop for SpawnReservation {
    // Rolls back an uncommitted reservation: removes the placeholder path
    // entry (if one was claimed) and returns the counted slot. Committed
    // reservations have `active == false` and do nothing here.
    fn drop(&mut self) {
        if self.active {
            if let Some(agent_path) = self.reserved_agent_path.take() {
                self.state.release_reserved_agent_path(&agent_path);
            }
            self.state.total_count.fetch_sub(1, Ordering::AcqRel);
        }
    }
}
#[cfg(test)]
#[path = "registry_tests.rs"]
mod tests;

View File

@@ -0,0 +1,350 @@
use super::*;
use codex_protocol::AgentPath;
use pretty_assertions::assert_eq;
use std::collections::HashSet;
// Builds an AgentPath from a literal, panicking on invalid input.
fn agent_path(path: &str) -> AgentPath {
    AgentPath::try_from(path).expect("valid agent path")
}
// Minimal metadata with only a thread id, as produced by a plain spawn.
fn agent_metadata(thread_id: ThreadId) -> AgentMetadata {
    AgentMetadata {
        agent_id: Some(thread_id),
        ..Default::default()
    }
}
// Ordinal suffixes kick in only after a pool reset, with 11th-13th special-cased.
#[test]
fn format_agent_nickname_adds_ordinals_after_reset() {
    assert_eq!(
        format_agent_nickname("Plato", /*nickname_reset_count*/ 0),
        "Plato"
    );
    assert_eq!(
        format_agent_nickname("Plato", /*nickname_reset_count*/ 1),
        "Plato the 2nd"
    );
    assert_eq!(
        format_agent_nickname("Plato", /*nickname_reset_count*/ 2),
        "Plato the 3rd"
    );
    assert_eq!(
        format_agent_nickname("Plato", /*nickname_reset_count*/ 10),
        "Plato the 11th"
    );
    assert_eq!(
        format_agent_nickname("Plato", /*nickname_reset_count*/ 20),
        "Plato the 21st"
    );
}
// Non-sub-agent session sources sit at depth 0.
#[test]
fn session_depth_defaults_to_zero_for_root_sources() {
    assert_eq!(session_depth(&SessionSource::Cli), 0);
}
// A child spawned from depth 1 records depth 2 and trips a max_depth of 1.
#[test]
fn thread_spawn_depth_increments_and_enforces_limit() {
    let session_source = SessionSource::SubAgent(SubAgentSource::ThreadSpawn {
        parent_thread_id: ThreadId::new(),
        depth: 1,
        agent_path: None,
        agent_nickname: None,
        agent_role: None,
    });
    let child_depth = next_thread_spawn_depth(&session_source);
    assert_eq!(child_depth, 2);
    assert!(exceeds_thread_spawn_depth_limit(
        child_depth,
        /*max_depth*/ 1
    ));
}
// Sub-agent sources other than ThreadSpawn count as depth 0, so their
// children (depth 1) stay within a max_depth of 1.
#[test]
fn non_thread_spawn_subagents_default_to_depth_zero() {
    let session_source = SessionSource::SubAgent(SubAgentSource::Review);
    assert_eq!(session_depth(&session_source), 0);
    assert_eq!(next_thread_spawn_depth(&session_source), 1);
    assert!(!exceeds_thread_spawn_depth_limit(
        /*depth*/ 1, /*max_depth*/ 1
    ));
}
#[test]
fn reservation_drop_releases_slot() {
let registry = Arc::new(AgentRegistry::default());
let reservation = registry.reserve_spawn_slot(Some(1)).expect("reserve slot");
drop(reservation);
let reservation = registry.reserve_spawn_slot(Some(1)).expect("slot released");
drop(reservation);
}
// A committed reservation keeps consuming a slot until the spawned thread is
// explicitly released.
#[test]
fn commit_holds_slot_until_release() {
    let registry = Arc::new(AgentRegistry::default());
    let reservation = registry.reserve_spawn_slot(Some(1)).expect("reserve slot");
    let thread_id = ThreadId::new();
    reservation.commit(agent_metadata(thread_id));
    // With max_threads == 1 and one committed thread, a second reservation
    // must fail with the limit error.
    let err = match registry.reserve_spawn_slot(Some(1)) {
        Ok(_) => panic!("limit should be enforced"),
        Err(err) => err,
    };
    let CodexErr::AgentLimitReached { max_threads } = err else {
        panic!("expected CodexErr::AgentLimitReached");
    };
    assert_eq!(max_threads, 1);
    // Releasing the committed thread frees the slot again.
    registry.release_spawned_thread(thread_id);
    let reservation = registry
        .reserve_spawn_slot(Some(1))
        .expect("slot released after thread removal");
    drop(reservation);
}
// Releasing a thread id that was never registered must be a no-op: it must
// not free the slot held by the real committed thread.
#[test]
fn release_ignores_unknown_thread_id() {
    let registry = Arc::new(AgentRegistry::default());
    let reservation = registry.reserve_spawn_slot(Some(1)).expect("reserve slot");
    let thread_id = ThreadId::new();
    reservation.commit(agent_metadata(thread_id));
    // Releasing an unrelated id should leave the slot occupied.
    registry.release_spawned_thread(ThreadId::new());
    let err = match registry.reserve_spawn_slot(Some(1)) {
        Ok(_) => panic!("limit should still be enforced"),
        Err(err) => err,
    };
    let CodexErr::AgentLimitReached { max_threads } = err else {
        panic!("expected CodexErr::AgentLimitReached");
    };
    assert_eq!(max_threads, 1);
    // Releasing the real thread id frees the slot.
    registry.release_spawned_thread(thread_id);
    let reservation = registry
        .reserve_spawn_slot(Some(1))
        .expect("slot released after real thread removal");
    drop(reservation);
}
// Releasing an already-released thread id a second time must not free the
// slot now held by a different, still-active thread.
#[test]
fn release_is_idempotent_for_registered_threads() {
    let registry = Arc::new(AgentRegistry::default());
    let reservation = registry.reserve_spawn_slot(Some(1)).expect("reserve slot");
    let first_id = ThreadId::new();
    reservation.commit(agent_metadata(first_id));
    registry.release_spawned_thread(first_id);
    // The freed slot is taken by a second thread.
    let reservation = registry.reserve_spawn_slot(Some(1)).expect("slot reused");
    let second_id = ThreadId::new();
    reservation.commit(agent_metadata(second_id));
    // Double-release of the first id: must not affect the second thread's slot.
    registry.release_spawned_thread(first_id);
    let err = match registry.reserve_spawn_slot(Some(1)) {
        Ok(_) => panic!("limit should still be enforced"),
        Err(err) => err,
    };
    let CodexErr::AgentLimitReached { max_threads } = err else {
        panic!("expected CodexErr::AgentLimitReached");
    };
    assert_eq!(max_threads, 1);
    registry.release_spawned_thread(second_id);
    let reservation = registry
        .reserve_spawn_slot(Some(1))
        .expect("slot released after second thread removal");
    drop(reservation);
}
// A nickname stays marked used even when its reservation is dropped without
// committing, so a later spawn skips it while unused names remain.
#[test]
fn failed_spawn_keeps_nickname_marked_used() {
    let registry = Arc::new(AgentRegistry::default());
    let mut reservation = registry
        .reserve_spawn_slot(/*max_threads*/ None)
        .expect("reserve slot");
    let agent_nickname = reservation
        .reserve_agent_nickname_with_preference(&["alpha"], /*preferred*/ None)
        .expect("reserve agent name");
    assert_eq!(agent_nickname, "alpha");
    // Simulate a failed spawn: the reservation is dropped uncommitted.
    drop(reservation);
    let mut reservation = registry
        .reserve_spawn_slot(/*max_threads*/ None)
        .expect("reserve slot");
    // "alpha" is still considered used; the unused "beta" wins.
    let agent_nickname = reservation
        .reserve_agent_nickname_with_preference(&["alpha", "beta"], /*preferred*/ None)
        .expect("unused name should still be preferred");
    assert_eq!(agent_nickname, "beta");
}
// When every candidate nickname is already used, the pool resets and names
// are reissued with an ordinal suffix; the reset counter records this.
#[test]
fn agent_nickname_resets_used_pool_when_exhausted() {
    let registry = Arc::new(AgentRegistry::default());
    let mut first = registry
        .reserve_spawn_slot(/*max_threads*/ None)
        .expect("reserve first slot");
    let first_name = first
        .reserve_agent_nickname_with_preference(&["alpha"], /*preferred*/ None)
        .expect("reserve first agent name");
    let first_id = ThreadId::new();
    first.commit(agent_metadata(first_id));
    assert_eq!(first_name, "alpha");
    // The single-name pool is exhausted, forcing a reset for the second spawn.
    let mut second = registry
        .reserve_spawn_slot(/*max_threads*/ None)
        .expect("reserve second slot");
    let second_name = second
        .reserve_agent_nickname_with_preference(&["alpha"], /*preferred*/ None)
        .expect("name should be reused after pool reset");
    assert_eq!(second_name, "alpha the 2nd");
    // Peek at internal state to confirm exactly one reset happened.
    let active_agents = registry
        .active_agents
        .lock()
        .unwrap_or_else(std::sync::PoisonError::into_inner);
    assert_eq!(active_agents.nickname_reset_count, 1);
}
// Releasing a thread does NOT free its nickname: the name stays used until
// the whole pool is exhausted and resets, at which point duplicates get an
// ordinal suffix.
#[test]
fn released_nickname_stays_used_until_pool_reset() {
    let registry = Arc::new(AgentRegistry::default());
    let mut first = registry
        .reserve_spawn_slot(/*max_threads*/ None)
        .expect("reserve first slot");
    let first_name = first
        .reserve_agent_nickname_with_preference(&["alpha"], /*preferred*/ None)
        .expect("reserve first agent name");
    let first_id = ThreadId::new();
    first.commit(agent_metadata(first_id));
    assert_eq!(first_name, "alpha");
    registry.release_spawned_thread(first_id);
    // Even though "alpha"'s thread is gone, the name is still marked used.
    let mut second = registry
        .reserve_spawn_slot(/*max_threads*/ None)
        .expect("reserve second slot");
    let second_name = second
        .reserve_agent_nickname_with_preference(&["alpha", "beta"], /*preferred*/ None)
        .expect("released name should still be marked used");
    assert_eq!(second_name, "beta");
    let second_id = ThreadId::new();
    second.commit(agent_metadata(second_id));
    registry.release_spawned_thread(second_id);
    // Both names used -> pool resets; the third spawn gets an ordinal name.
    let mut third = registry
        .reserve_spawn_slot(/*max_threads*/ None)
        .expect("reserve third slot");
    let third_name = third
        .reserve_agent_nickname_with_preference(&["alpha", "beta"], /*preferred*/ None)
        .expect("pool reset should permit a duplicate");
    // Which base name is picked after reset is unspecified; only the suffix is.
    let expected_names = HashSet::from(["alpha the 2nd".to_string(), "beta the 2nd".to_string()]);
    assert!(expected_names.contains(&third_name));
    let active_agents = registry
        .active_agents
        .lock()
        .unwrap_or_else(std::sync::PoisonError::into_inner);
    assert_eq!(active_agents.nickname_reset_count, 1);
}
// Every exhaustion of the one-name pool bumps the reset count, and the
// ordinal suffix tracks it: Plato -> Plato the 2nd -> Plato the 3rd.
#[test]
fn repeated_resets_advance_the_ordinal_suffix() {
    let registry = Arc::new(AgentRegistry::default());
    let mut first = registry
        .reserve_spawn_slot(/*max_threads*/ None)
        .expect("reserve first slot");
    let first_name = first
        .reserve_agent_nickname_with_preference(&["Plato"], /*preferred*/ None)
        .expect("reserve first agent name");
    let first_id = ThreadId::new();
    first.commit(agent_metadata(first_id));
    assert_eq!(first_name, "Plato");
    registry.release_spawned_thread(first_id);
    // First reset: the only name was used, so the suffix becomes "the 2nd".
    let mut second = registry
        .reserve_spawn_slot(/*max_threads*/ None)
        .expect("reserve second slot");
    let second_name = second
        .reserve_agent_nickname_with_preference(&["Plato"], /*preferred*/ None)
        .expect("reserve second agent name");
    let second_id = ThreadId::new();
    second.commit(agent_metadata(second_id));
    assert_eq!(second_name, "Plato the 2nd");
    registry.release_spawned_thread(second_id);
    // Second reset: suffix advances to "the 3rd".
    let mut third = registry
        .reserve_spawn_slot(/*max_threads*/ None)
        .expect("reserve third slot");
    let third_name = third
        .reserve_agent_nickname_with_preference(&["Plato"], /*preferred*/ None)
        .expect("reserve third agent name");
    assert_eq!(third_name, "Plato the 3rd");
    let active_agents = registry
        .active_agents
        .lock()
        .unwrap_or_else(std::sync::PoisonError::into_inner);
    assert_eq!(active_agents.nickname_reset_count, 2);
}
// Registering the root thread must make it resolvable via the root agent path.
#[test]
fn register_root_thread_indexes_root_path() {
    let registry = Arc::new(AgentRegistry::default());
    let root_id = ThreadId::new();
    registry.register_root_thread(root_id);
    let resolved = registry.agent_id_for_path(&AgentPath::root());
    assert_eq!(resolved, Some(root_id));
}
// Unlike nicknames, a reserved agent path IS freed when its reservation is
// dropped uncommitted, so a later spawn can claim the same path.
#[test]
fn reserved_agent_path_is_released_when_spawn_fails() {
    let registry = Arc::new(AgentRegistry::default());
    let mut first = registry
        .reserve_spawn_slot(/*max_threads*/ None)
        .expect("reserve first slot");
    first
        .reserve_agent_path(&agent_path("/root/researcher"))
        .expect("reserve first path");
    // Simulate a failed spawn: drop without committing.
    drop(first);
    let mut second = registry
        .reserve_spawn_slot(/*max_threads*/ None)
        .expect("reserve second slot");
    second
        .reserve_agent_path(&agent_path("/root/researcher"))
        .expect("dropped reservation should free the path");
}
// A committed agent path resolves to its thread id until the thread is
// released, after which the path lookup returns None.
#[test]
fn committed_agent_path_is_indexed_until_release() {
    let registry = Arc::new(AgentRegistry::default());
    let thread_id = ThreadId::new();
    let mut reservation = registry
        .reserve_spawn_slot(/*max_threads*/ None)
        .expect("reserve slot");
    reservation
        .reserve_agent_path(&agent_path("/root/researcher"))
        .expect("reserve path");
    // Commit with both the id and the path so the path index is populated.
    reservation.commit(AgentMetadata {
        agent_id: Some(thread_id),
        agent_path: Some(agent_path("/root/researcher")),
        ..Default::default()
    });
    assert_eq!(
        registry.agent_id_for_path(&agent_path("/root/researcher")),
        Some(thread_id)
    );
    registry.release_spawned_thread(thread_id);
    // Releasing the thread removes the path from the index.
    assert_eq!(
        registry.agent_id_for_path(&agent_path("/root/researcher")),
        None
    );
}

View File

@@ -0,0 +1,27 @@
use codex_protocol::protocol::AgentStatus;
use codex_protocol::protocol::EventMsg;
/// Derive the next agent status from a single emitted event.
///
/// Returns `None` when the event does not affect status tracking.
pub fn agent_status_from_event(msg: &EventMsg) -> Option<AgentStatus> {
    use codex_protocol::protocol::TurnAbortReason;
    match msg {
        EventMsg::TurnStarted(_) => Some(AgentStatus::Running),
        EventMsg::TurnComplete(ev) => Some(AgentStatus::Completed(ev.last_agent_message.clone())),
        EventMsg::TurnAborted(ev) => {
            // Only a user interrupt maps to Interrupted; every other abort
            // reason is surfaced as an error status.
            if matches!(ev.reason, TurnAbortReason::Interrupted) {
                Some(AgentStatus::Interrupted)
            } else {
                Some(AgentStatus::Errored(format!("{:?}", ev.reason)))
            }
        }
        EventMsg::Error(ev) => Some(AgentStatus::Errored(ev.message.clone())),
        EventMsg::ShutdownComplete => Some(AgentStatus::Shutdown),
        _ => None,
    }
}
/// Whether `status` is terminal.
///
/// `PendingInit`, `Running`, and `Interrupted` are the non-final states;
/// anything else (completed, errored, shutdown) counts as final.
pub fn is_final(status: &AgentStatus) -> bool {
    match status {
        AgentStatus::PendingInit | AgentStatus::Running | AgentStatus::Interrupted => false,
        _ => true,
    }
}

View File

@@ -33,6 +33,7 @@ codex-analytics = { workspace = true }
codex-arg0 = { workspace = true }
codex-cloud-requirements = { workspace = true }
codex-config = { workspace = true }
codex-code-mode-runtime = { workspace = true }
codex-core = { workspace = true }
codex-core-plugins = { workspace = true }
codex-exec-server = { workspace = true }

View File

@@ -272,7 +272,7 @@ impl MessageProcessor {
config.chatgpt_base_url.trim_end_matches('/').to_string(),
config.analytics_enabled,
);
let thread_manager = Arc::new(ThreadManager::new(
let thread_manager = Arc::new(ThreadManager::new_with_code_mode_runtime_factory(
config.as_ref(),
auth_manager.clone(),
session_source,
@@ -283,6 +283,7 @@ impl MessageProcessor {
},
environment_manager,
Some(analytics_events_client.clone()),
codex_code_mode_runtime::runtime_factory(),
));
thread_manager
.plugins_manager()

View File

@@ -0,0 +1,6 @@
load("//:defs.bzl", "codex_rust_crate")

# Bazel target for the codex-code-mode-runtime Rust crate (the runtime-backed
# half of code mode; the shared types live in codex-code-mode).
codex_rust_crate(
    name = "code-mode-runtime",
    crate_name = "codex_code_mode_runtime",
)

View File

@@ -0,0 +1,28 @@
# Manifest for codex-code-mode-runtime: the runtime half of code mode that
# links v8/deno_core_icudata, split out so crates that only need the shared
# code-mode types do not pull in the heavyweight runtime dependencies.
[package]
edition.workspace = true
license.workspace = true
name = "codex-code-mode-runtime"
version.workspace = true
[lib]
doctest = false
name = "codex_code_mode_runtime"
path = "src/lib.rs"
[lints]
workspace = true
[dependencies]
async-channel = { workspace = true }
async-trait = { workspace = true }
codex-code-mode = { workspace = true }
codex-protocol = { workspace = true }
deno_core_icudata = { workspace = true }
serde_json = { workspace = true }
tokio = { workspace = true, features = ["macros", "rt", "sync", "time"] }
tokio-util = { workspace = true, features = ["rt"] }
tracing = { workspace = true }
v8 = { workspace = true }
[dev-dependencies]
pretty_assertions = { workspace = true }

View File

@@ -0,0 +1,14 @@
//! Runtime-enabled surface of code mode: re-exports the shared
//! `codex-code-mode` items and layers the real runtime and turn service on
//! top of them.
mod runtime;
mod service;
// Explicit re-export kept alongside the glob below so the host trait is
// always part of this crate's API even if the glob's contents change.
pub use codex_code_mode::CodeModeTurnHost;
pub use codex_code_mode::*;
// NOTE(review): codex-code-mode exports items with these same names (from its
// stub modules); the explicit `use` declarations below intentionally shadow
// the glob so callers get this crate's runtime-backed versions.
pub use runtime::DEFAULT_EXEC_YIELD_TIME_MS;
pub use runtime::DEFAULT_MAX_OUTPUT_TOKENS_PER_EXEC_CALL;
pub use runtime::DEFAULT_WAIT_YIELD_TIME_MS;
pub use runtime::ExecuteRequest;
pub use runtime::RuntimeResponse;
pub use runtime::WaitRequest;
pub use service::CodeModeService;
pub use service::CodeModeTurnWorker;
pub use service::runtime_factory;

View File

@@ -1,4 +1,4 @@
use crate::response::FunctionCallOutputContentItem;
use codex_code_mode::FunctionCallOutputContentItem;
use super::EXIT_SENTINEL;
use super::RuntimeEvent;

View File

@@ -13,51 +13,19 @@ use codex_protocol::ToolName;
use serde_json::Value as JsonValue;
use tokio::sync::mpsc;
use crate::description::EnabledToolMetadata;
use crate::description::ToolDefinition;
use crate::description::enabled_tool_metadata;
use crate::response::FunctionCallOutputContentItem;
pub use codex_code_mode::DEFAULT_EXEC_YIELD_TIME_MS;
pub use codex_code_mode::DEFAULT_MAX_OUTPUT_TOKENS_PER_EXEC_CALL;
pub use codex_code_mode::DEFAULT_WAIT_YIELD_TIME_MS;
pub use codex_code_mode::ExecuteRequest;
pub use codex_code_mode::RuntimeResponse;
pub use codex_code_mode::WaitRequest;
use codex_code_mode::EnabledToolMetadata;
use codex_code_mode::FunctionCallOutputContentItem;
use codex_code_mode::enabled_tool_metadata;
pub const DEFAULT_EXEC_YIELD_TIME_MS: u64 = 10_000;
pub const DEFAULT_WAIT_YIELD_TIME_MS: u64 = 10_000;
pub const DEFAULT_MAX_OUTPUT_TOKENS_PER_EXEC_CALL: usize = 10_000;
const EXIT_SENTINEL: &str = "__codex_code_mode_exit__";
#[derive(Clone, Debug)]
pub struct ExecuteRequest {
pub tool_call_id: String,
pub enabled_tools: Vec<ToolDefinition>,
pub source: String,
pub stored_values: HashMap<String, JsonValue>,
pub yield_time_ms: Option<u64>,
pub max_output_tokens: Option<usize>,
}
#[derive(Clone, Debug)]
pub struct WaitRequest {
pub cell_id: String,
pub yield_time_ms: u64,
pub terminate: bool,
}
#[derive(Debug, PartialEq)]
pub enum RuntimeResponse {
Yielded {
cell_id: String,
content_items: Vec<FunctionCallOutputContentItem>,
},
Terminated {
cell_id: String,
content_items: Vec<FunctionCallOutputContentItem>,
},
Result {
cell_id: String,
content_items: Vec<FunctionCallOutputContentItem>,
stored_values: HashMap<String, JsonValue>,
error_text: Option<String>,
},
}
#[derive(Debug)]
pub(crate) enum TurnMessage {
ToolCall {

View File

@@ -1,7 +1,7 @@
use serde_json::Value as JsonValue;
use crate::response::FunctionCallOutputContentItem;
use crate::response::ImageDetail;
use codex_code_mode::FunctionCallOutputContentItem;
use codex_code_mode::ImageDetail;
const IMAGE_HELPER_EXPECTS_MESSAGE: &str = "image expects a non-empty image URL string, an object with image_url and optional detail, or a raw MCP image block";
const CODEX_IMAGE_DETAIL_META_KEY: &str = "codex/imageDetail";

View File

@@ -5,7 +5,6 @@ use std::sync::atomic::Ordering;
use std::time::Duration;
use async_trait::async_trait;
use codex_protocol::ToolName;
use serde_json::Value as JsonValue;
use tokio::sync::Mutex;
use tokio::sync::mpsc;
@@ -13,7 +12,6 @@ use tokio::sync::oneshot;
use tokio_util::sync::CancellationToken;
use tracing::warn;
use crate::FunctionCallOutputContentItem;
use crate::runtime::DEFAULT_EXEC_YIELD_TIME_MS;
use crate::runtime::ExecuteRequest;
use crate::runtime::RuntimeCommand;
@@ -22,18 +20,10 @@ use crate::runtime::RuntimeResponse;
use crate::runtime::TurnMessage;
use crate::runtime::WaitRequest;
use crate::runtime::spawn_runtime;
#[async_trait]
pub trait CodeModeTurnHost: Send + Sync {
async fn invoke_tool(
&self,
tool_name: ToolName,
input: Option<JsonValue>,
cancellation_token: CancellationToken,
) -> Result<JsonValue, String>;
async fn notify(&self, call_id: String, cell_id: String, text: String) -> Result<(), String>;
}
use codex_code_mode::CodeModeRuntimeFactory;
use codex_code_mode::CodeModeRuntimeService;
use codex_code_mode::CodeModeTurnHost;
use codex_code_mode::FunctionCallOutputContentItem;
#[derive(Clone)]
struct SessionHandle {
@@ -213,6 +203,29 @@ impl Default for CodeModeService {
}
}
#[async_trait]
impl CodeModeRuntimeService for CodeModeService {
async fn stored_values(&self) -> HashMap<String, JsonValue> {
self.stored_values().await
}
async fn replace_stored_values(&self, values: HashMap<String, JsonValue>) {
self.replace_stored_values(values).await;
}
async fn execute(&self, request: ExecuteRequest) -> Result<RuntimeResponse, String> {
self.execute(request).await
}
async fn wait(&self, request: WaitRequest) -> Result<RuntimeResponse, String> {
self.wait(request).await
}
fn start_turn_worker(&self, host: Arc<dyn CodeModeTurnHost>) -> Box<dyn Send> {
Box::new(self.start_turn_worker(host))
}
}
pub struct CodeModeTurnWorker {
shutdown_tx: Option<oneshot::Sender<()>>,
}
@@ -225,6 +238,10 @@ impl Drop for CodeModeTurnWorker {
}
}
pub fn runtime_factory() -> CodeModeRuntimeFactory {
Arc::new(|| Arc::new(CodeModeService::new()))
}
enum SessionControlCommand {
Poll {
yield_time_ms: u64,
@@ -480,10 +497,10 @@ mod tests {
use super::SessionControlCommand;
use super::SessionControlContext;
use super::run_session_control;
use crate::FunctionCallOutputContentItem;
use crate::runtime::ExecuteRequest;
use crate::runtime::RuntimeEvent;
use crate::runtime::spawn_runtime;
use codex_code_mode::FunctionCallOutputContentItem;
fn execute_request(source: &str) -> ExecuteRequest {
ExecuteRequest {

View File

@@ -13,16 +13,12 @@ path = "src/lib.rs"
workspace = true
[dependencies]
async-channel = { workspace = true }
async-trait = { workspace = true }
codex-protocol = { workspace = true }
deno_core_icudata = { workspace = true }
serde = { workspace = true, features = ["derive"] }
serde_json = { workspace = true }
tokio = { workspace = true, features = ["macros", "rt", "sync", "time"] }
tokio-util = { workspace = true, features = ["rt"] }
tracing = { workspace = true }
v8 = { workspace = true }
[dev-dependencies]
pretty_assertions = { workspace = true }

View File

@@ -1,15 +1,19 @@
mod description;
mod response;
#[path = "runtime_stub.rs"]
mod runtime;
#[path = "service_stub.rs"]
mod service;
pub use description::CODE_MODE_PRAGMA_PREFIX;
pub use description::CodeModeToolKind;
pub use description::EnabledToolMetadata;
pub use description::ToolDefinition;
pub use description::ToolNamespaceDescription;
pub use description::augment_tool_definition;
pub use description::build_exec_tool_description;
pub use description::build_wait_tool_description;
pub use description::enabled_tool_metadata;
pub use description::is_code_mode_nested_tool;
pub use description::normalize_code_mode_identifier;
pub use description::parse_exec_source;
@@ -23,9 +27,12 @@ pub use runtime::DEFAULT_WAIT_YIELD_TIME_MS;
pub use runtime::ExecuteRequest;
pub use runtime::RuntimeResponse;
pub use runtime::WaitRequest;
pub use service::CodeModeRuntimeFactory;
pub use service::CodeModeRuntimeService;
pub use service::CodeModeService;
pub use service::CodeModeTurnHost;
pub use service::CodeModeTurnWorker;
pub use service::default_runtime_factory;
pub const PUBLIC_TOOL_NAME: &str = "exec";
pub const WAIT_TOOL_NAME: &str = "wait";

View File

@@ -0,0 +1,44 @@
use std::collections::HashMap;
use serde_json::Value as JsonValue;
use crate::response::FunctionCallOutputContentItem;
/// Default yield time for `execute` calls, in milliseconds.
pub const DEFAULT_EXEC_YIELD_TIME_MS: u64 = 10_000;
/// Default yield time for `wait` calls, in milliseconds.
pub const DEFAULT_WAIT_YIELD_TIME_MS: u64 = 10_000;
/// Default cap on output tokens returned per exec call.
pub const DEFAULT_MAX_OUTPUT_TOKENS_PER_EXEC_CALL: usize = 10_000;

/// Request to run a code cell against the code-mode runtime.
#[derive(Clone, Debug)]
pub struct ExecuteRequest {
    // Identifier of the originating tool call; the stub echoes it back as the
    // response `cell_id`.
    pub tool_call_id: String,
    // Tool definitions the executed source is allowed to reference.
    pub enabled_tools: Vec<crate::ToolDefinition>,
    // Source code of the cell to execute.
    pub source: String,
    // Values carried over from earlier cells (echoed back unchanged by the
    // stub implementation).
    pub stored_values: HashMap<String, JsonValue>,
    // Per-call yield-time override (ms); presumably falls back to
    // DEFAULT_EXEC_YIELD_TIME_MS when None — confirm in the real runtime.
    pub yield_time_ms: Option<u64>,
    // Per-call output-token cap; presumably falls back to
    // DEFAULT_MAX_OUTPUT_TOKENS_PER_EXEC_CALL when None.
    pub max_output_tokens: Option<usize>,
}

/// Request to poll, or terminate, a previously started cell.
#[derive(Clone, Debug)]
pub struct WaitRequest {
    pub cell_id: String,
    // How long to wait before yielding, in milliseconds.
    pub yield_time_ms: u64,
    // When true, the cell is presumably torn down rather than polled —
    // behavior lives in the real runtime, not this stub.
    pub terminate: bool,
}

/// Outcome of an `execute`/`wait` call.
#[derive(Debug, PartialEq)]
pub enum RuntimeResponse {
    // Cell is still running; partial output so far.
    Yielded {
        cell_id: String,
        content_items: Vec<FunctionCallOutputContentItem>,
    },
    // Cell was terminated; output produced before termination.
    Terminated {
        cell_id: String,
        content_items: Vec<FunctionCallOutputContentItem>,
    },
    // Cell finished: final output, the updated stored values, and an error
    // message when execution failed.
    Result {
        cell_id: String,
        content_items: Vec<FunctionCallOutputContentItem>,
        stored_values: HashMap<String, JsonValue>,
        error_text: Option<String>,
    },
}

View File

@@ -0,0 +1,120 @@
use std::collections::HashMap;
use std::sync::Arc;
use async_trait::async_trait;
use codex_protocol::ToolName;
use serde_json::Value as JsonValue;
use tokio::sync::Mutex;
use tokio_util::sync::CancellationToken;
use crate::runtime::ExecuteRequest;
use crate::runtime::RuntimeResponse;
use crate::runtime::WaitRequest;
/// Factory closure producing a fresh runtime service instance.
pub type CodeModeRuntimeFactory = Arc<dyn Fn() -> Arc<dyn CodeModeRuntimeService> + Send + Sync>;

/// Host-side callbacks a code-mode turn uses to reach back into the session.
#[async_trait]
pub trait CodeModeTurnHost: Send + Sync {
    /// Invoke a named tool with optional JSON input; cancellable via the token.
    async fn invoke_tool(
        &self,
        tool_name: ToolName,
        input: Option<JsonValue>,
        cancellation_token: CancellationToken,
    ) -> Result<JsonValue, String>;
    /// Deliver a notification message for the given call/cell.
    async fn notify(&self, call_id: String, cell_id: String, text: String) -> Result<(), String>;
}

/// Object-safe interface over a code-mode runtime, implemented both by this
/// stub and by the real runtime service in `codex-code-mode-runtime`.
#[async_trait]
pub trait CodeModeRuntimeService: Send + Sync {
    async fn stored_values(&self) -> HashMap<String, JsonValue>;
    async fn replace_stored_values(&self, values: HashMap<String, JsonValue>);
    async fn execute(&self, request: ExecuteRequest) -> Result<RuntimeResponse, String>;
    async fn wait(&self, request: WaitRequest) -> Result<RuntimeResponse, String>;
    // Returns an opaque worker guard. NOTE(review): the runtime-side worker
    // appears to shut down when this guard is dropped — confirm against
    // CodeModeTurnWorker's Drop impl in codex-code-mode-runtime.
    fn start_turn_worker(&self, host: Arc<dyn CodeModeTurnHost>) -> Box<dyn Send>;
}
/// Stub stand-in for the code-mode runtime.
///
/// It honors the stored-values contract but reports every `execute`/`wait`
/// call as unavailable; the real implementation lives in
/// `codex-code-mode-runtime`.
pub struct CodeModeService {
    // Values persisted across exec cells for the current session.
    stored_values: Mutex<HashMap<String, JsonValue>>,
}

impl CodeModeService {
    // Single source of truth for the stubbed error text, previously
    // duplicated verbatim in `execute` and `wait`.
    const RUNTIME_UNAVAILABLE: &'static str =
        "code mode runtime is unavailable in this build of codex-code-mode";

    pub fn new() -> Self {
        Self {
            stored_values: Mutex::new(HashMap::new()),
        }
    }

    /// Snapshot of the currently stored values.
    pub async fn stored_values(&self) -> HashMap<String, JsonValue> {
        self.stored_values.lock().await.clone()
    }

    /// Replace the stored values wholesale.
    pub async fn replace_stored_values(&self, values: HashMap<String, JsonValue>) {
        *self.stored_values.lock().await = values;
    }

    /// Always completes immediately with an "unavailable" error result,
    /// echoing the caller's stored values back unchanged.
    pub async fn execute(&self, request: ExecuteRequest) -> Result<RuntimeResponse, String> {
        Ok(RuntimeResponse::Result {
            cell_id: request.tool_call_id,
            content_items: Vec::new(),
            stored_values: request.stored_values,
            error_text: Some(Self::RUNTIME_UNAVAILABLE.to_string()),
        })
    }

    /// Always completes immediately with an "unavailable" error result,
    /// returning the service's current stored values.
    pub async fn wait(&self, request: WaitRequest) -> Result<RuntimeResponse, String> {
        Ok(RuntimeResponse::Result {
            cell_id: request.cell_id,
            content_items: Vec::new(),
            stored_values: self.stored_values().await,
            error_text: Some(Self::RUNTIME_UNAVAILABLE.to_string()),
        })
    }

    /// No-op worker guard; the stub never drives a turn.
    pub fn start_turn_worker(&self, _host: Arc<dyn CodeModeTurnHost>) -> CodeModeTurnWorker {
        CodeModeTurnWorker {}
    }
}
// Pure delegation: every trait method forwards to the inherent method of the
// same name; the worker guard is boxed to keep the trait object-safe.
#[async_trait]
impl CodeModeRuntimeService for CodeModeService {
    async fn stored_values(&self) -> HashMap<String, JsonValue> {
        self.stored_values().await
    }
    async fn replace_stored_values(&self, values: HashMap<String, JsonValue>) {
        self.replace_stored_values(values).await;
    }
    async fn execute(&self, request: ExecuteRequest) -> Result<RuntimeResponse, String> {
        self.execute(request).await
    }
    async fn wait(&self, request: WaitRequest) -> Result<RuntimeResponse, String> {
        self.wait(request).await
    }
    fn start_turn_worker(&self, host: Arc<dyn CodeModeTurnHost>) -> Box<dyn Send> {
        Box::new(self.start_turn_worker(host))
    }
}
impl Default for CodeModeService {
    fn default() -> Self {
        Self::new()
    }
}

/// Stub worker guard; carries no state because the stub starts no worker.
pub struct CodeModeTurnWorker {}

/// Factory producing stub services, used when no runtime-backed factory is
/// injected.
pub fn default_runtime_factory() -> CodeModeRuntimeFactory {
    Arc::new(|| Arc::new(CodeModeService::new()))
}

View File

@@ -0,0 +1,6 @@
load("//:defs.bzl", "codex_rust_crate")

# Bazel target for the codex-config-loader Rust crate.
codex_rust_crate(
    name = "config-loader",
    crate_name = "codex_config_loader",
)

View File

@@ -0,0 +1,39 @@
# Manifest for codex-config-loader: config layer loading split out of
# codex-core (see this crate's README for the public surface).
[package]
name = "codex-config-loader"
version.workspace = true
edition.workspace = true
license.workspace = true
[lib]
doctest = false
[lints]
workspace = true
[dependencies]
base64 = { workspace = true }
codex-app-server-protocol = { workspace = true }
codex-config = { workspace = true }
codex-exec-server = { workspace = true }
codex-git-utils = { workspace = true }
codex-protocol = { workspace = true }
codex-utils-absolute-path = { workspace = true }
dunce = { workspace = true }
serde = { workspace = true, features = ["derive"] }
tokio = { workspace = true, features = ["rt"] }
toml = { workspace = true }
tracing = { workspace = true }
# Used for macOS managed-preferences (MDM) integration.
[target.'cfg(target_os = "macos")'.dependencies]
# NOTE(review): version pinned here instead of `workspace = true` — confirm
# this matches the workspace-wide core-foundation version, if any.
core-foundation = "0.9"
[target.'cfg(target_os = "windows")'.dependencies]
# NOTE(review): likewise pinned rather than workspace-inherited — confirm.
windows-sys = { version = "0.52", features = [
    "Win32_Foundation",
    "Win32_System_Com",
    "Win32_UI_Shell",
] }
[dev-dependencies]
anyhow = { workspace = true }
tempfile = { workspace = true }

View File

@@ -0,0 +1,76 @@
# `codex-config-loader`
This crate loads and describes Codex configuration layers (user config,
CLI/session overrides, managed config, requirements, and MDM-managed
preferences) and produces:
- An effective merged TOML config.
- Per-key origins metadata.
- Per-layer versions used for optimistic concurrency and conflict detection.
The canonical implementation lives here instead of `codex-core` so callers that
only need config loading do not force the loader implementation into core. The
`codex_core::config_loader` module is a compatibility re-export for existing
callers.
## Public Surface
Exported from `codex_config_loader` and re-exported from
`codex_core::config_loader`:
- `load_config_layers_state(fs, codex_home, cwd_opt, cli_overrides, overrides, cloud_requirements) -> ConfigLayerStack`
- `ConfigLayerStack`
- `effective_config() -> toml::Value`
- `origins() -> HashMap<String, ConfigLayerMetadata>`
- `layers_high_to_low() -> Vec<ConfigLayer>`
- `with_user_config(user_config) -> ConfigLayerStack`
- `ConfigLayerEntry` for one layer's source, config, version, and optional disabled reason.
- `LoaderOverrides` for test and override hooks for managed config sources.
- `merge_toml_values(base, overlay)` for recursive TOML merge.
## Layering Model
Precedence runs from top to bottom: layers listed first override those below them:
1. MDM managed preferences on macOS.
2. Legacy managed config.
3. Session flags.
4. Project config layers.
5. User config.
6. System config.
Layers with a `disabled_reason` are still surfaced for UI, but are ignored when
computing the effective config and origins metadata.
## Typical Usage
```rust
use codex_config_loader::{
CloudRequirementsLoader, LoaderOverrides, load_config_layers_state,
};
use codex_exec_server::LOCAL_FS;
use codex_utils_absolute_path::AbsolutePathBuf;
use toml::Value as TomlValue;
let cli_overrides: Vec<(String, TomlValue)> = Vec::new();
let cwd = AbsolutePathBuf::current_dir()?;
let layers = load_config_layers_state(
LOCAL_FS.as_ref(),
&codex_home,
Some(cwd),
&cli_overrides,
LoaderOverrides::default(),
CloudRequirementsLoader::default(),
).await?;
let effective = layers.effective_config();
let origins = layers.origins();
let layers_for_ui = layers.layers_high_to_low();
```
## Internal Layout
- `src/lib.rs`: layer assembly, trust decisions, project config discovery, and path resolution.
- `src/layer_io.rs`: config and managed config reads.
- `src/macos.rs`: managed preferences integration on macOS.
- The `codex-config` dependency (not a file in this crate): owns layer state, requirements, merging, overrides, diagnostics, fingerprints, and config TOML types.

View File

@@ -2,13 +2,8 @@ mod layer_io;
#[cfg(target_os = "macos")]
mod macos;
#[cfg(test)]
mod tests;
use crate::config_loader::layer_io::LoadedConfigLayers;
use codex_app_server_protocol::ConfigLayerSource;
use crate::layer_io::LoadedConfigLayers;
use codex_config::CONFIG_TOML_FILE;
use codex_config::ConfigRequirementsWithSources;
use codex_config::config_toml::ConfigToml;
use codex_config::config_toml::ProjectConfig;
use codex_exec_server::ExecutorFileSystem;
@@ -27,6 +22,7 @@ use std::path::Path;
use std::path::PathBuf;
use toml::Value as TomlValue;
pub use codex_app_server_protocol::ConfigLayerSource;
pub use codex_config::AppRequirementToml;
pub use codex_config::AppsRequirementsToml;
pub use codex_config::CloudRequirementsLoadError;
@@ -39,6 +35,7 @@ pub use codex_config::ConfigLayerStackOrdering;
pub use codex_config::ConfigLoadError;
pub use codex_config::ConfigRequirements;
pub use codex_config::ConfigRequirementsToml;
pub use codex_config::ConfigRequirementsWithSources;
pub use codex_config::ConstrainedWithSource;
pub use codex_config::FeatureRequirementsToml;
pub use codex_config::FilesystemConstraints;
@@ -59,16 +56,15 @@ pub use codex_config::Sourced;
pub use codex_config::TextPosition;
pub use codex_config::TextRange;
pub use codex_config::WebSearchModeRequirement;
pub(crate) use codex_config::build_cli_overrides_layer;
pub(crate) use codex_config::config_error_from_toml;
pub use codex_config::build_cli_overrides_layer;
pub use codex_config::config_error_from_toml;
pub use codex_config::default_project_root_markers;
pub use codex_config::format_config_error;
pub use codex_config::format_config_error_with_source;
pub(crate) use codex_config::io_error_from_config_error;
pub use codex_config::io_error_from_config_error;
pub use codex_config::merge_toml_values;
pub use codex_config::project_root_markers_from_config;
#[cfg(test)]
pub(crate) use codex_config::version_for_toml;
pub use codex_config::version_for_toml;
/// On Unix systems, load default settings from this file path, if present.
/// Note that /etc/codex/ is treated as a "config folder," so subfolders such
@@ -78,11 +74,11 @@ pub const SYSTEM_CONFIG_TOML_FILE_UNIX: &str = "/etc/codex/config.toml";
#[cfg(windows)]
const DEFAULT_PROGRAM_DATA_DIR_WINDOWS: &str = r"C:\ProgramData";
pub(crate) async fn first_layer_config_error(layers: &ConfigLayerStack) -> Option<ConfigError> {
pub async fn first_layer_config_error(layers: &ConfigLayerStack) -> Option<ConfigError> {
codex_config::first_layer_config_error::<ConfigToml>(layers, CONFIG_TOML_FILE).await
}
pub(crate) async fn first_layer_config_error_from_entries(
pub async fn first_layer_config_error_from_entries(
layers: &[ConfigLayerEntry],
) -> Option<ConfigError> {
codex_config::first_layer_config_error_from_entries::<ConfigToml>(layers, CONFIG_TOML_FILE)
@@ -370,7 +366,7 @@ async fn load_config_toml_for_required_layer(
/// If available, apply requirements from the platform system
/// `requirements.toml` location to `config_requirements_toml` by filling in
/// any unset fields.
async fn load_requirements_toml(
pub async fn load_requirements_toml(
fs: &dyn ExecutorFileSystem,
config_requirements_toml: &mut ConfigRequirementsWithSources,
requirements_toml_file: &AbsolutePathBuf,
@@ -768,7 +764,7 @@ fn project_trust_for_lookup_key(
///
/// This ensures that multiple config layers can be merged together correctly
/// even if they were loaded from different directories.
pub(crate) fn resolve_relative_paths_in_config_toml(
pub fn resolve_relative_paths_in_config_toml(
value_from_config_toml: TomlValue,
base_dir: &Path,
) -> io::Result<TomlValue> {

View File

@@ -17,6 +17,7 @@ codex-network-proxy = { workspace = true }
codex-protocol = { workspace = true }
codex-utils-absolute-path = { workspace = true }
codex-utils-path = { workspace = true }
dunce = { workspace = true }
futures = { workspace = true, features = ["alloc", "std"] }
multimap = { workspace = true }
schemars = { workspace = true }

View File

@@ -50,7 +50,7 @@ use codex_protocol::protocol::AskForApproval;
use codex_protocol::protocol::ReadOnlyAccess;
use codex_protocol::protocol::SandboxPolicy;
use codex_utils_absolute_path::AbsolutePathBuf;
use codex_utils_path::normalize_for_path_comparison;
use dunce::canonicalize as normalize_path;
use schemars::JsonSchema;
use serde::Deserialize;
use serde::Deserializer;
@@ -733,7 +733,7 @@ impl ConfigToml {
fn normalized_project_lookup_keys(path: &Path) -> Vec<String> {
let normalized_path = normalize_project_lookup_key(path.to_string_lossy().to_string());
let normalized_canonical_path = normalize_project_lookup_key(
normalize_for_path_comparison(path)
normalize_path(path)
.unwrap_or_else(|_| path.to_path_buf())
.to_string_lossy()
.to_string(),
@@ -771,6 +771,13 @@ fn project_config_for_lookup_key(
.map(|(_, project_config)| (**project_config).clone())
}
/// Canonical lookup key used for a project's trust entry: the first
/// normalized lookup key for `project_path`, falling back to normalizing the
/// raw path string when no keys were produced.
pub(crate) fn project_trust_key(project_path: &Path) -> String {
    normalized_project_lookup_keys(project_path)
        .into_iter()
        .next()
        .unwrap_or_else(|| normalize_project_lookup_key(project_path.to_string_lossy().to_string()))
}
pub fn validate_reserved_model_provider_ids(
model_providers: &HashMap<String, ModelProviderInfo>,
) -> Result<(), String> {

1218
codex-rs/config/src/edit.rs Normal file

File diff suppressed because it is too large Load Diff

View File

@@ -1,7 +1,7 @@
use super::*;
use codex_config::types::AppToolApproval;
use codex_config::types::McpServerToolConfig;
use codex_config::types::McpServerTransportConfig;
use crate::types::AppToolApproval;
use crate::types::McpServerToolConfig;
use crate::types::McpServerTransportConfig;
use codex_protocol::openai_models::ReasoningEffort;
use pretty_assertions::assert_eq;
#[cfg(unix)]

View File

@@ -3,6 +3,7 @@ mod config_requirements;
pub mod config_toml;
mod constraint;
mod diagnostics;
pub mod edit;
mod fingerprint;
mod key_aliases;
mod marketplace_edit;
@@ -61,6 +62,11 @@ pub use diagnostics::first_layer_config_error_from_entries;
pub use diagnostics::format_config_error;
pub use diagnostics::format_config_error_with_source;
pub use diagnostics::io_error_from_config_error;
pub use edit::ConfigEdit;
pub use edit::model_availability_nux_count_edits;
pub use edit::status_line_items_edit;
pub use edit::syntax_theme_edit;
pub use edit::terminal_title_items_edit;
pub use fingerprint::version_for_toml;
pub use marketplace_edit::MarketplaceConfigUpdate;
pub use marketplace_edit::record_user_marketplace;

View File

@@ -1,4 +1,59 @@
load("//:defs.bzl", "codex_rust_crate")
load("@crates//:defs.bzl", "all_crate_deps")
load("@rules_rust//rust:defs.bzl", "rust_test")
load(
"//:defs.bzl",
"WINDOWS_RUSTC_LINK_FLAGS",
"codex_rust_crate",
"workspace_root_test",
)
CORE_COMPILE_DATA = [
"//codex-rs:node-version.txt",
"hierarchical_agents_message.md",
"review_prompt.md",
"src/agent/builtins/awaiter.toml",
"src/agent/builtins/explorer.toml",
"src/guardian/policy.md",
"src/guardian/policy_template.md",
"templates/compact/prompt.md",
"templates/compact/summary_prefix.md",
"templates/realtime/backend_prompt.md",
"templates/review/exit_interrupted.xml",
"templates/review/exit_success.xml",
]
CORE_TEST_DATA = [
"config.schema.json",
] + glob([
"src/**/snapshots/**",
]) + [
# This is a bit of a hack, but empirically, some of our integration tests
# are relying on the presence of this file as a repo root marker. When
# running tests locally, this "just works," but in remote execution,
# the working directory is different and so the file is not found unless it
# is explicitly added as test data.
#
# TODO(aibrahim): Update the tests so that `just bazel-remote-test`
# succeeds without this workaround.
"//:AGENTS.md",
]
CORE_LIB_SRCS = glob(
["src/**/*.rs"],
exclude = [
"src/**/*_tests.rs",
"src/**/tests.rs",
"src/**/mod_tests.rs",
"src/core_unit_tests.rs",
],
)
CORE_UNIT_TEST_SRCS = glob(["src/**/*.rs"]) + glob(["tests/unit/**/*.rs"])
filegroup(
name = "core_test_data",
srcs = CORE_TEST_DATA,
)
filegroup(
name = "model_availability_nux_fixtures",
@@ -11,17 +66,8 @@ filegroup(
codex_rust_crate(
name = "core",
crate_name = "codex_core",
compile_data = glob(
include = ["**"],
exclude = [
"**/* *",
"BUILD.bazel",
"Cargo.toml",
],
allow_empty = True,
) + [
"//codex-rs:node-version.txt",
],
crate_srcs = CORE_LIB_SRCS,
compile_data = CORE_COMPILE_DATA,
rustc_env = {
# Keep manifest-root path lookups inside the Bazel execroot for code
# that relies on env!("CARGO_MANIFEST_DIR").
@@ -29,30 +75,18 @@ codex_rust_crate(
},
integration_compile_data_extra = [
"//codex-rs/apply-patch:apply_patch_tool_instructions.md",
"prompt_with_apply_patch_instructions.md",
"templates/realtime/backend_prompt.md",
],
integration_test_timeout = "long",
test_data_extra = [
"config.schema.json",
] + glob([
"src/**/snapshots/**",
]) + [
# This is a bit of a hack, but empirically, some of our integration tests
# are relying on the presence of this file as a repo root marker. When
# running tests locally, this "just works," but in remote execution,
# the working directory is different and so the file is not found unless it
# is explicitly added as test data.
#
# TODO(aibrahim): Update the tests so that `just bazel-remote-test`
# succeeds without this workaround.
"//:AGENTS.md",
],
test_data_extra = CORE_TEST_DATA,
test_shard_counts = {
"core-all-test": 8,
"core-unit-tests": 8,
},
test_tags = ["no-sandbox"],
unit_test_timeout = "long",
generate_unit_tests = False,
extra_binaries = [
"//codex-rs/linux-sandbox:codex-linux-sandbox",
"//codex-rs/rmcp-client:test_stdio_server",
@@ -61,3 +95,46 @@ codex_rust_crate(
"//codex-rs/cli:codex",
],
)
rust_test(
name = "core-unit-tests-bin",
crate_name = "codex_core",
crate_root = "src/lib.rs",
compile_data = CORE_COMPILE_DATA + [
"prompt_with_apply_patch_instructions.md",
],
data = [":core_test_data"],
deps = all_crate_deps(normal = True, normal_dev = True),
edition = "2024",
experimental_enable_sharding = True,
rustc_env = {
"BAZEL_PACKAGE": "codex-rs/core",
# Keep manifest-root path lookups inside the Bazel execroot for code
# that relies on env!("CARGO_MANIFEST_DIR").
"CARGO_MANIFEST_DIR": "codex-rs/core",
},
rustc_flags = WINDOWS_RUSTC_LINK_FLAGS + [
"--remap-path-prefix=../codex-rs=",
"--remap-path-prefix=codex-rs=",
],
srcs = CORE_UNIT_TEST_SRCS,
tags = [
"manual",
"no-sandbox",
],
)
workspace_root_test(
name = "core-unit-tests",
data = [":core_test_data"],
env = {
"INSTA_SNAPSHOT_PATH": "src",
"INSTA_WORKSPACE_ROOT": ".",
},
flaky = True,
shard_count = 8,
tags = ["no-sandbox"],
test_bin = ":core-unit-tests-bin",
timeout = "long",
workspace_root_marker = "//codex-rs/utils/cargo-bin:repo_root.marker",
)

View File

@@ -26,6 +26,7 @@ bm25 = { workspace = true }
chrono = { workspace = true, features = ["serde"] }
clap = { workspace = true, features = ["derive"] }
codex-analytics = { workspace = true }
codex-agent-runtime = { workspace = true }
codex-api = { workspace = true }
codex-app-server-protocol = { workspace = true }
codex-apply-patch = { workspace = true }
@@ -33,14 +34,19 @@ codex-async-utils = { workspace = true }
codex-code-mode = { workspace = true }
codex-connectors = { workspace = true }
codex-config = { workspace = true }
codex-config-loader = { workspace = true }
codex-core-plugins = { workspace = true }
codex-core-skills = { workspace = true }
crypto_box = { workspace = true }
codex-exec-server = { workspace = true }
codex-features = { workspace = true }
codex-feedback = { workspace = true }
codex-file-watcher = { workspace = true }
codex-login = { workspace = true }
codex-mcp = { workspace = true }
codex-message-history = { workspace = true }
codex-memory-prompts = { workspace = true }
codex-mcp-tool-approval = { workspace = true }
codex-model-provider-info = { workspace = true }
codex-models-manager = { workspace = true }
ed25519-dalek = { workspace = true }
@@ -49,18 +55,22 @@ codex-execpolicy = { workspace = true }
codex-git-utils = { workspace = true }
codex-hooks = { workspace = true }
codex-instructions = { workspace = true }
codex-js-repl = { workspace = true }
codex-network-proxy = { workspace = true }
codex-otel = { workspace = true }
codex-plugin = { workspace = true }
codex-model-provider = { workspace = true }
codex-protocol = { workspace = true }
codex-response-debug-context = { workspace = true }
codex-review = { workspace = true }
codex-rollout = { workspace = true }
codex-rmcp-client = { workspace = true }
codex-sandboxing = { workspace = true }
codex-session-runtime = { workspace = true }
codex-state = { workspace = true }
codex-terminal-detection = { workspace = true }
codex-thread-store = { workspace = true }
codex-tool-spec = { workspace = true }
codex-tools = { workspace = true }
codex-utils-absolute-path = { workspace = true }
codex-utils-cache = { workspace = true }
@@ -87,7 +97,6 @@ iana-time-zone = { workspace = true }
image = { workspace = true, features = ["jpeg", "png", "webp"] }
indexmap = { workspace = true }
libc = { workspace = true }
notify = { workspace = true }
once_cell = { workspace = true }
rand = { workspace = true }
regex-lite = { workspace = true }
@@ -125,9 +134,6 @@ which = { workspace = true }
whoami = { workspace = true }
zip = { workspace = true }
[target.'cfg(target_os = "macos")'.dependencies]
core-foundation = "0.9"
# Build OpenSSL from source for musl builds.
[target.x86_64-unknown-linux-musl.dependencies]
openssl-sys = { workspace = true, features = ["vendored"] }
@@ -136,13 +142,6 @@ openssl-sys = { workspace = true, features = ["vendored"] }
[target.aarch64-unknown-linux-musl.dependencies]
openssl-sys = { workspace = true, features = ["vendored"] }
[target.'cfg(target_os = "windows")'.dependencies]
windows-sys = { version = "0.52", features = [
"Win32_Foundation",
"Win32_System_Com",
"Win32_UI_Shell",
] }
[target.'cfg(unix)'.dependencies]
codex-shell-escalation = { workspace = true }

View File

@@ -14,21 +14,27 @@ use crate::session_prefix::format_subagent_notification_message;
use crate::shell_snapshot::ShellSnapshot;
use crate::thread_manager::ThreadManagerState;
use crate::thread_rollout_truncation::truncate_rollout_to_last_n_fork_turns;
pub(crate) use codex_agent_runtime::LiveAgent;
pub(crate) use codex_agent_runtime::SpawnAgentForkMode;
pub(crate) use codex_agent_runtime::SpawnAgentOptions;
use codex_agent_runtime::agent_matches_prefix;
use codex_agent_runtime::keep_forked_rollout_item;
pub(crate) use codex_agent_runtime::render_input_preview;
use codex_agent_runtime::thread_spawn_depth;
use codex_agent_runtime::thread_spawn_parent_thread_id;
use codex_features::Feature;
use codex_protocol::AgentPath;
use codex_protocol::ThreadId;
use codex_protocol::error::CodexErr;
use codex_protocol::error::Result as CodexResult;
use codex_protocol::models::MessagePhase;
#[cfg(test)]
use codex_protocol::models::ResponseItem;
use codex_protocol::protocol::InitialHistory;
use codex_protocol::protocol::InterAgentCommunication;
use codex_protocol::protocol::Op;
use codex_protocol::protocol::RolloutItem;
use codex_protocol::protocol::SessionSource;
use codex_protocol::protocol::SubAgentSource;
use codex_protocol::protocol::TokenUsage;
use codex_protocol::user_input::UserInput;
use codex_rollout::state_db;
use codex_state::DirectionalThreadSpawnEdgeStatus;
use serde::Serialize;
@@ -39,28 +45,8 @@ use std::sync::Weak;
use tokio::sync::watch;
use tracing::warn;
const AGENT_NAMES: &str = include_str!("agent_names.txt");
const ROOT_LAST_TASK_MESSAGE: &str = "Main thread";
#[derive(Clone, Debug, PartialEq, Eq)]
pub(crate) enum SpawnAgentForkMode {
FullHistory,
LastNTurns(usize),
}
#[derive(Clone, Debug, Default)]
pub(crate) struct SpawnAgentOptions {
pub(crate) fork_parent_spawn_call_id: Option<String>,
pub(crate) fork_mode: Option<SpawnAgentForkMode>,
}
#[derive(Clone, Debug)]
pub(crate) struct LiveAgent {
pub(crate) thread_id: ThreadId,
pub(crate) metadata: AgentMetadata,
pub(crate) status: AgentStatus,
}
#[derive(Clone, Debug, Serialize, PartialEq, Eq)]
pub(crate) struct ListedAgent {
pub(crate) agent_name: String,
@@ -69,11 +55,7 @@ pub(crate) struct ListedAgent {
}
fn default_agent_nickname_list() -> Vec<&'static str> {
AGENT_NAMES
.lines()
.map(str::trim)
.filter(|name| !name.is_empty())
.collect()
codex_agent_runtime::default_agent_nickname_list()
}
fn agent_nickname_candidates(
@@ -93,36 +75,6 @@ fn agent_nickname_candidates(
.collect()
}
/// Decide whether a rollout item is carried over into a forked thread's
/// history.
///
/// Kept: system/developer/user messages, the assistant's final-answer
/// messages, and non-response bookkeeping items (compaction records, events,
/// session meta, turn context). Dropped: intermediate assistant phases and
/// every tool/reasoning call artifact.
fn keep_forked_rollout_item(item: &RolloutItem) -> bool {
    match item {
        RolloutItem::ResponseItem(ResponseItem::Message { role, phase, .. }) => match role.as_str()
        {
            "system" | "developer" | "user" => true,
            // Only the final answer survives a fork; partial/intermediate
            // assistant output is discarded.
            "assistant" => *phase == Some(MessagePhase::FinalAnswer),
            _ => false,
        },
        RolloutItem::ResponseItem(
            ResponseItem::Reasoning { .. }
            | ResponseItem::LocalShellCall { .. }
            | ResponseItem::FunctionCall { .. }
            | ResponseItem::ToolSearchCall { .. }
            | ResponseItem::FunctionCallOutput { .. }
            | ResponseItem::CustomToolCall { .. }
            | ResponseItem::CustomToolCallOutput { .. }
            | ResponseItem::ToolSearchOutput { .. }
            | ResponseItem::WebSearchCall { .. }
            | ResponseItem::ImageGenerationCall { .. }
            | ResponseItem::GhostSnapshot { .. }
            | ResponseItem::Compaction { .. }
            | ResponseItem::Other,
        ) => false,
        RolloutItem::Compacted(_)
        | RolloutItem::EventMsg(_)
        | RolloutItem::SessionMeta(_)
        | RolloutItem::TurnContext(_) => true,
    }
}
/// Control-plane handle for multi-agent operations.
/// `AgentControl` is held by each session (via `SessionServices`). It provides capability to
/// spawn new agents and the inter-agent communication layer.
@@ -1160,54 +1112,6 @@ impl AgentControl {
}
}
/// Parent thread of a thread-spawned sub-agent session; `None` for every
/// other session source.
fn thread_spawn_parent_thread_id(session_source: &SessionSource) -> Option<ThreadId> {
    if let SessionSource::SubAgent(SubAgentSource::ThreadSpawn {
        parent_thread_id, ..
    }) = session_source
    {
        Some(*parent_thread_id)
    } else {
        None
    }
}
/// Check whether `agent_path` sits at or below `prefix` in the agent tree.
///
/// The root prefix matches every agent (including `None`). Otherwise the
/// agent must either equal the prefix exactly or extend it at a `/` segment
/// boundary, so a prefix of `/root/a` matches `/root/a/b` but not `/root/ab`.
fn agent_matches_prefix(agent_path: Option<&AgentPath>, prefix: &AgentPath) -> bool {
    if prefix.is_root() {
        return true;
    }
    agent_path.is_some_and(|agent_path| {
        agent_path == prefix
            || agent_path
                .as_str()
                .strip_prefix(prefix.as_str())
                // Require a segment boundary so sibling names that merely
                // share a string prefix are not treated as descendants.
                .is_some_and(|suffix| suffix.starts_with('/'))
    })
}
/// Render a short human-readable preview of the operation that started an
/// agent: user input items joined by newlines, the raw content for
/// inter-agent mail, and an empty string for anything else.
pub(crate) fn render_input_preview(initial_operation: &Op) -> String {
    match initial_operation {
        Op::UserInput { items, .. } => {
            let mut previews = Vec::with_capacity(items.len());
            for item in items {
                let rendered = match item {
                    UserInput::Text { text, .. } => text.clone(),
                    UserInput::Image { .. } => "[image]".to_string(),
                    UserInput::LocalImage { path } => format!("[local_image:{}]", path.display()),
                    UserInput::Skill { name, path } => format!("[skill:${name}]({})", path.display()),
                    UserInput::Mention { name, path } => format!("[mention:${name}]({path})"),
                    _ => "[input]".to_string(),
                };
                previews.push(rendered);
            }
            previews.join("\n")
        }
        Op::InterAgentCommunication { communication } => communication.content.clone(),
        _ => String::new(),
    }
}
/// Spawn depth recorded on a thread-spawned sub-agent session; `None` for
/// every other session source.
fn thread_spawn_depth(session_source: &SessionSource) -> Option<i32> {
    if let SessionSource::SubAgent(SubAgentSource::ThreadSpawn { depth, .. }) = session_source {
        Some(*depth)
    } else {
        None
    }
}
#[cfg(test)]
#[path = "control_tests.rs"]
#[path = "../../tests/unit/agent/control_tests.rs"]
mod tests;

View File

@@ -1,161 +1 @@
use codex_protocol::protocol::InterAgentCommunication;
use std::collections::VecDeque;
use std::sync::atomic::AtomicU64;
use std::sync::atomic::Ordering;
use tokio::sync::mpsc;
use tokio::sync::watch;
#[cfg(test)]
use codex_protocol::AgentPath;
/// Sending half of the inter-agent mailbox.
///
/// Wraps an unbounded channel plus a watch-based sequence counter so that
/// observers can notice "new mail arrived" without draining the queue.
pub(crate) struct Mailbox {
    // Delivery channel for mail payloads.
    tx: mpsc::UnboundedSender<InterAgentCommunication>,
    // Monotonic counter; `send` publishes `next_seq + 1` (sequences are 1-based).
    next_seq: AtomicU64,
    // Broadcasts the latest sequence number to subscribers.
    seq_tx: watch::Sender<u64>,
}
/// Receiving half of the inter-agent mailbox.
pub(crate) struct MailboxReceiver {
    // Raw channel receiver; drained lazily into `pending_mails`.
    rx: mpsc::UnboundedReceiver<InterAgentCommunication>,
    // Mail already pulled off the channel but not yet handed to the caller.
    pending_mails: VecDeque<InterAgentCommunication>,
}
impl Mailbox {
    /// Create a connected sender/receiver pair with the sequence counter at zero.
    pub(crate) fn new() -> (Self, MailboxReceiver) {
        let (tx, rx) = mpsc::unbounded_channel();
        let (seq_tx, _) = watch::channel(0);
        let sender = Self {
            tx,
            next_seq: AtomicU64::new(0),
            seq_tx,
        };
        let receiver = MailboxReceiver {
            rx,
            pending_mails: VecDeque::new(),
        };
        (sender, receiver)
    }
    /// Watch the most recently published sequence number (starts at 0).
    pub(crate) fn subscribe(&self) -> watch::Receiver<u64> {
        self.seq_tx.subscribe()
    }
    /// Deliver `communication` and return its 1-based sequence number, which
    /// is also published on the watch channel. A failed channel send (receiver
    /// dropped) is deliberately ignored.
    pub(crate) fn send(&self, communication: InterAgentCommunication) -> u64 {
        let assigned_seq = self.next_seq.fetch_add(1, Ordering::Relaxed) + 1;
        let _ = self.tx.send(communication);
        self.seq_tx.send_replace(assigned_seq);
        assigned_seq
    }
}
impl MailboxReceiver {
    /// Move everything currently queued on the channel into `pending_mails`.
    fn sync_pending_mails(&mut self) {
        loop {
            match self.rx.try_recv() {
                Ok(mail) => self.pending_mails.push_back(mail),
                Err(_) => break,
            }
        }
    }
    /// True when at least one undelivered mail is waiting.
    pub(crate) fn has_pending(&mut self) -> bool {
        self.sync_pending_mails();
        let waiting = self.pending_mails.len();
        waiting > 0
    }
    /// True when any waiting mail requests that a new turn be triggered.
    pub(crate) fn has_pending_trigger_turn(&mut self) -> bool {
        self.sync_pending_mails();
        self.pending_mails.iter().any(|mail| mail.trigger_turn)
    }
    /// Hand over all waiting mail in delivery order, leaving the queue empty.
    pub(crate) fn drain(&mut self) -> Vec<InterAgentCommunication> {
        self.sync_pending_mails();
        let drained: Vec<InterAgentCommunication> = self.pending_mails.drain(..).collect();
        drained
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use pretty_assertions::assert_eq;
    // Build a test mail between `author` and `recipient` with no attachments.
    fn make_mail(
        author: AgentPath,
        recipient: AgentPath,
        content: &str,
        trigger_turn: bool,
    ) -> InterAgentCommunication {
        InterAgentCommunication::new(
            author,
            recipient,
            Vec::new(),
            content.to_string(),
            trigger_turn,
        )
    }
    // Sequence numbers are 1-based and monotonic; the watch channel exposes
    // the latest one after all sends are observed.
    #[tokio::test]
    async fn mailbox_assigns_monotonic_sequence_numbers() {
        let (mailbox, _receiver) = Mailbox::new();
        let mut seq_rx = mailbox.subscribe();
        let seq_a = mailbox.send(make_mail(
            AgentPath::root(),
            AgentPath::try_from("/root/worker").expect("agent path"),
            "one",
            /*trigger_turn*/ false,
        ));
        let seq_b = mailbox.send(make_mail(
            AgentPath::root(),
            AgentPath::try_from("/root/worker").expect("agent path"),
            "two",
            /*trigger_turn*/ false,
        ));
        seq_rx.changed().await.expect("first seq update");
        // The watch channel only retains the most recent value.
        assert_eq!(*seq_rx.borrow(), seq_b);
        assert_eq!(seq_a, 1);
        assert_eq!(seq_b, 2);
    }
    // `drain` yields mail in send order and empties the pending queue.
    #[tokio::test]
    async fn mailbox_drains_in_delivery_order() {
        let (mailbox, mut receiver) = Mailbox::new();
        let mail_one = make_mail(
            AgentPath::root(),
            AgentPath::try_from("/root/worker").expect("agent path"),
            "one",
            /*trigger_turn*/ false,
        );
        let mail_two = make_mail(
            AgentPath::try_from("/root/worker").expect("agent path"),
            AgentPath::root(),
            "two",
            /*trigger_turn*/ false,
        );
        mailbox.send(mail_one.clone());
        mailbox.send(mail_two.clone());
        assert_eq!(receiver.drain(), vec![mail_one, mail_two]);
        assert!(!receiver.has_pending());
    }
    // `has_pending_trigger_turn` is only true once a trigger-turn mail queues.
    #[tokio::test]
    async fn mailbox_tracks_pending_trigger_turn_mail() {
        let (mailbox, mut receiver) = Mailbox::new();
        mailbox.send(make_mail(
            AgentPath::root(),
            AgentPath::try_from("/root/worker").expect("agent path"),
            "queued",
            /*trigger_turn*/ false,
        ));
        assert!(!receiver.has_pending_trigger_turn());
        mailbox.send(make_mail(
            AgentPath::root(),
            AgentPath::try_from("/root/worker").expect("agent path"),
            "wake",
            /*trigger_turn*/ true,
        ));
        assert!(receiver.has_pending_trigger_turn());
    }
}
pub(crate) use codex_agent_runtime::mailbox::*;

View File

@@ -1,344 +1 @@
use codex_protocol::AgentPath;
use codex_protocol::ThreadId;
use codex_protocol::error::CodexErr;
use codex_protocol::error::Result;
use codex_protocol::protocol::SessionSource;
use codex_protocol::protocol::SubAgentSource;
use rand::prelude::IndexedRandom;
use std::collections::HashMap;
use std::collections::HashSet;
use std::collections::hash_map::Entry;
use std::sync::Arc;
use std::sync::Mutex;
use std::sync::atomic::AtomicUsize;
use std::sync::atomic::Ordering;
/// This structure is used to add some limits on the multi-agent capabilities for Codex. In
/// the current implementation, it limits:
/// * Total number of sub-agents (i.e. threads) per user session
///
/// This structure is shared by all agents in the same user session (because the `AgentControl`
/// is).
#[derive(Default)]
pub(crate) struct AgentRegistry {
    // Tree of live/reserved agents plus nickname bookkeeping.
    active_agents: Mutex<ActiveAgents>,
    // Running count of reserved/spawned agents, used for limit checks.
    total_count: AtomicUsize,
}
#[derive(Default)]
struct ActiveAgents {
    // Keyed by agent path string (or `thread:<id>` when no path is known).
    agent_tree: HashMap<String, AgentMetadata>,
    // Nicknames currently in use; cleared when the pool is exhausted.
    used_agent_nicknames: HashSet<String>,
    // Times the nickname pool has been reset; feeds "X the 2nd"-style names.
    nickname_reset_count: usize,
}
/// Descriptive metadata for one agent in the session's agent tree.
#[derive(Clone, Debug, Default)]
pub(crate) struct AgentMetadata {
    // Backing thread; `None` while the slot is merely reserved.
    pub(crate) agent_id: Option<ThreadId>,
    pub(crate) agent_path: Option<AgentPath>,
    pub(crate) agent_nickname: Option<String>,
    pub(crate) agent_role: Option<String>,
    pub(crate) last_task_message: Option<String>,
}
/// Render an agent nickname. Before any pool reset (`nickname_reset_count == 0`)
/// the name is used as-is; after `n` resets the name becomes
/// `"{name} the {n+1}{suffix}"` with an English ordinal suffix.
fn format_agent_nickname(name: &str, nickname_reset_count: usize) -> String {
    if nickname_reset_count == 0 {
        return name.to_string();
    }
    let value = nickname_reset_count + 1;
    // 11th/12th/13th are irregular; otherwise the suffix follows the last digit.
    let suffix = if (11..=13).contains(&(value % 100)) {
        "th"
    } else {
        match value % 10 {
            1 => "st", // codespell:ignore
            2 => "nd", // codespell:ignore
            3 => "rd", // codespell:ignore
            _ => "th", // codespell:ignore
        }
    };
    format!("{name} the {value}{suffix}")
}
/// Nesting depth of a session: thread-spawned sub-agents carry an explicit
/// depth; every other source (including non-spawn sub-agents) is depth 0.
fn session_depth(session_source: &SessionSource) -> i32 {
    if let SessionSource::SubAgent(SubAgentSource::ThreadSpawn { depth, .. }) = session_source {
        *depth
    } else {
        0
    }
}
/// Depth a child spawned from this session would have (saturating at `i32::MAX`).
pub(crate) fn next_thread_spawn_depth(session_source: &SessionSource) -> i32 {
    let current_depth = session_depth(session_source);
    current_depth.saturating_add(1)
}
/// True when `depth` is strictly beyond the configured maximum.
pub(crate) fn exceeds_thread_spawn_depth_limit(depth: i32, max_depth: i32) -> bool {
    max_depth < depth
}
impl AgentRegistry {
    /// Reserve capacity for one new spawned thread.
    ///
    /// With a `max_threads` limit, fails with `CodexErr::AgentLimitReached`
    /// once the running total has reached it; without a limit the counter is
    /// incremented unconditionally. The returned `SpawnReservation` rolls the
    /// slot back on drop unless it is committed.
    pub(crate) fn reserve_spawn_slot(
        self: &Arc<Self>,
        max_threads: Option<usize>,
    ) -> Result<SpawnReservation> {
        if let Some(max_threads) = max_threads {
            if !self.try_increment_spawned(max_threads) {
                return Err(CodexErr::AgentLimitReached { max_threads });
            }
        } else {
            self.total_count.fetch_add(1, Ordering::AcqRel);
        }
        Ok(SpawnReservation {
            state: Arc::clone(self),
            active: true,
            reserved_agent_nickname: None,
            reserved_agent_path: None,
        })
    }
    /// Remove the agent registered under `thread_id` and, if it was a
    /// non-root (counted) agent, decrement the running total.
    pub(crate) fn release_spawned_thread(&self, thread_id: ThreadId) {
        let removed_counted_agent = {
            let mut active_agents = self
                .active_agents
                .lock()
                .unwrap_or_else(std::sync::PoisonError::into_inner);
            // Find the tree key whose metadata points at this thread.
            let removed_key = active_agents
                .agent_tree
                .iter()
                .find_map(|(key, metadata)| (metadata.agent_id == Some(thread_id)).then_some(key))
                .cloned();
            removed_key
                .and_then(|key| active_agents.agent_tree.remove(key.as_str()))
                .is_some_and(|metadata| {
                    // The root agent never counted toward the limit.
                    !metadata.agent_path.as_ref().is_some_and(AgentPath::is_root)
                })
        };
        if removed_counted_agent {
            self.total_count.fetch_sub(1, Ordering::AcqRel);
        }
    }
    /// Record the root thread under the root path, without touching the
    /// counted total. A pre-existing root entry is left untouched.
    pub(crate) fn register_root_thread(&self, thread_id: ThreadId) {
        let mut active_agents = self
            .active_agents
            .lock()
            .unwrap_or_else(std::sync::PoisonError::into_inner);
        active_agents
            .agent_tree
            .entry(AgentPath::ROOT.to_string())
            .or_insert_with(|| AgentMetadata {
                agent_id: Some(thread_id),
                agent_path: Some(AgentPath::root()),
                ..Default::default()
            });
    }
    /// Thread backing the agent at `agent_path`, if one is registered.
    pub(crate) fn agent_id_for_path(&self, agent_path: &AgentPath) -> Option<ThreadId> {
        self.active_agents
            .lock()
            .unwrap_or_else(std::sync::PoisonError::into_inner)
            .agent_tree
            .get(agent_path.as_str())
            .and_then(|metadata| metadata.agent_id)
    }
    /// Metadata snapshot for the agent backed by `thread_id`, if any.
    pub(crate) fn agent_metadata_for_thread(&self, thread_id: ThreadId) -> Option<AgentMetadata> {
        self.active_agents
            .lock()
            .unwrap_or_else(std::sync::PoisonError::into_inner)
            .agent_tree
            .values()
            .find(|metadata| metadata.agent_id == Some(thread_id))
            .cloned()
    }
    /// Snapshot of every live non-root agent (entries with a backing thread).
    pub(crate) fn live_agents(&self) -> Vec<AgentMetadata> {
        self.active_agents
            .lock()
            .unwrap_or_else(std::sync::PoisonError::into_inner)
            .agent_tree
            .values()
            .filter(|metadata| {
                metadata.agent_id.is_some()
                    && !metadata.agent_path.as_ref().is_some_and(AgentPath::is_root)
            })
            .cloned()
            .collect()
    }
    /// Overwrite the "last task message" shown for the agent on `thread_id`.
    /// No-op when the thread is not registered.
    pub(crate) fn update_last_task_message(&self, thread_id: ThreadId, last_task_message: String) {
        let mut active_agents = self
            .active_agents
            .lock()
            .unwrap_or_else(std::sync::PoisonError::into_inner);
        if let Some(metadata) = active_agents
            .agent_tree
            .values_mut()
            .find(|metadata| metadata.agent_id == Some(thread_id))
        {
            metadata.last_task_message = Some(last_task_message);
        }
    }
    /// Insert a fully-populated agent into the tree (requires an `agent_id`),
    /// keyed by its path or by `thread:<id>` when no path is set, and mark its
    /// nickname as in use.
    fn register_spawned_thread(&self, agent_metadata: AgentMetadata) {
        let Some(thread_id) = agent_metadata.agent_id else {
            return;
        };
        let mut active_agents = self
            .active_agents
            .lock()
            .unwrap_or_else(std::sync::PoisonError::into_inner);
        let key = agent_metadata
            .agent_path
            .as_ref()
            .map(ToString::to_string)
            .unwrap_or_else(|| format!("thread:{thread_id}"));
        if let Some(agent_nickname) = agent_metadata.agent_nickname.clone() {
            active_agents.used_agent_nicknames.insert(agent_nickname);
        }
        active_agents.agent_tree.insert(key, agent_metadata);
    }
    /// Pick an unused nickname (or honor `preferred` verbatim) and mark it
    /// used. When the pool is exhausted, reset it, bump the reset count
    /// (yielding "X the 2nd"-style names), and emit a metrics counter.
    /// Returns `None` only when no preference is given and `names` is empty.
    fn reserve_agent_nickname(&self, names: &[&str], preferred: Option<&str>) -> Option<String> {
        let mut active_agents = self
            .active_agents
            .lock()
            .unwrap_or_else(std::sync::PoisonError::into_inner);
        let agent_nickname = if let Some(preferred) = preferred {
            preferred.to_string()
        } else {
            if names.is_empty() {
                return None;
            }
            let available_names: Vec<String> = names
                .iter()
                .map(|name| format_agent_nickname(name, active_agents.nickname_reset_count))
                .filter(|name| !active_agents.used_agent_nicknames.contains(name))
                .collect();
            if let Some(name) = available_names.choose(&mut rand::rng()) {
                name.clone()
            } else {
                active_agents.used_agent_nicknames.clear();
                active_agents.nickname_reset_count += 1;
                if let Some(metrics) = codex_otel::global() {
                    let _ = metrics.counter(
                        "codex.multi_agent.nickname_pool_reset",
                        /*inc*/ 1,
                        &[],
                    );
                }
                format_agent_nickname(
                    names.choose(&mut rand::rng())?,
                    active_agents.nickname_reset_count,
                )
            }
        };
        active_agents
            .used_agent_nicknames
            .insert(agent_nickname.clone());
        Some(agent_nickname)
    }
    /// Claim `agent_path` with a placeholder entry (no `agent_id` yet).
    /// Errors if the path is already present in the tree.
    fn reserve_agent_path(&self, agent_path: &AgentPath) -> Result<()> {
        let mut active_agents = self
            .active_agents
            .lock()
            .unwrap_or_else(std::sync::PoisonError::into_inner);
        match active_agents.agent_tree.entry(agent_path.to_string()) {
            Entry::Occupied(_) => Err(CodexErr::UnsupportedOperation(format!(
                "agent path `{agent_path}` already exists"
            ))),
            Entry::Vacant(entry) => {
                entry.insert(AgentMetadata {
                    agent_path: Some(agent_path.clone()),
                    ..Default::default()
                });
                Ok(())
            }
        }
    }
    /// Drop a placeholder entry for `agent_path`; entries that already have a
    /// backing thread are left alone.
    fn release_reserved_agent_path(&self, agent_path: &AgentPath) {
        let mut active_agents = self
            .active_agents
            .lock()
            .unwrap_or_else(std::sync::PoisonError::into_inner);
        if active_agents
            .agent_tree
            .get(agent_path.as_str())
            .is_some_and(|metadata| metadata.agent_id.is_none())
        {
            active_agents.agent_tree.remove(agent_path.as_str());
        }
    }
    /// Atomically increment `total_count` unless it is already at
    /// `max_threads`. Lock-free CAS loop; returns whether a slot was taken.
    fn try_increment_spawned(&self, max_threads: usize) -> bool {
        let mut current = self.total_count.load(Ordering::Acquire);
        loop {
            if current >= max_threads {
                return false;
            }
            match self.total_count.compare_exchange_weak(
                current,
                current + 1,
                Ordering::AcqRel,
                Ordering::Acquire,
            ) {
                Ok(_) => return true,
                // Another thread raced us; retry with the observed value.
                Err(updated) => current = updated,
            }
        }
    }
}
/// RAII guard for a reserved spawn slot. Dropping it without `commit` rolls
/// back the counted slot and any provisionally claimed agent path.
pub(crate) struct SpawnReservation {
    state: Arc<AgentRegistry>,
    // False once committed; gates the rollback in `Drop`.
    active: bool,
    reserved_agent_nickname: Option<String>,
    reserved_agent_path: Option<AgentPath>,
}
impl SpawnReservation {
    /// Pick (and record) a nickname for the pending agent, honoring
    /// `preferred` when given. Errors when no nickname can be produced.
    pub(crate) fn reserve_agent_nickname_with_preference(
        &mut self,
        names: &[&str],
        preferred: Option<&str>,
    ) -> Result<String> {
        let agent_nickname = self
            .state
            .reserve_agent_nickname(names, preferred)
            .ok_or_else(|| {
                CodexErr::UnsupportedOperation("no available agent nicknames".to_string())
            })?;
        self.reserved_agent_nickname = Some(agent_nickname.clone());
        Ok(agent_nickname)
    }
    /// Claim `agent_path` in the registry, remembering it for drop-time rollback.
    pub(crate) fn reserve_agent_path(&mut self, agent_path: &AgentPath) -> Result<()> {
        self.state.reserve_agent_path(agent_path)?;
        self.reserved_agent_path = Some(agent_path.clone());
        Ok(())
    }
    /// Finalize: register the spawned agent and disarm the drop-time rollback
    /// (the counted slot is now owned by the live agent).
    pub(crate) fn commit(mut self, agent_metadata: AgentMetadata) {
        self.reserved_agent_nickname = None;
        self.reserved_agent_path = None;
        self.state.register_spawned_thread(agent_metadata);
        self.active = false;
    }
}
impl Drop for SpawnReservation {
    fn drop(&mut self) {
        // An uncommitted reservation rolls back: free the provisional path
        // entry (if any) and return the counted slot.
        // NOTE(review): a reserved nickname is NOT returned to the pool here,
        // so an aborted spawn leaks it until the next pool reset — confirm
        // this is intentional.
        if self.active {
            if let Some(agent_path) = self.reserved_agent_path.take() {
                self.state.release_reserved_agent_path(&agent_path);
            }
            self.state.total_count.fetch_sub(1, Ordering::AcqRel);
        }
    }
}
#[cfg(test)]
#[path = "registry_tests.rs"]
mod tests;
pub(crate) use codex_agent_runtime::registry::*;

View File

@@ -1,27 +1 @@
use codex_protocol::protocol::AgentStatus;
use codex_protocol::protocol::EventMsg;
/// Derive the next agent status from a single emitted event.
/// Returns `None` when the event does not affect status tracking.
pub(crate) fn agent_status_from_event(msg: &EventMsg) -> Option<AgentStatus> {
match msg {
EventMsg::TurnStarted(_) => Some(AgentStatus::Running),
EventMsg::TurnComplete(ev) => Some(AgentStatus::Completed(ev.last_agent_message.clone())),
EventMsg::TurnAborted(ev) => match ev.reason {
codex_protocol::protocol::TurnAbortReason::Interrupted => {
Some(AgentStatus::Interrupted)
}
_ => Some(AgentStatus::Errored(format!("{:?}", ev.reason))),
},
EventMsg::Error(ev) => Some(AgentStatus::Errored(ev.message.clone())),
EventMsg::ShutdownComplete => Some(AgentStatus::Shutdown),
_ => None,
}
}
/// A status is final once the agent can no longer resume on its own:
/// everything except `PendingInit`, `Running`, and `Interrupted`.
pub(crate) fn is_final(status: &AgentStatus) -> bool {
    match status {
        AgentStatus::PendingInit | AgentStatus::Running | AgentStatus::Interrupted => false,
        _ => true,
    }
}
pub(crate) use codex_agent_runtime::status::*;

View File

@@ -97,6 +97,7 @@ pub(crate) async fn run_codex_thread_interactive(
inherited_exec_policy: Some(Arc::clone(&parent_session.services.exec_policy)),
parent_trace: None,
analytics_events_client: Some(parent_session.services.analytics_events_client.clone()),
code_mode_runtime_factory: Arc::clone(&parent_session.services.code_mode_runtime_factory),
}))
.await?;
if parent_session.enabled(codex_features::Feature::GeneralAnalytics) {

File diff suppressed because it is too large Load Diff

View File

@@ -13,7 +13,6 @@ use crate::config_loader::McpServerRequirement;
use crate::config_loader::ResidencyRequirement;
use crate::config_loader::Sourced;
use crate::config_loader::load_config_layers_state;
use crate::config_loader::project_trust_key;
use crate::memories::memory_root;
use crate::path_utils::normalize_for_native_workdir;
use crate::unified_exec::DEFAULT_MAX_BACKGROUND_TERMINAL_TIMEOUT_MS;
@@ -95,7 +94,6 @@ use crate::config::permissions::get_readable_roots_required_for_codex_runtime;
use crate::config::permissions::network_proxy_config_from_profile_network;
use codex_network_proxy::NetworkProxyConfig;
use toml::Value as TomlValue;
use toml_edit::DocumentMut;
pub(crate) mod agent_roles;
pub mod edit;
@@ -1068,75 +1066,6 @@ fn ensure_no_inline_bearer_tokens(value: &TomlValue) -> std::io::Result<()> {
Ok(())
}
/// Mutate `doc` in place so `[projects."<key>"]` records `trust_level` for
/// `project_path`, converting any inline-table representations to explicit
/// tables along the way.
///
/// # Errors
/// Returns an error if the `projects` table (or the per-project table) cannot
/// be materialized as an explicit TOML table.
pub(crate) fn set_project_trust_level_inner(
    doc: &mut DocumentMut,
    project_path: &Path,
    trust_level: TrustLevel,
) -> anyhow::Result<()> {
    // Ensure we render a human-friendly structure:
    //
    // [projects]
    // [projects."/path/to/project"]
    // trust_level = "trusted" or "untrusted"
    //
    // rather than inline tables like:
    //
    // [projects]
    // "/path/to/project" = { trust_level = "trusted" }
    let project_key = project_trust_key(project_path);
    // Ensure top-level `projects` exists as a non-inline, explicit table. If it
    // exists but was previously represented as a non-table (e.g., inline),
    // replace it with an explicit table.
    {
        let root = doc.as_table_mut();
        // If `projects` exists but isn't a standard table (e.g., it's an inline table),
        // convert it to an explicit table while preserving existing entries.
        let existing_projects = root.get("projects").cloned();
        if existing_projects.as_ref().is_none_or(|i| !i.is_table()) {
            let mut projects_tbl = toml_edit::Table::new();
            // Implicit: don't render a bare `[projects]` header on its own.
            projects_tbl.set_implicit(true);
            // If there was an existing inline table, migrate its entries to explicit tables.
            if let Some(inline_tbl) = existing_projects.as_ref().and_then(|i| i.as_inline_table()) {
                for (k, v) in inline_tbl.iter() {
                    if let Some(inner_tbl) = v.as_inline_table() {
                        let new_tbl = inner_tbl.clone().into_table();
                        projects_tbl.insert(k, toml_edit::Item::Table(new_tbl));
                    }
                }
            }
            root.insert("projects", toml_edit::Item::Table(projects_tbl));
        }
    }
    let Some(projects_tbl) = doc["projects"].as_table_mut() else {
        return Err(anyhow::anyhow!(
            "projects table missing after initialization"
        ));
    };
    // Ensure the per-project entry is its own explicit table. If it exists but
    // is not a table (e.g., an inline table), replace it with an explicit table.
    let needs_proj_table = !projects_tbl.contains_key(project_key.as_str())
        || projects_tbl
            .get(project_key.as_str())
            .and_then(|i| i.as_table())
            .is_none();
    if needs_proj_table {
        projects_tbl.insert(project_key.as_str(), toml_edit::table());
    }
    let Some(proj_tbl) = projects_tbl
        .get_mut(project_key.as_str())
        .and_then(|i| i.as_table_mut())
    else {
        return Err(anyhow::anyhow!("project table missing for {project_key}"));
    };
    // Force the `[projects."<key>"]` header to render even with a single key.
    proj_tbl.set_implicit(false);
    proj_tbl["trust_level"] = toml_edit::value(trust_level.to_string());
    Ok(())
}
/// Patch `CODEX_HOME/config.toml` project state to set trust level.
/// Use with caution.
pub fn set_project_trust_level(
@@ -2441,5 +2370,5 @@ pub fn log_dir(cfg: &Config) -> std::io::Result<PathBuf> {
}
#[cfg(test)]
#[path = "config_tests.rs"]
#[path = "../../tests/unit/config/config_tests.rs"]
mod tests;

View File

@@ -0,0 +1,5 @@
// Facade: the loader implementation lives in the `codex-config-loader`
// crate; re-export its entire public surface under the old module path so
// existing `crate::config_loader::*` callers keep working.
pub use codex_config_loader::*;
#[cfg(test)]
#[path = "config_loader/tests.rs"]
mod tests;

View File

@@ -1,73 +0,0 @@
# `codex-core` config loader
This module is the canonical place to **load and describe Codex configuration layers** (user config, CLI/session overrides, managed config, and MDM-managed preferences) and to produce:
- An **effective merged** TOML config.
- **Per-key origins** metadata (which layer “wins” for a given key).
- **Per-layer versions** (stable fingerprints) used for optimistic concurrency / conflict detection.
## Public surface
Exported from `codex_core::config_loader`:
- `load_config_layers_state(fs, codex_home, cwd_opt, cli_overrides, overrides, cloud_requirements) -> ConfigLayerStack`
- `ConfigLayerStack`
- `effective_config() -> toml::Value`
- `origins() -> HashMap<String, ConfigLayerMetadata>`
- `layers_high_to_low() -> Vec<ConfigLayer>`
- `with_user_config(user_config) -> ConfigLayerStack`
- `ConfigLayerEntry` (one layer's `{name, config, version, disabled_reason}`; `name` carries source metadata)
- `LoaderOverrides` (test/override hooks for managed config sources)
- `merge_toml_values(base, overlay)` (public helper used elsewhere)
## Layering model
Precedence is **top overrides bottom**:
1. **MDM** managed preferences (macOS only)
2. **System** managed config (e.g. `managed_config.toml`)
3. **Session flags** (CLI overrides, applied as dotted-path TOML writes)
4. **User** config (`config.toml`)
Layers with a `disabled_reason` are still surfaced for the UI, but are ignored when
computing the effective config and origins metadata. This is what
`ConfigLayerStack::effective_config()` implements.
## Typical usage
Most callers want the effective config plus metadata:
```rust
use codex_core::config_loader::{
CloudRequirementsLoader, LoaderOverrides, load_config_layers_state,
};
use codex_exec_server::LOCAL_FS;
use codex_utils_absolute_path::AbsolutePathBuf;
use toml::Value as TomlValue;
let cli_overrides: Vec<(String, TomlValue)> = Vec::new();
let cwd = AbsolutePathBuf::current_dir()?;
let layers = load_config_layers_state(
LOCAL_FS.as_ref(),
&codex_home,
Some(cwd),
&cli_overrides,
LoaderOverrides::default(),
CloudRequirementsLoader::default(),
).await?;
let effective = layers.effective_config();
let origins = layers.origins();
let layers_for_ui = layers.layers_high_to_low();
```
## Internal layout
Implementation is split by concern:
- `state.rs`: public types (`ConfigLayerEntry`, `ConfigLayerStack`) + merge/origins convenience methods.
- `layer_io.rs`: reading `config.toml`, managed config, and managed preferences inputs.
- `overrides.rs`: CLI dotted-path overrides → TOML “session flags” layer.
- `merge.rs`: recursive TOML merge.
- `fingerprint.rs`: stable per-layer hashing and per-key origins traversal.
- `macos.rs`: managed preferences integration (macOS only).

View File

@@ -724,5 +724,5 @@ fn user_message_positions(items: &[ResponseItem]) -> Vec<usize> {
}
#[cfg(test)]
#[path = "history_tests.rs"]
#[path = "../../tests/unit/context_manager/history_tests.rs"]
mod tests;

View File

@@ -0,0 +1,13 @@
mod tools_spec {
pub(crate) use crate::tools::registry::ToolRegistryBuilder;
pub(crate) use crate::tools::spec::build_specs_with_discoverable_tools;
pub(crate) use crate::tools::spec::tool_user_shell_type;
pub(crate) use codex_mcp::ToolInfo;
pub(crate) use codex_protocol::dynamic_tools::DynamicToolSpec;
mod tests {
use std::collections::HashMap;
include!("../tests/unit/tools/spec_tests.rs");
}
}

View File

@@ -890,5 +890,5 @@ async fn collect_policy_files(dir: impl AsRef<Path>) -> Result<Vec<PathBuf>, Exe
}
#[cfg(test)]
#[path = "exec_policy_tests.rs"]
#[path = "../tests/unit/exec_policy_tests.rs"]
mod tests;

View File

@@ -1,588 +1,9 @@
//! Watches subscribed files or directories and routes coarse-grained change
//! notifications to the subscribers that own matching watched paths.
//! Compatibility re-exports for the generic file watcher crate.
use std::collections::BTreeSet;
use std::collections::HashMap;
use std::path::Path;
use std::path::PathBuf;
use std::sync::Arc;
use std::sync::Mutex;
use std::sync::RwLock;
use std::sync::atomic::AtomicUsize;
use std::sync::atomic::Ordering;
use std::time::Duration;
use notify::Event;
use notify::EventKind;
use notify::RecommendedWatcher;
use notify::RecursiveMode;
use notify::Watcher;
use tokio::runtime::Handle;
use tokio::sync::Mutex as AsyncMutex;
use tokio::sync::Notify;
use tokio::sync::mpsc;
use tokio::time::Instant;
use tokio::time::sleep_until;
use tracing::warn;
#[derive(Debug, Clone, PartialEq, Eq)]
/// Coalesced file change notification for a subscriber.
pub struct FileWatcherEvent {
    /// Changed paths delivered in sorted order with duplicates removed.
    pub paths: Vec<PathBuf>,
}
#[derive(Clone, Debug, Eq, Hash, PartialEq)]
/// Path subscription registered by a [`FileWatcherSubscriber`].
pub struct WatchPath {
    /// Root path to watch.
    pub path: PathBuf,
    /// Whether events below `path` should match recursively.
    pub recursive: bool,
}
// Monotonically assigned identifier for one subscriber.
type SubscriberId = u64;
// Bookkeeping shared by all subscribers; guarded by the watcher's `RwLock`.
#[derive(Default)]
struct WatchState {
    next_subscriber_id: SubscriberId,
    // How many subscriptions (recursive vs. non-recursive) reference each path.
    path_ref_counts: HashMap<PathBuf, PathWatchCounts>,
    subscribers: HashMap<SubscriberId, SubscriberState>,
}
// Per-subscriber registration counts plus the channel used to notify it.
struct SubscriberState {
    // Each watch path is ref-counted so duplicate registrations balance out
    // against their unregistrations.
    watched_paths: HashMap<WatchPath, usize>,
    tx: WatchSender,
}
/// Receives coalesced change notifications for a single subscriber.
pub struct Receiver {
    inner: Arc<ReceiverInner>,
}
// Sending half of the per-subscriber notification channel.
struct WatchSender {
    inner: Arc<ReceiverInner>,
}
// Shared channel core: pending paths, wakeup signal, and live sender count.
struct ReceiverInner {
    // BTreeSet keeps the pending paths sorted and deduplicated.
    changed_paths: AsyncMutex<BTreeSet<PathBuf>>,
    notify: Notify,
    sender_count: AtomicUsize,
}
impl Receiver {
    /// Waits for the next batch of changed paths, or returns `None` once the
    /// corresponding subscriber has been removed and no more events can arrive.
    pub async fn recv(&mut self) -> Option<FileWatcherEvent> {
        loop {
            // Arm the notification BEFORE inspecting shared state so a wakeup
            // that fires between the check below and `notified.await` is not
            // lost.
            let notified = self.inner.notify.notified();
            {
                let mut changed_paths = self.inner.changed_paths.lock().await;
                if !changed_paths.is_empty() {
                    // Drain the whole set; BTreeSet iteration yields the paths
                    // sorted and deduplicated.
                    return Some(FileWatcherEvent {
                        paths: std::mem::take(&mut *changed_paths).into_iter().collect(),
                    });
                }
                // No pending paths and every sender dropped: channel closed.
                if self.inner.sender_count.load(Ordering::Acquire) == 0 {
                    return None;
                }
            }
            notified.await;
        }
    }
}
impl WatchSender {
    /// Merges `paths` into the pending set and wakes the receiver only when
    /// the set actually grew (pure duplicates do not re-notify).
    async fn add_changed_paths(&self, paths: &[PathBuf]) {
        if paths.is_empty() {
            return;
        }
        let mut changed_paths = self.inner.changed_paths.lock().await;
        let previous_len = changed_paths.len();
        changed_paths.extend(paths.iter().cloned());
        if changed_paths.len() != previous_len {
            self.inner.notify.notify_one();
        }
    }
}
impl Clone for WatchSender {
    fn clone(&self) -> Self {
        // Relaxed suffices for the increment: the count only gates channel
        // closure, and the final decrement in `Drop` uses AcqRel to
        // synchronize.
        self.inner.sender_count.fetch_add(1, Ordering::Relaxed);
        Self {
            inner: Arc::clone(&self.inner),
        }
    }
}
impl Drop for WatchSender {
    fn drop(&mut self) {
        // Last sender gone: wake every parked receiver so `recv` can observe
        // the zero count and return `None`.
        if self.inner.sender_count.fetch_sub(1, Ordering::AcqRel) == 1 {
            self.inner.notify.notify_waiters();
        }
    }
}
/// Builds a connected sender/receiver pair sharing a single pending-path set,
/// wakeup signal, and live-sender count (initially one sender).
fn watch_channel() -> (WatchSender, Receiver) {
    let shared = Arc::new(ReceiverInner {
        changed_paths: AsyncMutex::new(BTreeSet::new()),
        notify: Notify::new(),
        sender_count: AtomicUsize::new(1),
    });
    let sender = WatchSender {
        inner: Arc::clone(&shared),
    };
    let receiver = Receiver { inner: shared };
    (sender, receiver)
}
// Ref-counts for one watched path, split by subscription mode.
#[derive(Clone, Copy, Debug, Default, PartialEq, Eq)]
struct PathWatchCounts {
    non_recursive: usize,
    recursive: usize,
}
impl PathWatchCounts {
    // Bumps whichever counter `recursive` selects by `amount`.
    fn increment(&mut self, recursive: bool, amount: usize) {
        let slot = if recursive {
            &mut self.recursive
        } else {
            &mut self.non_recursive
        };
        *slot += amount;
    }
    // Saturating decrement of whichever counter `recursive` selects.
    fn decrement(&mut self, recursive: bool, amount: usize) {
        let slot = if recursive {
            &mut self.recursive
        } else {
            &mut self.non_recursive
        };
        *slot = slot.saturating_sub(amount);
    }
    // Recursive demand wins over non-recursive; `None` when nothing remains.
    fn effective_mode(self) -> Option<RecursiveMode> {
        match (self.recursive, self.non_recursive) {
            (recursive, _) if recursive > 0 => Some(RecursiveMode::Recursive),
            (_, non_recursive) if non_recursive > 0 => Some(RecursiveMode::NonRecursive),
            _ => None,
        }
    }
    fn is_empty(self) -> bool {
        self.recursive == 0 && self.non_recursive == 0
    }
}
// State guarded by the watcher mutex: the OS watcher handle plus the mode
// each path is currently registered with.
struct FileWatcherInner {
    watcher: RecommendedWatcher,
    watched_paths: HashMap<PathBuf, RecursiveMode>,
}
/// Coalesces bursts of watch notifications and emits at most once per interval.
pub struct ThrottledWatchReceiver {
    rx: Receiver,
    interval: Duration,
    // Earliest instant the next event may be emitted; `None` until the first
    // event has been delivered.
    next_allowed: Option<Instant>,
}
impl ThrottledWatchReceiver {
    /// Creates a throttling wrapper around a raw watcher [`Receiver`].
    pub fn new(rx: Receiver, interval: Duration) -> Self {
        Self {
            rx,
            interval,
            next_allowed: None,
        }
    }
    /// Receives the next event, enforcing the configured minimum delay after
    /// the previous emission.
    pub async fn recv(&mut self) -> Option<FileWatcherEvent> {
        if let Some(next_allowed) = self.next_allowed {
            sleep_until(next_allowed).await;
        }
        let event = self.rx.recv().await;
        if event.is_some() {
            // Start the cool-down only when something was actually emitted.
            self.next_allowed = Some(Instant::now() + self.interval);
        }
        event
    }
}
/// Handle used to register watched paths for one logical consumer.
pub struct FileWatcherSubscriber {
    id: SubscriberId,
    file_watcher: Arc<FileWatcher>,
}
impl FileWatcherSubscriber {
    /// Registers the provided paths for this subscriber and returns an RAII
    /// guard that unregisters them on drop.
    pub fn register_paths(&self, watched_paths: Vec<WatchPath>) -> WatchRegistration {
        // Deduplicate first so the guard later decrements each ref-count
        // exactly as many times as it was incremented here.
        let watched_paths = dedupe_watched_paths(watched_paths);
        self.file_watcher.register_paths(self.id, &watched_paths);
        WatchRegistration {
            // Weak reference: a registration must not keep the watcher alive.
            file_watcher: Arc::downgrade(&self.file_watcher),
            subscriber_id: self.id,
            watched_paths,
        }
    }
    // Test convenience: register a single path.
    #[cfg(test)]
    pub(crate) fn register_path(&self, path: PathBuf, recursive: bool) -> WatchRegistration {
        self.register_paths(vec![WatchPath { path, recursive }])
    }
}
impl Drop for FileWatcherSubscriber {
    fn drop(&mut self) {
        // Removing the subscriber also releases all of its path ref-counts.
        self.file_watcher.remove_subscriber(self.id);
    }
}
/// RAII guard for a set of active path registrations.
pub struct WatchRegistration {
    file_watcher: std::sync::Weak<FileWatcher>,
    subscriber_id: SubscriberId,
    watched_paths: Vec<WatchPath>,
}
impl Default for WatchRegistration {
    // An inert guard: the dangling Weak means drop unregisters nothing.
    fn default() -> Self {
        Self {
            file_watcher: std::sync::Weak::new(),
            subscriber_id: 0,
            watched_paths: Vec::new(),
        }
    }
}
impl Drop for WatchRegistration {
    fn drop(&mut self) {
        // Best effort: if the watcher is already gone there is nothing to undo.
        if let Some(file_watcher) = self.file_watcher.upgrade() {
            file_watcher.unregister_paths(self.subscriber_id, &self.watched_paths);
        }
    }
}
/// Multi-subscriber file watcher built on top of `notify`.
pub struct FileWatcher {
    // `None` in the no-op variant: ref-count bookkeeping still runs, but no
    // path is ever registered with the OS.
    inner: Option<Mutex<FileWatcherInner>>,
    state: Arc<RwLock<WatchState>>,
}
impl FileWatcher {
    /// Creates a live filesystem watcher and starts its background event loop
    /// on the current Tokio runtime.
    pub fn new() -> notify::Result<Self> {
        let (raw_tx, raw_rx) = mpsc::unbounded_channel();
        let raw_tx_clone = raw_tx;
        // `notify` invokes this callback from its own thread; bridge the raw
        // results into the async world through the unbounded channel.
        let watcher = notify::recommended_watcher(move |res| {
            let _ = raw_tx_clone.send(res);
        })?;
        let inner = FileWatcherInner {
            watcher,
            watched_paths: HashMap::new(),
        };
        let state = Arc::new(RwLock::new(WatchState::default()));
        let file_watcher = Self {
            inner: Some(Mutex::new(inner)),
            state,
        };
        file_watcher.spawn_event_loop(raw_rx);
        Ok(file_watcher)
    }
    /// Creates an inert watcher that only supports test-driven synthetic
    /// notifications.
    pub fn noop() -> Self {
        Self {
            inner: None,
            state: Arc::new(RwLock::new(WatchState::default())),
        }
    }
    /// Adds a new subscriber and returns both its registration handle and its
    /// dedicated event receiver.
    pub fn add_subscriber(self: &Arc<Self>) -> (FileWatcherSubscriber, Receiver) {
        let (tx, rx) = watch_channel();
        let mut state = self
            .state
            .write()
            .unwrap_or_else(std::sync::PoisonError::into_inner);
        let subscriber_id = state.next_subscriber_id;
        state.next_subscriber_id += 1;
        state.subscribers.insert(
            subscriber_id,
            SubscriberState {
                watched_paths: HashMap::new(),
                tx,
            },
        );
        let subscriber = FileWatcherSubscriber {
            id: subscriber_id,
            file_watcher: self.clone(),
        };
        (subscriber, rx)
    }
    // Increments per-subscriber and per-path ref-counts for each watch path,
    // reconfiguring the OS watch whenever a path's effective mode changes.
    fn register_paths(&self, subscriber_id: SubscriberId, watched_paths: &[WatchPath]) {
        let mut state = self
            .state
            .write()
            .unwrap_or_else(std::sync::PoisonError::into_inner);
        // The inner watcher mutex is taken lazily (only when a mode actually
        // changes) and always AFTER the state lock — keep that order.
        let mut inner_guard: Option<std::sync::MutexGuard<'_, FileWatcherInner>> = None;
        for watched_path in watched_paths {
            {
                // Subscriber already removed: nothing left to register.
                let Some(subscriber) = state.subscribers.get_mut(&subscriber_id) else {
                    return;
                };
                *subscriber
                    .watched_paths
                    .entry(watched_path.clone())
                    .or_default() += 1;
            }
            let counts = state
                .path_ref_counts
                .entry(watched_path.path.clone())
                .or_default();
            let previous_mode = counts.effective_mode();
            counts.increment(watched_path.recursive, /*amount*/ 1);
            let next_mode = counts.effective_mode();
            if previous_mode != next_mode {
                self.reconfigure_watch(&watched_path.path, next_mode, &mut inner_guard);
            }
        }
    }
    // Bridge `notify`'s callback-based events into the Tokio runtime and
    // notify the matching subscribers.
    fn spawn_event_loop(&self, mut raw_rx: mpsc::UnboundedReceiver<notify::Result<Event>>) {
        if let Ok(handle) = Handle::try_current() {
            let state = Arc::clone(&self.state);
            handle.spawn(async move {
                loop {
                    match raw_rx.recv().await {
                        Some(Ok(event)) => {
                            // Drop events that cannot change file contents or
                            // directory structure (see `is_mutating_event`).
                            if !is_mutating_event(&event) {
                                continue;
                            }
                            if event.paths.is_empty() {
                                continue;
                            }
                            Self::notify_subscribers(&state, &event.paths).await;
                        }
                        Some(Err(err)) => {
                            warn!("file watcher error: {err}");
                        }
                        // Channel closed: the watcher side was dropped.
                        None => break,
                    }
                }
            });
        } else {
            warn!("file watcher loop skipped: no Tokio runtime available");
        }
    }
    // Mirror image of `register_paths`: decrements ref-counts and drops the
    // OS watch once a path's effective mode changes or disappears.
    fn unregister_paths(&self, subscriber_id: SubscriberId, watched_paths: &[WatchPath]) {
        let mut state = self
            .state
            .write()
            .unwrap_or_else(std::sync::PoisonError::into_inner);
        let mut inner_guard: Option<std::sync::MutexGuard<'_, FileWatcherInner>> = None;
        for watched_path in watched_paths {
            {
                let Some(subscriber) = state.subscribers.get_mut(&subscriber_id) else {
                    return;
                };
                let Some(subscriber_count) = subscriber.watched_paths.get_mut(watched_path) else {
                    continue;
                };
                *subscriber_count = subscriber_count.saturating_sub(1);
                if *subscriber_count == 0 {
                    subscriber.watched_paths.remove(watched_path);
                }
            }
            let Some(counts) = state.path_ref_counts.get_mut(&watched_path.path) else {
                continue;
            };
            let previous_mode = counts.effective_mode();
            counts.decrement(watched_path.recursive, /*amount*/ 1);
            let next_mode = counts.effective_mode();
            if counts.is_empty() {
                state.path_ref_counts.remove(&watched_path.path);
            }
            if previous_mode != next_mode {
                self.reconfigure_watch(&watched_path.path, next_mode, &mut inner_guard);
            }
        }
    }
    // Drops a subscriber and releases every ref-count it still held.
    fn remove_subscriber(&self, subscriber_id: SubscriberId) {
        let mut state = self
            .state
            .write()
            .unwrap_or_else(std::sync::PoisonError::into_inner);
        let Some(subscriber) = state.subscribers.remove(&subscriber_id) else {
            return;
        };
        let mut inner_guard: Option<std::sync::MutexGuard<'_, FileWatcherInner>> = None;
        for (watched_path, count) in subscriber.watched_paths {
            let Some(path_counts) = state.path_ref_counts.get_mut(&watched_path.path) else {
                continue;
            };
            let previous_mode = path_counts.effective_mode();
            // Release the subscriber's full remaining count at once.
            path_counts.decrement(watched_path.recursive, count);
            let next_mode = path_counts.effective_mode();
            if path_counts.is_empty() {
                state.path_ref_counts.remove(&watched_path.path);
            }
            if previous_mode != next_mode {
                self.reconfigure_watch(&watched_path.path, next_mode, &mut inner_guard);
            }
        }
    }
    // Applies a mode change for `path` to the OS watcher. The mutex guard is
    // cached in `inner_guard` so one lock acquisition covers a whole batch of
    // register/unregister calls.
    fn reconfigure_watch<'a>(
        &'a self,
        path: &Path,
        next_mode: Option<RecursiveMode>,
        inner_guard: &mut Option<std::sync::MutexGuard<'a, FileWatcherInner>>,
    ) {
        // No-op watcher: nothing to reconfigure.
        let Some(inner) = &self.inner else {
            return;
        };
        if inner_guard.is_none() {
            let guard = inner
                .lock()
                .unwrap_or_else(std::sync::PoisonError::into_inner);
            *inner_guard = Some(guard);
        }
        let Some(guard) = inner_guard.as_mut() else {
            return;
        };
        let existing_mode = guard.watched_paths.get(path).copied();
        if existing_mode == next_mode {
            return;
        }
        // Mode changes are implemented as unwatch + rewatch.
        if existing_mode.is_some() {
            if let Err(err) = guard.watcher.unwatch(path) {
                warn!("failed to unwatch {}: {err}", path.display());
            }
            guard.watched_paths.remove(path);
        }
        let Some(next_mode) = next_mode else {
            return;
        };
        // Watching a missing path would fail; skip silently.
        if !path.exists() {
            return;
        }
        if let Err(err) = guard.watcher.watch(path, next_mode) {
            warn!("failed to watch {}: {err}", path.display());
            return;
        }
        guard.watched_paths.insert(path.to_path_buf(), next_mode);
    }
    // Fans one raw event's paths out to every subscriber with a matching
    // watch path.
    async fn notify_subscribers(state: &RwLock<WatchState>, event_paths: &[PathBuf]) {
        // Snapshot the matching (sender, paths) pairs while holding the std
        // read lock, then release it BEFORE awaiting the async sends.
        let subscribers_to_notify: Vec<(WatchSender, Vec<PathBuf>)> = {
            let state = state
                .read()
                .unwrap_or_else(std::sync::PoisonError::into_inner);
            state
                .subscribers
                .values()
                .filter_map(|subscriber| {
                    let changed_paths: Vec<PathBuf> = event_paths
                        .iter()
                        .filter(|event_path| {
                            subscriber.watched_paths.keys().any(|watched_path| {
                                watch_path_matches_event(watched_path, event_path)
                            })
                        })
                        .cloned()
                        .collect();
                    (!changed_paths.is_empty()).then_some((subscriber.tx.clone(), changed_paths))
                })
                .collect()
        };
        for (subscriber, changed_paths) in subscribers_to_notify {
            subscriber.add_changed_paths(&changed_paths).await;
        }
    }
    // Test hook: inject synthetic changed paths without an OS event.
    #[cfg(test)]
    pub(crate) async fn send_paths_for_test(&self, paths: Vec<PathBuf>) {
        Self::notify_subscribers(&self.state, &paths).await;
    }
    // Test hook: drive the event loop from a caller-supplied channel.
    #[cfg(test)]
    pub(crate) fn spawn_event_loop_for_test(
        &self,
        raw_rx: mpsc::UnboundedReceiver<notify::Result<Event>>,
    ) {
        self.spawn_event_loop(raw_rx);
    }
    // Test hook: inspect (non_recursive, recursive) ref-counts for a path.
    #[cfg(test)]
    pub(crate) fn watch_counts_for_test(&self, path: &Path) -> Option<(usize, usize)> {
        let state = self
            .state
            .read()
            .unwrap_or_else(std::sync::PoisonError::into_inner);
        state
            .path_ref_counts
            .get(path)
            .map(|counts| (counts.non_recursive, counts.recursive))
    }
}
// True for events that can change file contents or directory structure;
// access/metadata-only notifications are filtered out.
fn is_mutating_event(event: &Event) -> bool {
    matches!(
        event.kind,
        EventKind::Create(_) | EventKind::Modify(_) | EventKind::Remove(_)
    )
}
/// Sorts the subscriptions by path (then by the recursive flag) and removes
/// exact duplicates so one registration never double-counts a subscription.
fn dedupe_watched_paths(mut watched_paths: Vec<WatchPath>) -> Vec<WatchPath> {
    watched_paths.sort_unstable_by(|left, right| {
        let by_path = left.path.as_os_str().cmp(right.path.as_os_str());
        by_path.then_with(|| left.recursive.cmp(&right.recursive))
    });
    watched_paths.dedup();
    watched_paths
}
/// Decides whether an event at `event_path` is relevant to `watched_path`.
///
/// Matches when the paths are equal, when the event happened at an ancestor
/// of the watch root (e.g. the root itself was moved or removed via a parent),
/// or when the event lies under the root — at any depth for recursive watches,
/// direct children only otherwise.
fn watch_path_matches_event(watched_path: &WatchPath, event_path: &Path) -> bool {
    let root = watched_path.path.as_path();
    if event_path == root || root.starts_with(event_path) {
        return true;
    }
    if !event_path.starts_with(root) {
        return false;
    }
    if watched_path.recursive {
        return true;
    }
    event_path.parent() == Some(root)
}
#[cfg(test)]
#[path = "file_watcher_tests.rs"]
mod tests;
// Compatibility re-exports: the implementation now lives in the
// `codex-file-watcher` crate; callers of the old module paths keep compiling.
pub use codex_file_watcher::FileWatcher;
pub use codex_file_watcher::FileWatcherEvent;
pub use codex_file_watcher::FileWatcherSubscriber;
pub use codex_file_watcher::Receiver;
pub use codex_file_watcher::ThrottledWatchReceiver;
pub use codex_file_watcher::WatchPath;
pub use codex_file_watcher::WatchRegistration;

View File

@@ -106,4 +106,5 @@ use review::run_guardian_review_session as run_guardian_review_session_for_test;
use review_session::build_guardian_review_session_config as build_guardian_review_session_config_for_test;
#[cfg(test)]
#[path = "../../tests/unit/guardian/tests.rs"]
mod tests;

View File

@@ -1,6 +1,6 @@
---
source: core/src/guardian/tests.rs
expression: "format!(\"{}\\n\\nshared_prompt_cache_key: {}\\nfollowup_contains_first_rationale: {}\",\ncontext_snapshot::format_labeled_requests_snapshot(\"Guardian follow-up review request layout\",\n&[(\"Initial Guardian Review Request\", &requests[0]),\n(\"Follow-up Guardian Review Request\", &requests[1]),],\n&guardian_snapshot_options(),), first_body[\"prompt_cache_key\"] ==\nsecond_body[\"prompt_cache_key\"],\nsecond_body.to_string().contains(first_rationale),)"
source: core/tests/unit/guardian/tests.rs
expression: snapshot
---
Scenario: Guardian follow-up review request layout

View File

@@ -1,6 +1,6 @@
---
source: core/src/guardian/tests.rs
expression: "context_snapshot::format_labeled_requests_snapshot(\"Guardian review request layout\",\n&[(\"Guardian Review Request\", &request)], &guardian_snapshot_options(),)"
source: core/tests/unit/guardian/tests.rs
expression: snapshot
---
Scenario: Guardian review request layout

View File

@@ -201,3 +201,6 @@ pub(crate) mod memory_trace;
pub use memory_trace::BuiltMemory;
pub use memory_trace::build_memories_from_trace_files;
pub mod otel_init;
#[cfg(test)]
mod core_unit_tests;

View File

@@ -1,371 +1,4 @@
use std::collections::HashSet;
use std::sync::LazyLock;
//! Compatibility re-exports for MCP approval template rendering.
use serde::Deserialize;
use serde::Serialize;
use serde_json::Map;
use serde_json::Value;
use tracing::warn;
/// Schema version this binary understands; files with any other version are
/// rejected at load time.
const CONSEQUENTIAL_TOOL_MESSAGE_TEMPLATES_SCHEMA_VERSION: u8 = 4;
/// Placeholder in template text that is substituted with the connector name.
const CONNECTOR_NAME_TEMPLATE_VAR: &str = "{connector_name}";
// Parsed once on first use; `None` when the bundled JSON fails to parse or
// carries an unexpected schema version.
static CONSEQUENTIAL_TOOL_MESSAGE_TEMPLATES: LazyLock<
    Option<Vec<ConsequentialToolMessageTemplate>>,
> = LazyLock::new(load_consequential_tool_message_templates);
#[derive(Clone, Debug, PartialEq)]
// Fully rendered approval prompt for a consequential MCP tool call.
pub(crate) struct RenderedMcpToolApprovalTemplate {
    pub(crate) question: String,
    pub(crate) elicitation_message: String,
    // Full tool params echoed back as a JSON object, when one was supplied.
    pub(crate) tool_params: Option<Value>,
    // Parameters in display order, with human-readable labels where available.
    pub(crate) tool_params_display: Vec<RenderedMcpToolApprovalParam>,
}
#[derive(Clone, Debug, PartialEq, Serialize)]
// One parameter row shown to the user in the approval dialog.
pub(crate) struct RenderedMcpToolApprovalParam {
    pub(crate) name: String,
    pub(crate) value: Value,
    pub(crate) display_name: String,
}
#[derive(Debug, Deserialize)]
// On-disk shape of the bundled templates JSON file.
struct ConsequentialToolMessageTemplatesFile {
    schema_version: u8,
    templates: Vec<ConsequentialToolMessageTemplate>,
}
#[derive(Clone, Debug, Deserialize, PartialEq, Eq)]
// One template; matched by exact (server_name, connector_id, tool_title).
struct ConsequentialToolMessageTemplate {
    connector_id: String,
    server_name: String,
    tool_title: String,
    // Question text; may contain the `{connector_name}` placeholder.
    template: String,
    template_params: Vec<ConsequentialToolTemplateParam>,
}
#[derive(Clone, Debug, Deserialize, PartialEq, Eq)]
// Maps a raw tool parameter name to a human-readable label.
struct ConsequentialToolTemplateParam {
    name: String,
    label: String,
}
/// Renders the approval template matching the given server/connector/tool
/// using the bundled template set; `None` when the set failed to load or no
/// template matches.
pub(crate) fn render_mcp_tool_approval_template(
    server_name: &str,
    connector_id: Option<&str>,
    connector_name: Option<&str>,
    tool_title: Option<&str>,
    tool_params: Option<&Value>,
) -> Option<RenderedMcpToolApprovalTemplate> {
    let templates = CONSEQUENTIAL_TOOL_MESSAGE_TEMPLATES.as_ref()?;
    render_mcp_tool_approval_template_from_templates(
        templates,
        server_name,
        connector_id,
        connector_name,
        tool_title,
        tool_params,
    )
}
// Parses the bundled templates JSON. Degrades to `None` (with a warning)
// rather than failing hard when the asset is malformed or its schema version
// does not match this binary.
fn load_consequential_tool_message_templates() -> Option<Vec<ConsequentialToolMessageTemplate>> {
    let templates = match serde_json::from_str::<ConsequentialToolMessageTemplatesFile>(
        include_str!("consequential_tool_message_templates.json"),
    ) {
        Ok(templates) => templates,
        Err(err) => {
            warn!(error = %err, "failed to parse consequential tool approval templates");
            return None;
        }
    };
    if templates.schema_version != CONSEQUENTIAL_TOOL_MESSAGE_TEMPLATES_SCHEMA_VERSION {
        warn!(
            found_schema_version = templates.schema_version,
            expected_schema_version = CONSEQUENTIAL_TOOL_MESSAGE_TEMPLATES_SCHEMA_VERSION,
            "unexpected consequential tool approval templates schema version"
        );
        return None;
    }
    Some(templates.templates)
}
// Core matching/rendering logic, parameterized over the template set for
// testability. Requires an exact (server_name, connector_id, tool_title)
// match and returns `None` whenever anything required for rendering is
// missing or malformed.
fn render_mcp_tool_approval_template_from_templates(
    templates: &[ConsequentialToolMessageTemplate],
    server_name: &str,
    connector_id: Option<&str>,
    connector_name: Option<&str>,
    tool_title: Option<&str>,
    tool_params: Option<&Value>,
) -> Option<RenderedMcpToolApprovalTemplate> {
    let connector_id = connector_id?;
    // The tool title must be present and non-blank to participate in matching.
    let tool_title = tool_title.map(str::trim).filter(|name| !name.is_empty())?;
    let template = templates.iter().find(|template| {
        template.server_name == server_name
            && template.connector_id == connector_id
            && template.tool_title == tool_title
    })?;
    let elicitation_message = render_question_template(&template.template, connector_name)?;
    let (tool_params, tool_params_display) = match tool_params {
        Some(Value::Object(tool_params)) => {
            render_tool_params(tool_params, &template.template_params)?
        }
        // Non-object params cannot be labeled per-field; refuse to render.
        Some(_) => return None,
        None => (None, Vec::new()),
    };
    Some(RenderedMcpToolApprovalTemplate {
        // The question mirrors the elicitation message verbatim.
        question: elicitation_message.clone(),
        elicitation_message,
        tool_params,
        tool_params_display,
    })
}
/// Renders a template's question text. Blank templates yield `None`; when the
/// template contains the `{connector_name}` placeholder, a non-blank connector
/// name is required and substituted in.
fn render_question_template(template: &str, connector_name: Option<&str>) -> Option<String> {
    let trimmed = template.trim();
    if trimmed.is_empty() {
        return None;
    }
    if !trimmed.contains(CONNECTOR_NAME_TEMPLATE_VAR) {
        // Literal template: no substitution needed.
        return Some(trimmed.to_string());
    }
    let name = connector_name.map(str::trim).filter(|name| !name.is_empty())?;
    Some(trimmed.replace(CONNECTOR_NAME_TEMPLATE_VAR, name))
}
/// Splits `tool_params` into the full params JSON plus an ordered display list.
///
/// Template-declared parameters come first with their human-readable labels;
/// all remaining parameters follow sorted by name, labeled by their raw name.
/// Returns `None` when a template label is blank or any display name would
/// collide (which would make the approval dialog ambiguous).
fn render_tool_params(
    tool_params: &Map<String, Value>,
    template_params: &[ConsequentialToolTemplateParam],
) -> Option<(Option<Value>, Vec<RenderedMcpToolApprovalParam>)> {
    let mut display_params = Vec::new();
    // Labels already emitted, used to detect display-name collisions.
    let mut display_names = HashSet::new();
    // Raw param names consumed by the template, excluded from the fallback pass.
    let mut handled_names = HashSet::new();
    for template_param in template_params {
        let label = template_param.label.trim();
        if label.is_empty() {
            return None;
        }
        // A template may reference params the caller did not supply; skip those.
        let Some(value) = tool_params.get(&template_param.name) else {
            continue;
        };
        if !display_names.insert(label.to_string()) {
            return None;
        }
        display_params.push(RenderedMcpToolApprovalParam {
            name: template_param.name.clone(),
            value: value.clone(),
            display_name: label.to_string(),
        });
        handled_names.insert(template_param.name.as_str());
    }
    // Everything not consumed above, in stable name order.
    let mut remaining_params = tool_params
        .iter()
        .filter(|(name, _)| !handled_names.contains(name.as_str()))
        .collect::<Vec<_>>();
    remaining_params.sort_by(|(left_name, _), (right_name, _)| left_name.cmp(right_name));
    // Note: `remaining_params` is already filtered by `handled_names`, so no
    // per-item re-check is needed here (the previous re-check was dead code).
    for (name, value) in remaining_params {
        if !display_names.insert(name.clone()) {
            return None;
        }
        display_params.push(RenderedMcpToolApprovalParam {
            name: name.clone(),
            value: value.clone(),
            display_name: name.clone(),
        });
    }
    Some((Some(Value::Object(tool_params.clone())), display_params))
}
#[cfg(test)]
mod tests {
use pretty_assertions::assert_eq;
use serde_json::json;
use super::*;
#[test]
fn renders_exact_match_with_readable_param_labels() {
let templates = vec![ConsequentialToolMessageTemplate {
connector_id: "calendar".to_string(),
server_name: "codex_apps".to_string(),
tool_title: "create_event".to_string(),
template: "Allow {connector_name} to create an event?".to_string(),
template_params: vec![
ConsequentialToolTemplateParam {
name: "calendar_id".to_string(),
label: "Calendar".to_string(),
},
ConsequentialToolTemplateParam {
name: "title".to_string(),
label: "Title".to_string(),
},
],
}];
let rendered = render_mcp_tool_approval_template_from_templates(
&templates,
"codex_apps",
Some("calendar"),
Some("Calendar"),
Some("create_event"),
Some(&json!({
"title": "Roadmap review",
"calendar_id": "primary",
"timezone": "UTC",
})),
);
assert_eq!(
rendered,
Some(RenderedMcpToolApprovalTemplate {
question: "Allow Calendar to create an event?".to_string(),
elicitation_message: "Allow Calendar to create an event?".to_string(),
tool_params: Some(json!({
"title": "Roadmap review",
"calendar_id": "primary",
"timezone": "UTC",
})),
tool_params_display: vec![
RenderedMcpToolApprovalParam {
name: "calendar_id".to_string(),
value: json!("primary"),
display_name: "Calendar".to_string(),
},
RenderedMcpToolApprovalParam {
name: "title".to_string(),
value: json!("Roadmap review"),
display_name: "Title".to_string(),
},
RenderedMcpToolApprovalParam {
name: "timezone".to_string(),
value: json!("UTC"),
display_name: "timezone".to_string(),
},
],
})
);
}
#[test]
fn returns_none_when_no_exact_match_exists() {
let templates = vec![ConsequentialToolMessageTemplate {
connector_id: "calendar".to_string(),
server_name: "codex_apps".to_string(),
tool_title: "create_event".to_string(),
template: "Allow {connector_name} to create an event?".to_string(),
template_params: Vec::new(),
}];
assert_eq!(
render_mcp_tool_approval_template_from_templates(
&templates,
"codex_apps",
Some("calendar"),
Some("Calendar"),
Some("delete_event"),
Some(&json!({})),
),
None
);
}
#[test]
fn returns_none_when_relabeling_would_collide() {
let templates = vec![ConsequentialToolMessageTemplate {
connector_id: "calendar".to_string(),
server_name: "codex_apps".to_string(),
tool_title: "create_event".to_string(),
template: "Allow {connector_name} to create an event?".to_string(),
template_params: vec![ConsequentialToolTemplateParam {
name: "calendar_id".to_string(),
label: "timezone".to_string(),
}],
}];
assert_eq!(
render_mcp_tool_approval_template_from_templates(
&templates,
"codex_apps",
Some("calendar"),
Some("Calendar"),
Some("create_event"),
Some(&json!({
"calendar_id": "primary",
"timezone": "UTC",
})),
),
None
);
}
#[test]
fn bundled_templates_load() {
assert_eq!(CONSEQUENTIAL_TOOL_MESSAGE_TEMPLATES.is_some(), true);
}
#[test]
fn renders_literal_template_without_connector_substitution() {
let templates = vec![ConsequentialToolMessageTemplate {
connector_id: "github".to_string(),
server_name: "codex_apps".to_string(),
tool_title: "add_comment".to_string(),
template: "Allow GitHub to add a comment to a pull request?".to_string(),
template_params: Vec::new(),
}];
let rendered = render_mcp_tool_approval_template_from_templates(
&templates,
"codex_apps",
Some("github"),
/*connector_name*/ None,
Some("add_comment"),
Some(&json!({})),
);
assert_eq!(
rendered,
Some(RenderedMcpToolApprovalTemplate {
question: "Allow GitHub to add a comment to a pull request?".to_string(),
elicitation_message: "Allow GitHub to add a comment to a pull request?".to_string(),
tool_params: Some(json!({})),
tool_params_display: Vec::new(),
})
);
}
#[test]
fn returns_none_when_connector_placeholder_has_no_value() {
let templates = vec![ConsequentialToolMessageTemplate {
connector_id: "calendar".to_string(),
server_name: "codex_apps".to_string(),
tool_title: "create_event".to_string(),
template: "Allow {connector_name} to create an event?".to_string(),
template_params: Vec::new(),
}];
assert_eq!(
render_mcp_tool_approval_template_from_templates(
&templates,
"codex_apps",
Some("calendar"),
/*connector_name*/ None,
Some("create_event"),
Some(&json!({})),
),
None
);
}
}
// Compatibility shims: the rendering implementation moved to the
// `codex-mcp-tool-approval` crate; keep the old crate-internal paths working.
pub(crate) use codex_mcp_tool_approval::RenderedMcpToolApprovalParam;
pub(crate) use codex_mcp_tool_approval::render_mcp_tool_approval_template;

View File

@@ -4,7 +4,7 @@ This module runs a startup memory pipeline for eligible sessions.
## Prompt Templates
Memory prompt templates live under `codex-rs/core/templates/memories/`.
Memory prompt templates live under `codex-rs/memory-prompts/src/`.
- The undated template files are the canonical latest versions used at runtime:
- `stage_one_system.md`

View File

@@ -39,7 +39,7 @@ mod phase_one {
/// Default reasoning effort used for phase 1.
pub(super) const REASONING_EFFORT: super::ReasoningEffort = super::ReasoningEffort::Low;
/// Prompt used for phase 1.
pub(super) const PROMPT: &str = include_str!("../../templates/memories/stage_one_system.md");
pub(super) const PROMPT: &str = codex_memory_prompts::STAGE_ONE_SYSTEM_PROMPT;
/// Concurrency cap for startup memory extraction and consolidation scheduling.
pub(super) const CONCURRENCY_LIMIT: usize = 8;
/// Fallback stage-1 rollout truncation limit (tokens) when model metadata

View File

@@ -20,19 +20,19 @@ use tracing::warn;
static CONSOLIDATION_PROMPT_TEMPLATE: LazyLock<Template> = LazyLock::new(|| {
parse_embedded_template(
include_str!("../../templates/memories/consolidation.md"),
codex_memory_prompts::CONSOLIDATION_PROMPT,
"memories/consolidation.md",
)
});
static STAGE_ONE_INPUT_TEMPLATE: LazyLock<Template> = LazyLock::new(|| {
parse_embedded_template(
include_str!("../../templates/memories/stage_one_input.md"),
codex_memory_prompts::STAGE_ONE_INPUT_PROMPT,
"memories/stage_one_input.md",
)
});
static MEMORY_TOOL_DEVELOPER_INSTRUCTIONS_TEMPLATE: LazyLock<Template> = LazyLock::new(|| {
parse_embedded_template(
include_str!("../../templates/memories/read_path.md"),
codex_memory_prompts::READ_PATH_PROMPT,
"memories/read_path.md",
)
});

View File

@@ -1,418 +1,26 @@
//! Persistence layer for the global, append-only *message history* file.
//!
//! The history is stored at `~/.codex/history.jsonl` with **one JSON object per
//! line** so that it can be efficiently appended to and parsed with standard
//! JSON-Lines tooling. Each record has the following schema:
//!
//! ````text
//! {"conversation_id":"<uuid>","ts":<unix_seconds>,"text":"<message>"}
//! ````
//!
//! To minimise the chance of interleaved writes when multiple processes are
//! appending concurrently, callers should *prepare the full line* (record +
//! trailing `\n`) and write it with a **single `write(2)` system call** while
//! the file descriptor is opened with the `O_APPEND` flag. POSIX guarantees
//! that writes up to `PIPE_BUF` bytes are atomic in that case.
//! Note: `conversation_id` stores the thread id; the field name is preserved for
//! backwards compatibility with existing history files.
use std::fs::File;
use std::fs::OpenOptions;
use std::io::BufRead;
use std::io::BufReader;
use std::io::Read;
use std::io::Result;
use std::io::Seek;
use std::io::SeekFrom;
use std::io::Write;
use std::path::Path;
use serde::Deserialize;
use serde::Serialize;
use std::time::Duration;
use tokio::fs;
use tokio::io::AsyncReadExt;
use crate::config::Config;
use codex_config::types::HistoryPersistence;
use codex_utils_absolute_path::AbsolutePathBuf;
pub use codex_message_history::HistoryEntry;
use codex_message_history::MessageHistoryConfig;
use codex_protocol::ThreadId;
#[cfg(unix)]
use std::os::unix::fs::OpenOptionsExt;
#[cfg(unix)]
use std::os::unix::fs::PermissionsExt;
use std::io::Result;
/// Filename that stores the message history inside `~/.codex`.
const HISTORY_FILENAME: &str = "history.jsonl";
/// When history exceeds the hard cap, trim it down to this fraction of `max_bytes`.
const HISTORY_SOFT_CAP_RATIO: f64 = 0.8;
const MAX_RETRIES: usize = 10;
const RETRY_SLEEP: Duration = Duration::from_millis(100);
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)]
pub struct HistoryEntry {
pub session_id: String,
pub ts: u64,
pub text: String,
fn message_history_config(config: &Config) -> MessageHistoryConfig {
MessageHistoryConfig {
codex_home: config.codex_home.clone(),
persistence: config.history.persistence,
max_bytes: config.history.max_bytes,
}
}
fn history_filepath(config: &Config) -> AbsolutePathBuf {
config.codex_home.join(HISTORY_FILENAME)
}
/// Append a `text` entry associated with `conversation_id` to the history file.
///
/// Uses advisory file locking (`File::try_lock`) with a retry loop to ensure
/// concurrent writes from multiple TUI processes do not interleave. The lock
/// acquisition and write are performed inside `spawn_blocking` so the caller's
/// async runtime is not blocked.
///
/// The entry is silently skipped when `config.history.persistence` is
/// [`HistoryPersistence::None`].
///
/// # Errors
///
/// Returns an I/O error if the history file cannot be opened/created, the
/// system clock is before the Unix epoch, or the exclusive lock cannot be
/// acquired after [`MAX_RETRIES`] attempts.
pub async fn append_entry(text: &str, conversation_id: &ThreadId, config: &Config) -> Result<()> {
    // Honor the user's persistence preference before doing any I/O.
    match config.history.persistence {
        HistoryPersistence::SaveAll => {
            // Save everything: proceed.
        }
        HistoryPersistence::None => {
            // No history persistence requested.
            return Ok(());
        }
    }
    // TODO: check `text` for sensitive patterns
    // Resolve `~/.codex/history.jsonl` and ensure the parent directory exists.
    let path = history_filepath(config);
    if let Some(parent) = path.parent() {
        tokio::fs::create_dir_all(parent).await?;
    }
    // Compute timestamp (seconds since the Unix epoch).
    let ts = std::time::SystemTime::now()
        .duration_since(std::time::UNIX_EPOCH)
        .map_err(|e| std::io::Error::other(format!("system clock before Unix epoch: {e}")))?
        .as_secs();
    // Construct the JSON line first so we can write it in a single syscall.
    let entry = HistoryEntry {
        session_id: conversation_id.to_string(),
        ts,
        text: text.to_string(),
    };
    let mut line = serde_json::to_string(&entry)
        .map_err(|e| std::io::Error::other(format!("failed to serialise history entry: {e}")))?;
    line.push('\n');
    // Open the history file for read/write access (append-only on Unix).
    let mut options = OpenOptions::new();
    options.read(true).write(true).create(true);
    #[cfg(unix)]
    {
        // O_APPEND gives atomic appends up to PIPE_BUF (see module docs);
        // 0o600 keeps the history readable by the owner only.
        options.append(true);
        options.mode(0o600);
    }
    let mut history_file = options.open(&path)?;
    // Ensure permissions.
    ensure_owner_only_permissions(&history_file).await?;
    let history_max_bytes = config.history.max_bytes;
    // Perform a blocking write under an advisory write lock using std::fs.
    tokio::task::spawn_blocking(move || -> Result<()> {
        // Retry a few times to avoid indefinite blocking when contended.
        for _ in 0..MAX_RETRIES {
            match history_file.try_lock() {
                Ok(()) => {
                    // While holding the exclusive lock, write the full line.
                    // We do not open the file with `append(true)` on Windows, so ensure the
                    // cursor is positioned at the end before writing.
                    history_file.seek(SeekFrom::End(0))?;
                    history_file.write_all(line.as_bytes())?;
                    history_file.flush()?;
                    // Trim oldest entries while still holding the lock.
                    enforce_history_limit(&mut history_file, history_max_bytes)?;
                    return Ok(());
                }
                Err(std::fs::TryLockError::WouldBlock) => {
                    std::thread::sleep(RETRY_SLEEP);
                }
                Err(e) => return Err(e.into()),
            }
        }
        Err(std::io::Error::new(
            std::io::ErrorKind::WouldBlock,
            "could not acquire exclusive lock on history file after multiple attempts",
        ))
    })
    .await??;
    // NOTE(review): two tail expressions follow — the legacy inline
    // implementation's `Ok(())` and a delegation to the extracted
    // `codex_message_history` crate. This reads like unresolved refactor
    // residue from the crate split (both tails cannot compile together);
    // confirm which implementation is meant to remain.
    Ok(())
    codex_message_history::append_entry(text, conversation_id, &message_history_config(config))
        .await
}
/// Trim the history file to honor `max_bytes`, dropping the oldest lines while holding
/// the write lock so the newest entry is always retained. When the file exceeds the
/// hard cap, it rewrites the remaining tail to a soft cap to avoid trimming again
/// immediately on the next write.
/// Trim the history file so it honors `max_bytes`, dropping the oldest lines
/// while the caller holds the exclusive advisory lock; the newest entry is
/// always retained. When the file exceeds the hard cap the remaining tail is
/// rewritten down to a soft cap (see `trim_target_bytes`) so the very next
/// write does not immediately trigger another trim.
///
/// # Errors
/// Propagates any I/O error from cloning, seeking, reading, or rewriting the
/// file handle.
fn enforce_history_limit(file: &mut File, max_bytes: Option<usize>) -> Result<()> {
    // No limit configured, or a zero limit: leave the file untouched.
    let Some(max_bytes) = max_bytes else {
        return Ok(());
    };
    if max_bytes == 0 {
        return Ok(());
    }
    let Ok(max_bytes) = u64::try_from(max_bytes) else {
        return Ok(());
    };
    let mut current_len = file.metadata()?.len();
    if current_len <= max_bytes {
        return Ok(());
    }
    // Measure each line's byte length from the start of the file. Use
    // `read_until` on raw bytes (rather than `read_line` into a `String`) so
    // an entry that is not valid UTF-8 cannot abort the trim with an error —
    // this routine only needs byte counts, never the text itself.
    let mut reader_file = file.try_clone()?;
    reader_file.seek(SeekFrom::Start(0))?;
    let mut buf_reader = BufReader::new(reader_file);
    let mut line_lengths = Vec::new();
    let mut line_buf = Vec::new();
    loop {
        line_buf.clear();
        let bytes = buf_reader.read_until(b'\n', &mut line_buf)?;
        if bytes == 0 {
            break;
        }
        line_lengths.push(bytes as u64);
    }
    if line_lengths.is_empty() {
        return Ok(());
    }
    // Never trim away the newest line: the target is at least its length.
    let last_index = line_lengths.len() - 1;
    let trim_target = trim_target_bytes(max_bytes, line_lengths[last_index]);
    // Walk oldest-first, accumulating how many leading bytes to discard.
    let mut drop_bytes = 0u64;
    let mut idx = 0usize;
    while current_len > trim_target && idx < last_index {
        current_len = current_len.saturating_sub(line_lengths[idx]);
        drop_bytes += line_lengths[idx];
        idx += 1;
    }
    if drop_bytes == 0 {
        return Ok(());
    }
    // Copy the surviving tail, then rewrite the file in place from offset 0.
    let mut reader = buf_reader.into_inner();
    reader.seek(SeekFrom::Start(drop_bytes))?;
    let capacity = usize::try_from(current_len).unwrap_or(0);
    let mut tail = Vec::with_capacity(capacity);
    reader.read_to_end(&mut tail)?;
    file.set_len(0)?;
    file.seek(SeekFrom::Start(0))?;
    file.write_all(&tail)?;
    file.flush()?;
    Ok(())
}
/// Soft-cap target size after trimming: a fraction of `max_bytes`
/// (`HISTORY_SOFT_CAP_RATIO`), floored, clamped to `[1, max_bytes]`, and
/// never smaller than the newest entry so the most recent write survives.
fn trim_target_bytes(max_bytes: u64, newest_entry_len: u64) -> u64 {
    let scaled = (max_bytes as f64) * HISTORY_SOFT_CAP_RATIO;
    let soft_cap = scaled.floor().clamp(1.0, max_bytes as f64) as u64;
    if newest_entry_len > soft_cap {
        newest_entry_len
    } else {
        soft_cap
    }
}
/// Asynchronously fetch the history file's *identifier* and current entry count.
///
/// The identifier is the file's inode on Unix or creation time on Windows.
/// The entry count is derived by counting newline bytes in the file. Returns
/// `(0, 0)` when the file does not exist or its metadata cannot be read. If
/// metadata succeeds but the file cannot be opened or scanned, returns
/// `(log_id, 0)` so callers can still detect that a history file exists.
pub async fn history_metadata(config: &Config) -> (u64, usize) {
    let path = history_filepath(config);
    // NOTE(review): two tail expressions follow — the legacy inline call and a
    // delegation to the extracted `codex_message_history` crate. This looks
    // like unresolved refactor residue from the crate split (both cannot
    // compile together); confirm which implementation should remain.
    history_metadata_for_file(&path).await
    codex_message_history::history_metadata(&message_history_config(config)).await
}
/// Look up a single history entry by file identity and zero-based offset.
///
/// Returns `Some(entry)` when the current history file's identifier (inode on
/// Unix, creation time on Windows) matches `log_id` **and** a valid JSON
/// record exists at `offset`. Returns `None` on any mismatch, I/O error, or
/// parse failure, all of which are logged at `warn` level.
///
/// This function is synchronous because it acquires a shared advisory file lock
/// via `File::try_lock_shared`. Callers on an async runtime should wrap it in
/// `spawn_blocking`.
pub fn lookup(log_id: u64, offset: usize, config: &Config) -> Option<HistoryEntry> {
    let path = history_filepath(config);
    // NOTE(review): both the legacy inline lookup and the delegation to the
    // extracted `codex_message_history` crate appear as tail expressions —
    // refactor residue from the crate split (both cannot compile together);
    // confirm which implementation should remain.
    lookup_history_entry(&path, log_id, offset)
    codex_message_history::lookup(log_id, offset, &message_history_config(config))
}
/// On Unix, tighten the history file's permissions to `0o600` (rw-------)
/// when they differ. Errors from stat or chmod are propagated to the caller.
#[cfg(unix)]
async fn ensure_owner_only_permissions(file: &File) -> Result<()> {
    let metadata = file.metadata()?;
    let mut perms = metadata.permissions();
    // Already owner-only: nothing to do.
    if perms.mode() & 0o777 == 0o600 {
        return Ok(());
    }
    perms.set_mode(0o600);
    // `set_permissions` can block, so run it off the async runtime.
    let handle = file.try_clone()?;
    tokio::task::spawn_blocking(move || handle.set_permissions(perms)).await??;
    Ok(())
}
#[cfg(windows)]
// On Windows there is no direct `0o600` equivalent, so permission tightening
// is a successful no-op; the Unix variant above does the real work.
async fn ensure_owner_only_permissions(_file: &File) -> Result<()> {
    Ok(())
}
/// Compute `(log_id, entry_count)` for the history file at `path`.
///
/// `log_id` is the platform file identity (inode on Unix, creation time on
/// Windows, 0 where unavailable); the entry count is the number of newline
/// bytes in the file. Returns `(0, 0)` when the file's metadata cannot be
/// read (including when it does not exist) and `(log_id, 0)` when the file
/// exists but cannot be opened or scanned, so callers can still detect that
/// a history file is present.
async fn history_metadata_for_file(path: &Path) -> (u64, usize) {
    // Any metadata failure — NotFound included — means "no history". The
    // original code special-cased NotFound but returned the same value, so
    // the branches are collapsed here.
    let log_id = match fs::metadata(path).await {
        Ok(metadata) => history_log_id(&metadata).unwrap_or(0),
        Err(_) => return (0, 0),
    };
    // Open the file; on failure still report the identity we already have.
    let mut file = match fs::File::open(path).await {
        Ok(f) => f,
        Err(_) => return (log_id, 0),
    };
    // Count newline bytes in fixed-size chunks; each '\n' ends one entry.
    let mut buf = [0u8; 8192];
    let mut count = 0usize;
    loop {
        match file.read(&mut buf).await {
            Ok(0) => break,
            Ok(n) => {
                count += buf[..n].iter().filter(|&&b| b == b'\n').count();
            }
            Err(_) => return (log_id, 0),
        }
    }
    (log_id, count)
}
/// Synchronously look up the history entry at zero-based `offset`, but only
/// when the file at `path` still matches `log_id` (inode on Unix, creation
/// time on Windows; `log_id == 0` skips the identity check). Returns `None`
/// on any mismatch, I/O error, or parse failure; failures are logged at
/// `warn` level. Acquires a shared advisory lock with a bounded retry loop.
fn lookup_history_entry(path: &Path, log_id: u64, offset: usize) -> Option<HistoryEntry> {
    use std::io::BufRead;
    use std::io::BufReader;
    let file: File = match OpenOptions::new().read(true).open(path) {
        Ok(f) => f,
        Err(e) => {
            tracing::warn!(error = %e, "failed to open history file");
            return None;
        }
    };
    let metadata = match file.metadata() {
        Ok(m) => m,
        Err(e) => {
            tracing::warn!(error = %e, "failed to stat history file");
            return None;
        }
    };
    // Bail out when the on-disk file is no longer the one `log_id` refers to
    // (e.g. it was rotated or recreated since the caller captured the id).
    let current_log_id = history_log_id(&metadata)?;
    if log_id != 0 && current_log_id != log_id {
        return None;
    }
    // Open & lock file for reading using a shared lock.
    // Retry a few times to avoid indefinite blocking.
    for _ in 0..MAX_RETRIES {
        let lock_result = file.try_lock_shared();
        match lock_result {
            Ok(()) => {
                let reader = BufReader::new(&file);
                // Scan line by line until the requested offset is reached.
                for (idx, line_res) in reader.lines().enumerate() {
                    let line = match line_res {
                        Ok(l) => l,
                        Err(e) => {
                            tracing::warn!(error = %e, "failed to read line from history file");
                            return None;
                        }
                    };
                    if idx == offset {
                        match serde_json::from_str::<HistoryEntry>(&line) {
                            Ok(entry) => return Some(entry),
                            Err(e) => {
                                tracing::warn!(error = %e, "failed to parse history entry");
                                return None;
                            }
                        }
                    }
                }
                // Not found at requested offset.
                return None;
            }
            Err(std::fs::TryLockError::WouldBlock) => {
                // Another process holds an exclusive lock; back off briefly.
                std::thread::sleep(RETRY_SLEEP);
            }
            Err(e) => {
                tracing::warn!(error = %e, "failed to acquire shared lock on history file");
                return None;
            }
        }
    }
    // Lock never became available within MAX_RETRIES attempts.
    None
}
/// Identity of the history file used for staleness checks: the inode number.
#[cfg(unix)]
fn history_log_id(metadata: &std::fs::Metadata) -> Option<u64> {
    use std::os::unix::fs::MetadataExt;
    let inode = metadata.ino();
    Some(inode)
}
/// Identity of the history file on Windows: the file creation time.
#[cfg(windows)]
fn history_log_id(metadata: &std::fs::Metadata) -> Option<u64> {
    use std::os::windows::fs::MetadataExt;
    Some(metadata.creation_time())
}
/// On other platforms no stable file identity is available; returns `None`.
#[cfg(not(any(unix, windows)))]
fn history_log_id(_metadata: &std::fs::Metadata) -> Option<u64> {
    None
}
#[cfg(test)]
#[path = "message_history_tests.rs"]
mod tests;

View File

@@ -1586,5 +1586,5 @@ fn configured_plugins_from_user_config_value(
}
#[cfg(test)]
#[path = "manager_tests.rs"]
#[path = "../../tests/unit/plugins/manager_tests.rs"]
mod tests;

View File

@@ -1,82 +1 @@
use codex_protocol::protocol::ReviewFinding;
use codex_protocol::protocol::ReviewOutputEvent;
// Note: We keep this module UI-agnostic. It returns plain strings that
// higher layers (e.g., TUI) may style as needed.
/// Render a finding's code location as "path:start-end".
fn format_location(item: &ReviewFinding) -> String {
    let loc = &item.code_location;
    let path = loc.absolute_file_path.display();
    format!("{path}:{}-{}", loc.line_range.start, loc.line_range.end)
}
const REVIEW_FALLBACK_MESSAGE: &str = "Reviewer failed to output a response.";
/// Format a full review findings block as plain text lines.
///
/// - When `selection` is `Some`, each finding line carries a checkbox
///   marker: "[x]" for selected items and "[ ]" for unselected; indices
///   missing from `selection` default to selected.
/// - When `selection` is `None`, the marker is omitted and a simple bullet
///   is rendered instead.
pub fn format_review_findings_block(
    findings: &[ReviewFinding],
    selection: Option<&[bool]>,
) -> String {
    // Singular header for zero-or-one finding, plural otherwise.
    let header = if findings.len() > 1 {
        "Full review comments:"
    } else {
        "Review comment:"
    };
    // Leading blank line, then the header, then one blank-line-separated
    // bullet (plus indented body lines) per finding.
    let mut lines: Vec<String> = vec![String::new(), header.to_string()];
    for (idx, item) in findings.iter().enumerate() {
        lines.push(String::new());
        let title = &item.title;
        let location = format_location(item);
        match selection {
            Some(flags) => {
                // Default to selected if the index is out of bounds.
                let checked = flags.get(idx).copied().unwrap_or(true);
                let marker = if checked { "[x]" } else { "[ ]" };
                lines.push(format!("- {marker} {title}{location}"));
            }
            None => lines.push(format!("- {title}{location}")),
        }
        lines.extend(item.body.lines().map(|body_line| format!(" {body_line}")));
    }
    lines.join("\n")
}
/// Render a human-readable review summary suitable for a user-facing message.
///
/// Emits the trimmed explanation, the formatted findings block, or both
/// separated by a blank line; falls back to `REVIEW_FALLBACK_MESSAGE` when
/// neither section has content.
pub fn render_review_output_text(output: &ReviewOutputEvent) -> String {
    let mut sections: Vec<String> = Vec::new();
    let explanation = output.overall_explanation.trim();
    if !explanation.is_empty() {
        sections.push(explanation.to_string());
    }
    if !output.findings.is_empty() {
        // No selection markers for the user-facing rendering.
        let block = format_review_findings_block(&output.findings, None);
        let block = block.trim();
        if !block.is_empty() {
            sections.push(block.to_string());
        }
    }
    if sections.is_empty() {
        return REVIEW_FALLBACK_MESSAGE.to_string();
    }
    sections.join("\n\n")
}
pub use codex_review::format::*;

View File

@@ -1,185 +1 @@
use codex_git_utils::merge_base_with_head;
use codex_protocol::protocol::ReviewRequest;
use codex_protocol::protocol::ReviewTarget;
use codex_utils_absolute_path::AbsolutePathBuf;
use codex_utils_template::Template;
use std::sync::LazyLock;
/// A review request whose prompt and user-facing hint have been fully
/// materialized from the review target.
#[derive(Clone, Debug, PartialEq)]
pub struct ResolvedReviewRequest {
    // The review target the prompt was generated for.
    pub target: ReviewTarget,
    // Prompt text produced by `review_prompt` for this target.
    pub prompt: String,
    // Short human-readable description of the target (see `user_facing_hint`).
    pub user_facing_hint: String,
}
/// Prompt for reviewing all uncommitted work in the current checkout.
const UNCOMMITTED_PROMPT: &str = "Review the current code changes (staged, unstaged, and untracked files) and provide prioritized findings.";
// Fallback base-branch prompt used when the merge base could not be resolved
// locally; it instructs the model to compute the merge base itself.
const BASE_BRANCH_PROMPT_BACKUP: &str = "Review the code changes against the base branch '{{branch}}'. Start by finding the merge diff between the current branch and {{branch}}'s upstream e.g. (`git merge-base HEAD \"$(git rev-parse --abbrev-ref \"{{branch}}@{upstream}\")\"`), then run `git diff` against that SHA to see what changes we would merge into the {{branch}} branch. Provide prioritized, actionable findings.";
// Primary base-branch prompt used when the merge-base SHA is known up front.
const BASE_BRANCH_PROMPT: &str = "Review the code changes against the base branch '{{base_branch}}'. The merge base commit for this comparison is {{merge_base_sha}}. Run `git diff {{merge_base_sha}}` to inspect the changes relative to {{base_branch}}. Provide prioritized, actionable findings.";
// Templates are parsed lazily exactly once; the prompt strings are
// compile-time constants, so a parse failure is a programmer error and panics.
static BASE_BRANCH_PROMPT_BACKUP_TEMPLATE: LazyLock<Template> = LazyLock::new(|| {
    Template::parse(BASE_BRANCH_PROMPT_BACKUP)
        .unwrap_or_else(|err| panic!("base branch backup review prompt must parse: {err}"))
});
static BASE_BRANCH_PROMPT_TEMPLATE: LazyLock<Template> = LazyLock::new(|| {
    Template::parse(BASE_BRANCH_PROMPT)
        .unwrap_or_else(|err| panic!("base branch review prompt must parse: {err}"))
});
// Commit-review prompts, with and without the commit title available.
const COMMIT_PROMPT_WITH_TITLE: &str = "Review the code changes introduced by commit {{sha}} (\"{{title}}\"). Provide prioritized, actionable findings.";
const COMMIT_PROMPT: &str = "Review the code changes introduced by commit {{sha}}. Provide prioritized, actionable findings.";
static COMMIT_PROMPT_WITH_TITLE_TEMPLATE: LazyLock<Template> = LazyLock::new(|| {
    Template::parse(COMMIT_PROMPT_WITH_TITLE)
        .unwrap_or_else(|err| panic!("commit review prompt with title must parse: {err}"))
});
static COMMIT_PROMPT_TEMPLATE: LazyLock<Template> = LazyLock::new(|| {
    Template::parse(COMMIT_PROMPT)
        .unwrap_or_else(|err| panic!("commit review prompt must parse: {err}"))
});
pub fn resolve_review_request(
request: ReviewRequest,
cwd: &AbsolutePathBuf,
) -> anyhow::Result<ResolvedReviewRequest> {
let target = request.target;
let prompt = review_prompt(&target, cwd)?;
let user_facing_hint = request
.user_facing_hint
.unwrap_or_else(|| user_facing_hint(&target));
Ok(ResolvedReviewRequest {
target,
prompt,
user_facing_hint,
})
}
/// Build the review prompt text for `target`.
///
/// For base-branch reviews this consults git (via `merge_base_with_head`) to
/// pin the merge-base SHA, falling back to a prompt that instructs the model
/// to compute the merge base itself when it cannot be resolved. Custom
/// instructions must be non-empty after trimming.
pub fn review_prompt(target: &ReviewTarget, cwd: &AbsolutePathBuf) -> anyhow::Result<String> {
    let prompt = match target {
        ReviewTarget::UncommittedChanges => UNCOMMITTED_PROMPT.to_string(),
        ReviewTarget::BaseBranch { branch } => match merge_base_with_head(cwd, branch)? {
            Some(commit) => render_review_prompt(
                &BASE_BRANCH_PROMPT_TEMPLATE,
                [
                    ("base_branch", branch.as_str()),
                    ("merge_base_sha", commit.as_str()),
                ],
            ),
            None => render_review_prompt(
                &BASE_BRANCH_PROMPT_BACKUP_TEMPLATE,
                [("branch", branch.as_str())],
            ),
        },
        ReviewTarget::Commit {
            sha,
            title: Some(title),
        } => render_review_prompt(
            &COMMIT_PROMPT_WITH_TITLE_TEMPLATE,
            [("sha", sha.as_str()), ("title", title.as_str())],
        ),
        ReviewTarget::Commit { sha, title: None } => {
            render_review_prompt(&COMMIT_PROMPT_TEMPLATE, [("sha", sha.as_str())])
        }
        ReviewTarget::Custom { instructions } => {
            let trimmed = instructions.trim();
            anyhow::ensure!(!trimmed.is_empty(), "Review prompt cannot be empty");
            trimmed.to_string()
        }
    };
    Ok(prompt)
}
/// Render a prompt template with the given variables, panicking if rendering
/// fails (the templates are compile-time constants, so a render failure is a
/// programmer error).
fn render_review_prompt<'a, const N: usize>(
    template: &Template,
    variables: [(&'a str, &'a str); N],
) -> String {
    match template.render(variables) {
        Ok(rendered) => rendered,
        Err(err) => panic!("review prompt template must render: {err}"),
    }
}
/// Short human-readable description of a review target, used as a UI hint.
pub fn user_facing_hint(target: &ReviewTarget) -> String {
    match target {
        ReviewTarget::UncommittedChanges => "current changes".to_string(),
        ReviewTarget::BaseBranch { branch } => format!("changes against '{branch}'"),
        ReviewTarget::Commit { sha, title } => {
            // Abbreviate to the conventional 7-character short SHA.
            let short_sha: String = sha.chars().take(7).collect();
            match title {
                Some(title) => format!("commit {short_sha}: {title}"),
                None => format!("commit {short_sha}"),
            }
        }
        ReviewTarget::Custom { instructions } => instructions.trim().to_string(),
    }
}
impl From<ResolvedReviewRequest> for ReviewRequest {
fn from(resolved: ResolvedReviewRequest) -> Self {
ReviewRequest {
target: resolved.target,
user_facing_hint: Some(resolved.user_facing_hint),
}
}
}
#[cfg(test)]
mod tests {
    use super::*;
    use pretty_assertions::assert_eq;
    // Backup prompt: rendered when the merge base could not be resolved.
    #[test]
    fn review_prompt_template_renders_base_branch_backup_variant() {
        assert_eq!(
            render_review_prompt(&BASE_BRANCH_PROMPT_BACKUP_TEMPLATE, [("branch", "main")]),
            "Review the code changes against the base branch 'main'. Start by finding the merge diff between the current branch and main's upstream e.g. (`git merge-base HEAD \"$(git rev-parse --abbrev-ref \"main@{upstream}\")\"`), then run `git diff` against that SHA to see what changes we would merge into the main branch. Provide prioritized, actionable findings."
        );
    }
    // Primary prompt: the resolved merge-base SHA is substituted directly.
    #[test]
    fn review_prompt_template_renders_base_branch_variant() {
        assert_eq!(
            render_review_prompt(
                &BASE_BRANCH_PROMPT_TEMPLATE,
                [("base_branch", "main"), ("merge_base_sha", "abc123")]
            ),
            "Review the code changes against the base branch 'main'. The merge base commit for this comparison is abc123. Run `git diff abc123` to inspect the changes relative to main. Provide prioritized, actionable findings."
        );
    }
    // Commit prompt without a title.
    #[test]
    fn review_prompt_template_renders_commit_variant() {
        assert_eq!(
            review_prompt(
                &ReviewTarget::Commit {
                    sha: "deadbeef".to_string(),
                    title: None,
                },
                &AbsolutePathBuf::current_dir().expect("cwd"),
            )
            .expect("commit prompt should render"),
            "Review the code changes introduced by commit deadbeef. Provide prioritized, actionable findings."
        );
    }
    // Commit prompt including the quoted commit title.
    #[test]
    fn review_prompt_template_renders_commit_variant_with_title() {
        assert_eq!(
            review_prompt(
                &ReviewTarget::Commit {
                    sha: "deadbeef".to_string(),
                    title: Some("Fix bug".to_string()),
                },
                &AbsolutePathBuf::current_dir().expect("cwd"),
            )
            .expect("commit prompt should render"),
            "Review the code changes introduced by commit deadbeef (\"Fix bug\"). Provide prioritized, actionable findings."
        );
    }
}
pub use codex_review::prompts::*;

View File

@@ -41,6 +41,7 @@ use codex_analytics::SubAgentThreadStartedInput;
use codex_app_server_protocol::AuthMode;
use codex_app_server_protocol::McpServerElicitationRequest;
use codex_app_server_protocol::McpServerElicitationRequestParams;
use codex_code_mode::CodeModeRuntimeFactory;
use codex_config::types::OAuthCredentialsStoreMode;
use codex_exec_server::Environment;
use codex_exec_server::EnvironmentManager;
@@ -393,6 +394,7 @@ pub(crate) struct CodexSpawnArgs {
pub(crate) user_shell_override: Option<shell::Shell>,
pub(crate) parent_trace: Option<W3cTraceContext>,
pub(crate) analytics_events_client: Option<AnalyticsEventsClient>,
pub(crate) code_mode_runtime_factory: CodeModeRuntimeFactory,
}
pub(crate) const INITIAL_SUBMIT_ID: &str = "";
@@ -447,6 +449,7 @@ impl Codex {
inherited_exec_policy,
parent_trace: _,
analytics_events_client,
code_mode_runtime_factory,
} = args;
let (tx_sub, rx_sub) = async_channel::bounded(SUBMISSION_CHANNEL_CAPACITY);
let (tx_event, rx_event) = async_channel::unbounded();
@@ -643,6 +646,7 @@ impl Codex {
agent_control,
environment,
analytics_events_client,
Some(code_mode_runtime_factory),
)
.await
.map_err(|e| {
@@ -3118,4 +3122,5 @@ fn errors_to_info(errors: &[SkillError]) -> Vec<SkillErrorInfo> {
use crate::memories::prompts::build_memory_tool_developer_instructions;
#[cfg(test)]
#[path = "../../tests/unit/codex_tests.rs"]
pub(crate) mod tests;

View File

@@ -224,6 +224,7 @@ impl Session {
agent_control: AgentControl,
environment: Option<Arc<Environment>>,
analytics_events_client: Option<AnalyticsEventsClient>,
code_mode_runtime_factory: Option<codex_code_mode::CodeModeRuntimeFactory>,
) -> anyhow::Result<Arc<Self>> {
debug!(
"Configuring session: model={}; provider={:?}",
@@ -415,6 +416,9 @@ impl Session {
});
}
let code_mode_runtime_factory =
code_mode_runtime_factory.unwrap_or_else(codex_code_mode::default_runtime_factory);
let auth = auth.as_ref();
let auth_mode = auth.map(CodexAuth::auth_mode).map(TelemetryAuthMode::from);
let account_id = auth.and_then(CodexAuth::get_account_id);
@@ -673,8 +677,9 @@ impl Session {
config.features.enabled(Feature::RuntimeMetrics),
Self::build_model_client_beta_features_header(config.as_ref()),
),
code_mode_service: crate::tools::code_mode::CodeModeService::new(
config.js_repl_node_path.clone(),
code_mode_runtime_factory: Arc::clone(&code_mode_runtime_factory),
code_mode_service: crate::tools::code_mode::CodeModeService::from_runtime(
code_mode_runtime_factory(),
),
environment,
};

View File

@@ -1,6 +1,6 @@
---
source: core/src/session/tests.rs
assertion_line: 1619
source: core/tests/unit/codex_tests.rs
assertion_line: 1591
expression: snapshot
---
Scenario: First request after fork when startup preserves the parent baseline, the fork changes approval policy, and the first forked turn enters plan mode.

View File

@@ -102,9 +102,10 @@ mod tests {
let file_watcher = Arc::new(FileWatcher::noop());
let skills_watcher = SkillsWatcher::new(&file_watcher);
let mut rx = skills_watcher.subscribe();
let _registration = skills_watcher
.subscriber
.register_path(PathBuf::from("/tmp/skill"), /*recursive*/ true);
let _registration = skills_watcher.subscriber.register_paths(vec![WatchPath {
path: PathBuf::from("/tmp/skill"),
recursive: true,
}]);
file_watcher
.send_paths_for_test(vec![PathBuf::from("/tmp/skill/SKILL.md")])

View File

@@ -2,10 +2,10 @@ mod service;
mod session;
mod turn;
pub(crate) use codex_session_runtime::MailboxDeliveryPhase;
pub(crate) use codex_session_runtime::TurnState;
pub(crate) use service::SessionServices;
pub(crate) use session::SessionState;
pub(crate) use turn::ActiveTurn;
pub(crate) use turn::MailboxDeliveryPhase;
pub(crate) use turn::RunningTask;
pub(crate) use turn::TaskKind;
pub(crate) use turn::TurnState;

View File

@@ -17,6 +17,7 @@ use crate::tools::network_approval::NetworkApprovalService;
use crate::tools::sandboxing::ApprovalStore;
use crate::unified_exec::UnifiedExecProcessManager;
use codex_analytics::AnalyticsEventsClient;
use codex_code_mode::CodeModeRuntimeFactory;
use codex_exec_server::Environment;
use codex_hooks::Hooks;
use codex_login::AuthManager;
@@ -63,6 +64,7 @@ pub(crate) struct SessionServices {
pub(crate) thread_store: LocalThreadStore,
/// Session-scoped model client shared across turns.
pub(crate) model_client: ModelClient,
pub(crate) code_mode_runtime_factory: CodeModeRuntimeFactory,
pub(crate) code_mode_service: CodeModeService,
pub(crate) environment: Option<Arc<Environment>>,
}

View File

@@ -1,27 +1,15 @@
//! Turn-scoped state and active turn metadata scaffolding.
//! Turn-scoped active turn metadata scaffolding.
use codex_sandboxing::policy_transforms::merge_permission_profiles;
use codex_session_runtime::TurnState;
use indexmap::IndexMap;
use std::collections::HashMap;
use std::sync::Arc;
use tokio::sync::Mutex;
use tokio::sync::Notify;
use tokio_util::sync::CancellationToken;
use tokio_util::task::AbortOnDropHandle;
use codex_protocol::dynamic_tools::DynamicToolResponse;
use codex_protocol::models::ResponseInputItem;
use codex_protocol::request_permissions::RequestPermissionsResponse;
use codex_protocol::request_user_input::RequestUserInputResponse;
use codex_rmcp_client::ElicitationResponse;
use rmcp::model::RequestId;
use tokio::sync::oneshot;
use crate::session::turn_context::TurnContext;
use crate::tasks::AnySessionTask;
use codex_protocol::models::PermissionProfile;
use codex_protocol::protocol::ReviewDecision;
use codex_protocol::protocol::TokenUsage;
/// Metadata about the currently running turn.
pub(crate) struct ActiveTurn {
@@ -29,27 +17,6 @@ pub(crate) struct ActiveTurn {
pub(crate) turn_state: Arc<Mutex<TurnState>>,
}
/// Whether mailbox deliveries should still be folded into the current turn.
///
/// State machine:
/// - A turn starts in `CurrentTurn`, so queued child mail can join the next
/// model request for that turn.
/// - After user-visible terminal output is recorded, we switch to `NextTurn`
/// to leave late child mail queued instead of extending an already shown
/// answer.
/// - If the same task later gets explicit same-turn work again (a steered user
/// prompt or a tool call after an untagged preamble), we reopen `CurrentTurn`
/// so that pending child mail is drained into that follow-up request.
#[derive(Clone, Copy, Debug, Default, Eq, PartialEq)]
pub(crate) enum MailboxDeliveryPhase {
/// Incoming mailbox messages can still be consumed by the current turn.
#[default]
CurrentTurn,
/// The current turn already emitted visible final answer text; mailbox
/// messages should remain queued for a later turn.
NextTurn,
}
impl Default for ActiveTurn {
fn default() -> Self {
Self {
@@ -93,159 +60,6 @@ impl ActiveTurn {
}
}
/// Mutable state for a single turn.
#[derive(Default)]
pub(crate) struct TurnState {
pending_approvals: HashMap<String, oneshot::Sender<ReviewDecision>>,
pending_request_permissions: HashMap<String, oneshot::Sender<RequestPermissionsResponse>>,
pending_user_input: HashMap<String, oneshot::Sender<RequestUserInputResponse>>,
pending_elicitations: HashMap<(String, RequestId), oneshot::Sender<ElicitationResponse>>,
pending_dynamic_tools: HashMap<String, oneshot::Sender<DynamicToolResponse>>,
pending_input: Vec<ResponseInputItem>,
mailbox_delivery_phase: MailboxDeliveryPhase,
granted_permissions: Option<PermissionProfile>,
pub(crate) tool_calls: u64,
pub(crate) token_usage_at_turn_start: TokenUsage,
}
impl TurnState {
pub(crate) fn insert_pending_approval(
&mut self,
key: String,
tx: oneshot::Sender<ReviewDecision>,
) -> Option<oneshot::Sender<ReviewDecision>> {
self.pending_approvals.insert(key, tx)
}
pub(crate) fn remove_pending_approval(
&mut self,
key: &str,
) -> Option<oneshot::Sender<ReviewDecision>> {
self.pending_approvals.remove(key)
}
pub(crate) fn clear_pending(&mut self) {
self.pending_approvals.clear();
self.pending_request_permissions.clear();
self.pending_user_input.clear();
self.pending_elicitations.clear();
self.pending_dynamic_tools.clear();
self.pending_input.clear();
}
pub(crate) fn insert_pending_request_permissions(
&mut self,
key: String,
tx: oneshot::Sender<RequestPermissionsResponse>,
) -> Option<oneshot::Sender<RequestPermissionsResponse>> {
self.pending_request_permissions.insert(key, tx)
}
pub(crate) fn remove_pending_request_permissions(
&mut self,
key: &str,
) -> Option<oneshot::Sender<RequestPermissionsResponse>> {
self.pending_request_permissions.remove(key)
}
pub(crate) fn insert_pending_user_input(
&mut self,
key: String,
tx: oneshot::Sender<RequestUserInputResponse>,
) -> Option<oneshot::Sender<RequestUserInputResponse>> {
self.pending_user_input.insert(key, tx)
}
pub(crate) fn remove_pending_user_input(
&mut self,
key: &str,
) -> Option<oneshot::Sender<RequestUserInputResponse>> {
self.pending_user_input.remove(key)
}
pub(crate) fn insert_pending_elicitation(
&mut self,
server_name: String,
request_id: RequestId,
tx: oneshot::Sender<ElicitationResponse>,
) -> Option<oneshot::Sender<ElicitationResponse>> {
self.pending_elicitations
.insert((server_name, request_id), tx)
}
pub(crate) fn remove_pending_elicitation(
&mut self,
server_name: &str,
request_id: &RequestId,
) -> Option<oneshot::Sender<ElicitationResponse>> {
self.pending_elicitations
.remove(&(server_name.to_string(), request_id.clone()))
}
pub(crate) fn insert_pending_dynamic_tool(
&mut self,
key: String,
tx: oneshot::Sender<DynamicToolResponse>,
) -> Option<oneshot::Sender<DynamicToolResponse>> {
self.pending_dynamic_tools.insert(key, tx)
}
pub(crate) fn remove_pending_dynamic_tool(
&mut self,
key: &str,
) -> Option<oneshot::Sender<DynamicToolResponse>> {
self.pending_dynamic_tools.remove(key)
}
pub(crate) fn push_pending_input(&mut self, input: ResponseInputItem) {
self.pending_input.push(input);
}
pub(crate) fn prepend_pending_input(&mut self, mut input: Vec<ResponseInputItem>) {
if input.is_empty() {
return;
}
input.append(&mut self.pending_input);
self.pending_input = input;
}
pub(crate) fn take_pending_input(&mut self) -> Vec<ResponseInputItem> {
if self.pending_input.is_empty() {
Vec::with_capacity(0)
} else {
let mut ret = Vec::new();
std::mem::swap(&mut ret, &mut self.pending_input);
ret
}
}
pub(crate) fn has_pending_input(&self) -> bool {
!self.pending_input.is_empty()
}
pub(crate) fn accept_mailbox_delivery_for_current_turn(&mut self) {
self.set_mailbox_delivery_phase(MailboxDeliveryPhase::CurrentTurn);
}
pub(crate) fn accepts_mailbox_delivery_for_current_turn(&self) -> bool {
self.mailbox_delivery_phase == MailboxDeliveryPhase::CurrentTurn
}
pub(crate) fn set_mailbox_delivery_phase(&mut self, phase: MailboxDeliveryPhase) {
self.mailbox_delivery_phase = phase;
}
pub(crate) fn record_granted_permissions(&mut self, permissions: PermissionProfile) {
self.granted_permissions =
merge_permission_profiles(self.granted_permissions.as_ref(), Some(&permissions));
}
pub(crate) fn granted_permissions(&self) -> Option<PermissionProfile> {
self.granted_permissions.clone()
}
}
impl ActiveTurn {
/// Clear any pending approvals and input buffered for the current turn.
pub(crate) async fn clear_pending(&self) {

View File

@@ -70,6 +70,22 @@ pub fn thread_manager_with_models_provider_and_home(
)
}
pub fn thread_manager_with_models_provider_home_and_code_mode_runtime_factory(
auth: CodexAuth,
provider: ModelProviderInfo,
codex_home: PathBuf,
environment_manager: Arc<EnvironmentManager>,
code_mode_runtime_factory: codex_code_mode::CodeModeRuntimeFactory,
) -> ThreadManager {
ThreadManager::with_models_provider_and_home_and_code_mode_runtime_factory_for_tests(
auth,
provider,
codex_home,
environment_manager,
code_mode_runtime_factory,
)
}
pub async fn start_thread_with_user_shell_override(
thread_manager: &ThreadManager,
config: Config,

View File

@@ -18,6 +18,7 @@ use crate::tasks::interrupted_turn_history_marker;
use codex_analytics::AnalyticsEventsClient;
use codex_app_server_protocol::ThreadHistoryBuilder;
use codex_app_server_protocol::TurnStatus;
use codex_code_mode::CodeModeRuntimeFactory;
use codex_exec_server::EnvironmentManager;
use codex_login::AuthManager;
use codex_login::CodexAuth;
@@ -213,6 +214,7 @@ pub(crate) struct ThreadManagerState {
skills_watcher: Arc<SkillsWatcher>,
session_source: SessionSource,
analytics_events_client: Option<AnalyticsEventsClient>,
code_mode_runtime_factory: CodeModeRuntimeFactory,
// Captures submitted ops for testing purpose when test mode is enabled.
ops_log: Option<SharedCapturedOps>,
}
@@ -225,6 +227,26 @@ impl ThreadManager {
collaboration_modes_config: CollaborationModesConfig,
environment_manager: Arc<EnvironmentManager>,
analytics_events_client: Option<AnalyticsEventsClient>,
) -> Self {
Self::new_with_code_mode_runtime_factory(
config,
auth_manager,
session_source,
collaboration_modes_config,
environment_manager,
analytics_events_client,
codex_code_mode::default_runtime_factory(),
)
}
pub fn new_with_code_mode_runtime_factory(
config: &Config,
auth_manager: Arc<AuthManager>,
session_source: SessionSource,
collaboration_modes_config: CollaborationModesConfig,
environment_manager: Arc<EnvironmentManager>,
analytics_events_client: Option<AnalyticsEventsClient>,
code_mode_runtime_factory: CodeModeRuntimeFactory,
) -> Self {
let codex_home = config.codex_home.clone();
let restriction_product = session_source.restriction_product();
@@ -264,6 +286,7 @@ impl ThreadManager {
auth_manager,
session_source,
analytics_events_client,
code_mode_runtime_factory,
ops_log: should_use_test_thread_manager_behavior()
.then(|| Arc::new(std::sync::Mutex::new(Vec::new()))),
}),
@@ -301,6 +324,24 @@ impl ThreadManager {
provider: ModelProviderInfo,
codex_home: PathBuf,
environment_manager: Arc<EnvironmentManager>,
) -> Self {
Self::with_models_provider_and_home_and_code_mode_runtime_factory_for_tests(
auth,
provider,
codex_home,
environment_manager,
codex_code_mode::default_runtime_factory(),
)
}
/// Construct with a dummy AuthManager containing the provided CodexAuth, codex home,
/// and code mode runtime factory. Used for integration tests that exercise code mode.
pub(crate) fn with_models_provider_and_home_and_code_mode_runtime_factory_for_tests(
auth: CodexAuth,
provider: ModelProviderInfo,
codex_home: PathBuf,
environment_manager: Arc<EnvironmentManager>,
code_mode_runtime_factory: CodeModeRuntimeFactory,
) -> Self {
set_thread_manager_test_mode_for_tests(/*enabled*/ true);
let auth_manager = AuthManager::from_auth_for_testing(auth);
@@ -338,6 +379,7 @@ impl ThreadManager {
auth_manager,
session_source: SessionSource::Exec,
analytics_events_client: None,
code_mode_runtime_factory,
ops_log: should_use_test_thread_manager_behavior()
.then(|| Arc::new(std::sync::Mutex::new(Vec::new()))),
}),
@@ -947,6 +989,7 @@ impl ThreadManagerState {
user_shell_override,
parent_trace,
analytics_events_client: self.analytics_events_client.clone(),
code_mode_runtime_factory: Arc::clone(&self.code_mode_runtime_factory),
})
.await?;
self.finalize_thread_spawn(codex, thread_id, watch_registration)

View File

@@ -8,6 +8,7 @@ use crate::tools::registry::ToolKind;
use super::ExecContext;
use super::PUBLIC_TOOL_NAME;
use super::build_enabled_tools;
use super::code_mode_impl;
use super::handle_runtime_response;
pub struct CodeModeExecuteHandler;
@@ -21,7 +22,7 @@ impl CodeModeExecuteHandler {
code: String,
) -> Result<FunctionToolOutput, FunctionCallError> {
let args =
codex_code_mode::parse_exec_source(&code).map_err(FunctionCallError::RespondToModel)?;
code_mode_impl::parse_exec_source(&code).map_err(FunctionCallError::RespondToModel)?;
let exec = ExecContext { session, turn };
let enabled_tools = build_enabled_tools(&exec).await;
let stored_values = exec
@@ -35,7 +36,7 @@ impl CodeModeExecuteHandler {
.session
.services
.code_mode_service
.execute(codex_code_mode::ExecuteRequest {
.execute(code_mode_impl::ExecuteRequest {
tool_call_id: call_id,
enabled_tools,
source: args.code,

View File

@@ -3,12 +3,16 @@ mod response_adapter;
mod wait_handler;
use std::collections::HashSet;
#[cfg(test)]
use std::path::PathBuf;
use std::sync::Arc;
use std::time::Duration;
use codex_code_mode::CodeModeTurnHost;
use codex_code_mode::RuntimeResponse;
pub(super) use codex_code_mode as code_mode_impl;
use code_mode_impl::CodeModeRuntimeService;
use code_mode_impl::CodeModeTurnHost;
use code_mode_impl::RuntimeResponse;
use codex_protocol::models::FunctionCallOutputContentItem;
use codex_protocol::models::FunctionCallOutputPayload;
use codex_protocol::models::ResponseInputItem;
@@ -41,9 +45,9 @@ pub(crate) use execute_handler::CodeModeExecuteHandler;
use response_adapter::into_function_call_output_content_items;
pub(crate) use wait_handler::CodeModeWaitHandler;
pub(crate) const PUBLIC_TOOL_NAME: &str = codex_code_mode::PUBLIC_TOOL_NAME;
pub(crate) const WAIT_TOOL_NAME: &str = codex_code_mode::WAIT_TOOL_NAME;
pub(crate) const DEFAULT_WAIT_YIELD_TIME_MS: u64 = codex_code_mode::DEFAULT_WAIT_YIELD_TIME_MS;
pub(crate) const PUBLIC_TOOL_NAME: &str = code_mode_impl::PUBLIC_TOOL_NAME;
pub(crate) const WAIT_TOOL_NAME: &str = code_mode_impl::WAIT_TOOL_NAME;
pub(crate) const DEFAULT_WAIT_YIELD_TIME_MS: u64 = code_mode_impl::DEFAULT_WAIT_YIELD_TIME_MS;
#[derive(Clone)]
pub(crate) struct ExecContext {
@@ -52,16 +56,21 @@ pub(crate) struct ExecContext {
}
pub(crate) struct CodeModeService {
inner: codex_code_mode::CodeModeService,
inner: Arc<dyn CodeModeRuntimeService>,
}
impl CodeModeService {
#[cfg(test)]
pub(crate) fn new(_js_repl_node_path: Option<PathBuf>) -> Self {
Self {
inner: codex_code_mode::CodeModeService::new(),
inner: Arc::new(code_mode_impl::CodeModeService::new()),
}
}
pub(crate) fn from_runtime(inner: Arc<dyn CodeModeRuntimeService>) -> Self {
Self { inner }
}
pub(crate) async fn stored_values(&self) -> std::collections::HashMap<String, JsonValue> {
self.inner.stored_values().await
}
@@ -75,14 +84,14 @@ impl CodeModeService {
pub(crate) async fn execute(
&self,
request: codex_code_mode::ExecuteRequest,
request: code_mode_impl::ExecuteRequest,
) -> Result<RuntimeResponse, String> {
self.inner.execute(request).await
}
pub(crate) async fn wait(
&self,
request: codex_code_mode::WaitRequest,
request: code_mode_impl::WaitRequest,
) -> Result<RuntimeResponse, String> {
self.inner.wait(request).await
}
@@ -93,7 +102,7 @@ impl CodeModeService {
turn: &Arc<TurnContext>,
router: Arc<ToolRouter>,
tracker: SharedTurnDiffTracker,
) -> Option<codex_code_mode::CodeModeTurnWorker> {
) -> Option<Box<dyn Send>> {
if !turn.features.enabled(Feature::CodeMode) {
return None;
}
@@ -251,9 +260,7 @@ fn truncate_code_mode_result(
truncate_function_output_items_with_policy(&items, policy)
}
pub(super) async fn build_enabled_tools(
exec: &ExecContext,
) -> Vec<codex_code_mode::ToolDefinition> {
pub(super) async fn build_enabled_tools(exec: &ExecContext) -> Vec<code_mode_impl::ToolDefinition> {
let router = build_nested_router(exec).await;
let specs = router.specs();
collect_code_mode_tool_definitions(&specs)
@@ -340,18 +347,18 @@ async fn call_nested_tool(
Ok(result.code_mode_result())
}
fn tool_kind_for_spec(spec: &ToolSpec) -> codex_code_mode::CodeModeToolKind {
fn tool_kind_for_spec(spec: &ToolSpec) -> code_mode_impl::CodeModeToolKind {
if matches!(spec, ToolSpec::Freeform(_)) {
codex_code_mode::CodeModeToolKind::Freeform
code_mode_impl::CodeModeToolKind::Freeform
} else {
codex_code_mode::CodeModeToolKind::Function
code_mode_impl::CodeModeToolKind::Function
}
}
fn tool_kind_for_name(
spec: Option<ToolSpec>,
tool_name: &ToolName,
) -> Result<codex_code_mode::CodeModeToolKind, String> {
) -> Result<code_mode_impl::CodeModeToolKind, String> {
spec.as_ref()
.map(tool_kind_for_spec)
.ok_or_else(|| format!("tool `{tool_name}` is not enabled in {PUBLIC_TOOL_NAME}"))
@@ -364,12 +371,8 @@ fn build_nested_tool_payload(
) -> Result<ToolPayload, String> {
let actual_kind = tool_kind_for_name(spec, tool_name)?;
match actual_kind {
codex_code_mode::CodeModeToolKind::Function => {
build_function_tool_payload(tool_name, input)
}
codex_code_mode::CodeModeToolKind::Freeform => {
build_freeform_tool_payload(tool_name, input)
}
code_mode_impl::CodeModeToolKind::Function => build_function_tool_payload(tool_name, input),
code_mode_impl::CodeModeToolKind::Freeform => build_freeform_tool_payload(tool_name, input),
}
}

View File

@@ -1,13 +1,15 @@
use codex_code_mode::ImageDetail as CodeModeImageDetail;
use code_mode_impl::ImageDetail as CodeModeImageDetail;
use codex_protocol::models::FunctionCallOutputContentItem;
use codex_protocol::models::ImageDetail;
use super::code_mode_impl;
trait IntoProtocol<T> {
fn into_protocol(self) -> T;
}
pub(super) fn into_function_call_output_content_items(
items: Vec<codex_code_mode::FunctionCallOutputContentItem>,
items: Vec<code_mode_impl::FunctionCallOutputContentItem>,
) -> Vec<FunctionCallOutputContentItem> {
items.into_iter().map(IntoProtocol::into_protocol).collect()
}
@@ -24,16 +26,14 @@ impl IntoProtocol<ImageDetail> for CodeModeImageDetail {
}
}
impl IntoProtocol<FunctionCallOutputContentItem>
for codex_code_mode::FunctionCallOutputContentItem
{
impl IntoProtocol<FunctionCallOutputContentItem> for code_mode_impl::FunctionCallOutputContentItem {
fn into_protocol(self) -> FunctionCallOutputContentItem {
let value = self;
match value {
codex_code_mode::FunctionCallOutputContentItem::InputText { text } => {
code_mode_impl::FunctionCallOutputContentItem::InputText { text } => {
FunctionCallOutputContentItem::InputText { text }
}
codex_code_mode::FunctionCallOutputContentItem::InputImage { image_url, detail } => {
code_mode_impl::FunctionCallOutputContentItem::InputImage { image_url, detail } => {
FunctionCallOutputContentItem::InputImage {
image_url,
detail: detail.map(IntoProtocol::into_protocol),

View File

@@ -10,6 +10,7 @@ use crate::tools::registry::ToolKind;
use super::DEFAULT_WAIT_YIELD_TIME_MS;
use super::ExecContext;
use super::WAIT_TOOL_NAME;
use super::code_mode_impl;
use super::handle_runtime_response;
pub struct CodeModeWaitHandler;
@@ -65,7 +66,7 @@ impl ToolHandler for CodeModeWaitHandler {
.session
.services
.code_mode_service
.wait(codex_code_mode::WaitRequest {
.wait(code_mode_impl::WaitRequest {
cell_id: args.cell_id,
yield_time_ms: args.yield_time_ms,
terminate: args.terminate,

View File

@@ -51,7 +51,6 @@ pub use test_sync::TestSyncHandler;
pub use tool_search::ToolSearchHandler;
pub use tool_suggest::ToolSuggestHandler;
pub use unavailable_tool::UnavailableToolHandler;
pub(crate) use unavailable_tool::unavailable_tool_message;
pub use unified_exec::UnifiedExecHandler;
pub use view_image::ViewImageHandler;

View File

@@ -52,9 +52,9 @@ use codex_utils_output_truncation::TruncationPolicy;
use codex_utils_output_truncation::truncate_text;
pub(crate) const JS_REPL_PRAGMA_PREFIX: &str = "// codex-js-repl:";
const KERNEL_SOURCE: &str = include_str!("kernel.js");
const MERIYAH_UMD: &str = include_str!("meriyah.umd.min.js");
const JS_REPL_MIN_NODE_VERSION: &str = include_str!("../../../../node-version.txt");
const KERNEL_SOURCE: &str = codex_js_repl::KERNEL_SOURCE;
const MERIYAH_UMD: &str = codex_js_repl::MERIYAH_UMD;
const JS_REPL_MIN_NODE_VERSION: &str = codex_js_repl::MIN_NODE_VERSION;
const JS_REPL_STDERR_TAIL_LINE_LIMIT: usize = 20;
const JS_REPL_STDERR_TAIL_LINE_MAX_BYTES: usize = 512;
const JS_REPL_STDERR_TAIL_MAX_BYTES: usize = 4_096;

View File

@@ -7,23 +7,15 @@ use crate::tools::handlers::multi_agents_common::MIN_WAIT_TIMEOUT_MS;
use crate::tools::registry::ToolRegistryBuilder;
use codex_mcp::ToolInfo;
use codex_protocol::dynamic_tools::DynamicToolSpec;
use codex_tools::AdditionalProperties;
use codex_tool_spec::ToolSpecPlanParams;
use codex_tool_spec::build_tool_spec_plan;
use codex_tools::DiscoverableTool;
use codex_tools::JsonSchema;
use codex_tools::ResponsesApiTool;
use codex_tools::ToolHandlerKind;
use codex_tools::ToolName;
use codex_tools::ToolNamespace;
use codex_tools::ToolRegistryPlanDeferredTool;
use codex_tools::ToolRegistryPlanMcpTool;
use codex_tools::ToolRegistryPlanParams;
use codex_tools::ToolUserShellType;
use codex_tools::ToolsConfig;
use codex_tools::WaitAgentTimeoutOptions;
use codex_tools::augment_tool_spec_for_code_mode;
use codex_tools::build_tool_registry_plan;
use std::collections::HashMap;
use std::collections::HashSet;
use std::sync::Arc;
pub(crate) fn tool_user_shell_type(user_shell: &Shell) -> ToolUserShellType {
@@ -36,38 +28,6 @@ pub(crate) fn tool_user_shell_type(user_shell: &Shell) -> ToolUserShellType {
}
}
struct McpToolPlanInputs<'a> {
mcp_tools: Vec<ToolRegistryPlanMcpTool<'a>>,
tool_namespaces: HashMap<String, ToolNamespace>,
}
fn map_mcp_tools_for_plan(mcp_tools: &HashMap<String, ToolInfo>) -> McpToolPlanInputs<'_> {
McpToolPlanInputs {
mcp_tools: mcp_tools
.values()
.map(|tool| ToolRegistryPlanMcpTool {
name: tool.canonical_tool_name(),
tool: &tool.tool,
})
.collect(),
tool_namespaces: mcp_tools
.values()
.map(|tool| {
(
tool.callable_namespace.clone(),
ToolNamespace {
name: tool.callable_namespace.clone(),
description: tool
.connector_description
.clone()
.or_else(|| tool.server_instructions.clone()),
},
)
})
.collect(),
}
}
pub(crate) fn build_specs_with_discoverable_tools(
config: &ToolsConfig,
mcp_tools: Option<HashMap<String, ToolInfo>>,
@@ -107,34 +67,17 @@ pub(crate) fn build_specs_with_discoverable_tools(
use crate::tools::handlers::multi_agents_v2::SendMessageHandler as SendMessageHandlerV2;
use crate::tools::handlers::multi_agents_v2::SpawnAgentHandler as SpawnAgentHandlerV2;
use crate::tools::handlers::multi_agents_v2::WaitAgentHandler as WaitAgentHandlerV2;
use crate::tools::handlers::unavailable_tool_message;
use crate::tools::tool_search_entry::build_tool_search_entries;
let mut builder = ToolRegistryBuilder::new();
let mcp_tool_plan_inputs = mcp_tools.as_ref().map(map_mcp_tools_for_plan);
let deferred_mcp_tool_sources = deferred_mcp_tools.as_ref().map(|tools| {
tools
.values()
.map(|tool| ToolRegistryPlanDeferredTool {
name: tool.canonical_tool_name(),
server_name: tool.server_name.as_str(),
connector_name: tool.connector_name.as_deref(),
connector_description: tool.connector_description.as_deref(),
})
.collect::<Vec<_>>()
});
let default_agent_type_description =
crate::agent::role::spawn_tool_spec::build(&std::collections::BTreeMap::new());
let plan = build_tool_registry_plan(
let plan = build_tool_spec_plan(
config,
ToolRegistryPlanParams {
mcp_tools: mcp_tool_plan_inputs
.as_ref()
.map(|inputs| inputs.mcp_tools.as_slice()),
deferred_mcp_tools: deferred_mcp_tool_sources.as_deref(),
tool_namespaces: mcp_tool_plan_inputs
.as_ref()
.map(|inputs| &inputs.tool_namespaces),
ToolSpecPlanParams {
mcp_tools: mcp_tools.as_ref(),
deferred_mcp_tools: deferred_mcp_tools.as_ref(),
unavailable_called_tools,
discoverable_tools: discoverable_tools.as_deref(),
dynamic_tools,
default_agent_type_description: &default_agent_type_description,
@@ -145,6 +88,8 @@ pub(crate) fn build_specs_with_discoverable_tools(
},
},
);
let unavailable_called_tools = plan.unavailable_called_tools;
let plan = plan.registry_plan;
let shell_handler = Arc::new(ShellHandler);
let unified_exec_handler = Arc::new(UnifiedExecHandler);
let plan_handler = Arc::new(PlanHandler);
@@ -170,11 +115,6 @@ pub(crate) fn build_specs_with_discoverable_tools(
let js_repl_handler = Arc::new(JsReplHandler);
let js_repl_reset_handler = Arc::new(JsReplResetHandler);
let unavailable_tool_handler = Arc::new(UnavailableToolHandler);
let mut existing_spec_names = plan
.specs
.iter()
.map(|configured_tool| configured_tool.name().to_string())
.collect::<HashSet<_>>();
for spec in plan.specs {
if spec.supports_parallel_tool_calls {
@@ -303,35 +243,7 @@ pub(crate) fn build_specs_with_discoverable_tools(
}
for unavailable_tool in unavailable_called_tools {
let tool_name = unavailable_tool.display();
if existing_spec_names.insert(tool_name.clone()) {
let spec = codex_tools::ToolSpec::Function(ResponsesApiTool {
name: tool_name.clone(),
description: unavailable_tool_message(
&tool_name,
"Calling this placeholder returns an error explaining that the tool is unavailable.",
),
strict: false,
parameters: JsonSchema::object(
Default::default(),
/*required*/ None,
Some(AdditionalProperties::Boolean(false)),
),
output_schema: None,
defer_loading: None,
});
let spec = if config.code_mode_enabled {
augment_tool_spec_for_code_mode(spec)
} else {
spec
};
builder.push_spec(spec);
}
builder.register_handler(unavailable_tool, unavailable_tool_handler.clone());
}
builder
}
#[cfg(test)]
#[path = "spec_tests.rs"]
mod tests;

View File

@@ -15,6 +15,7 @@ anyhow = { workspace = true }
assert_cmd = { workspace = true }
base64 = { workspace = true }
codex-arg0 = { workspace = true }
codex-code-mode-runtime = { workspace = true }
codex-core = { workspace = true }
codex-exec-server = { workspace = true }
codex-features = { workspace = true }

View File

@@ -382,20 +382,22 @@ impl TestCodexBuilder {
) -> anyhow::Result<TestCodex> {
let auth = self.auth.clone();
let thread_manager = if config.model_catalog.is_some() {
ThreadManager::new(
ThreadManager::new_with_code_mode_runtime_factory(
&config,
codex_core::test_support::auth_manager_from_auth(auth.clone()),
SessionSource::Exec,
CollaborationModesConfig::default(),
Arc::clone(&environment_manager),
/*analytics_events_client*/ None,
codex_code_mode_runtime::runtime_factory(),
)
} else {
codex_core::test_support::thread_manager_with_models_provider_and_home(
codex_core::test_support::thread_manager_with_models_provider_home_and_code_mode_runtime_factory(
auth.clone(),
config.model_provider.clone(),
config.codex_home.to_path_buf(),
Arc::clone(&environment_manager),
codex_code_mode_runtime::runtime_factory(),
)
};
let thread_manager = Arc::new(thread_manager);

View File

@@ -23,6 +23,7 @@ use codex_protocol::protocol::TurnAbortReason;
use codex_protocol::protocol::TurnAbortedEvent;
use codex_protocol::protocol::TurnCompleteEvent;
use codex_protocol::protocol::TurnStartedEvent;
use codex_protocol::user_input::UserInput;
use codex_thread_store::ArchiveThreadParams;
use codex_thread_store::LocalThreadStore;
use codex_thread_store::ThreadStore;

View File

@@ -138,8 +138,28 @@ use std::path::PathBuf;
use std::sync::Arc;
use std::time::Duration as StdDuration;
#[path = "codex_tests_guardian.rs"]
mod guardian_tests;
/// Returns this crate's manifest directory as a usable path, tolerating a
/// relative `CARGO_MANIFEST_DIR` (as seen under non-Cargo builds).
fn manifest_dir() -> PathBuf {
    let manifest_dir = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
    if manifest_dir.is_absolute() {
        manifest_dir
    } else {
        // NOTE(review): a relative CARGO_MANIFEST_DIR presumably comes from the
        // Bazel build — resolve it against the current working directory.
        let current_dir = std::env::current_dir().expect("current dir");
        let joined = current_dir.join(&manifest_dir);
        if joined.exists() {
            return joined;
        }
        // The cwd may already be inside `codex-rs`; retry with that prefix
        // stripped, falling back to the naive join when stripping fails.
        manifest_dir
            .strip_prefix("codex-rs")
            .ok()
            .map(|relative_manifest_dir| current_dir.join(relative_manifest_dir))
            .unwrap_or(joined)
    }
}
struct InstructionsTestCase {
slug: &'static str,
expects_apply_patch_description: bool,
@@ -1573,7 +1593,7 @@ async fn fork_startup_context_then_first_turn_diff_snapshot() -> anyhow::Result<
);
let mut settings = insta::Settings::clone_current();
settings.set_snapshot_path("snapshots");
settings.set_snapshot_path(manifest_dir().join("src/session/snapshots"));
settings.set_prepend_module_to_snapshot(false);
settings.bind(|| {
insta::assert_snapshot!(
@@ -2948,6 +2968,7 @@ async fn session_new_fails_when_zsh_fork_enabled_without_zsh_path() {
.expect("create environment"),
)),
/*analytics_events_client*/ None,
/*code_mode_runtime_factory*/ None,
)
.await;
@@ -3109,6 +3130,7 @@ pub(crate) async fn make_session_and_context() -> (Session, TurnContext) {
code_mode_service: crate::tools::code_mode::CodeModeService::new(
config.js_repl_node_path.clone(),
),
code_mode_runtime_factory: codex_code_mode::default_runtime_factory(),
environment: Some(Arc::clone(&environment)),
};
let js_repl = Arc::new(JsReplHandle::with_node_path(
@@ -3270,6 +3292,7 @@ async fn make_session_with_config_and_rx(
.expect("create environment"),
)),
/*analytics_events_client*/ None,
/*code_mode_runtime_factory*/ None,
)
.await?;
@@ -4072,6 +4095,7 @@ pub(crate) async fn make_session_and_context_with_dynamic_tools_and_rx(
code_mode_service: crate::tools::code_mode::CodeModeService::new(
config.js_repl_node_path.clone(),
),
code_mode_runtime_factory: codex_code_mode::default_runtime_factory(),
environment: Some(Arc::clone(&environment)),
};
let js_repl = Arc::new(JsReplHandle::with_node_path(

View File

@@ -456,6 +456,7 @@ async fn guardian_subagent_does_not_inherit_parent_exec_policy_rules() {
user_shell_override: None,
parent_trace: None,
analytics_events_client: None,
code_mode_runtime_factory: codex_code_mode::default_runtime_factory(),
})
.await
.expect("spawn guardian subagent");

View File

@@ -3,6 +3,7 @@ use crate::agents_md::LOCAL_AGENTS_MD_FILENAME;
use crate::config::edit::ConfigEdit;
use crate::config::edit::ConfigEditsBuilder;
use crate::config::edit::apply_blocking;
use crate::config::edit::set_project_trust_level_inner;
use crate::config_loader::RequirementSource;
use crate::config_loader::project_trust_key;
use crate::plugins::PluginsManager;
@@ -75,6 +76,7 @@ use std::collections::HashMap;
use std::path::Path;
use std::time::Duration;
use tempfile::TempDir;
use toml_edit::DocumentMut;
fn stdio_mcp(command: &str) -> McpServerConfig {
McpServerConfig {

View File

@@ -52,11 +52,31 @@ use insta::Settings;
use insta::assert_snapshot;
use pretty_assertions::assert_eq;
use std::collections::BTreeMap;
use std::path::PathBuf;
use std::sync::Arc;
use std::time::Duration;
use tempfile::TempDir;
use tokio_util::sync::CancellationToken;
/// Resolves this crate's manifest directory, coping with the case where
/// `CARGO_MANIFEST_DIR` is a relative path (e.g. under Bazel).
fn manifest_dir() -> PathBuf {
    let manifest_dir = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
    if manifest_dir.is_absolute() {
        return manifest_dir;
    }
    let current_dir = std::env::current_dir().expect("current dir");
    let joined = current_dir.join(&manifest_dir);
    if joined.exists() {
        return joined;
    }
    // The cwd may already sit inside `codex-rs`; drop that prefix and retry,
    // otherwise fall back to the naive join.
    match manifest_dir.strip_prefix("codex-rs") {
        Ok(relative_manifest_dir) => current_dir.join(relative_manifest_dir),
        Err(_) => joined,
    }
}
fn fixed_guardian_parent_session_id() -> ThreadId {
ThreadId::from_string("11111111-1111-4111-8111-111111111111")
.expect("fixed parent session id should be a valid UUID")
@@ -924,17 +944,20 @@ async fn guardian_review_request_layout_matches_model_visible_request_snapshot()
assert_eq!(assessment.outcome, GuardianAssessmentOutcome::Allow);
let request = request_log.single_request();
let snapshot =
normalize_guardian_snapshot_paths(context_snapshot::format_labeled_requests_snapshot(
"Guardian review request layout",
&[("Guardian Review Request", &request)],
&guardian_snapshot_options(),
));
let mut settings = Settings::clone_current();
settings.set_snapshot_path("snapshots");
settings.set_snapshot_path(manifest_dir().join("src/guardian/snapshots"));
settings.set_prepend_module_to_snapshot(false);
settings.bind(|| {
assert_snapshot!(
"codex_core__guardian__tests__guardian_review_request_layout",
normalize_guardian_snapshot_paths(context_snapshot::format_labeled_requests_snapshot(
"Guardian review request layout",
&[("Guardian Review Request", &request)],
&guardian_snapshot_options(),
))
snapshot
);
});
@@ -1200,27 +1223,27 @@ async fn guardian_reuses_prompt_cache_key_and_appends_prior_reviews() -> anyhow:
);
assert!(!second_user_message.contains("[1] user: Please check the repo visibility"));
let snapshot = format!(
"{}\n\nshared_prompt_cache_key: {}\nfollowup_contains_first_rationale: {}",
normalize_guardian_snapshot_paths(context_snapshot::format_labeled_requests_snapshot(
"Guardian follow-up review request layout",
&[
("Initial Guardian Review Request", &requests[0]),
("Follow-up Guardian Review Request", &requests[1]),
],
&guardian_snapshot_options(),
)),
first_body["prompt_cache_key"] == second_body["prompt_cache_key"],
second_body.to_string().contains(first_rationale),
);
let mut settings = Settings::clone_current();
settings.set_snapshot_path("snapshots");
settings.set_snapshot_path(manifest_dir().join("src/guardian/snapshots"));
settings.set_prepend_module_to_snapshot(false);
settings.bind(|| {
assert_snapshot!(
"codex_core__guardian__tests__guardian_followup_review_request_layout",
format!(
"{}\n\nshared_prompt_cache_key: {}\nfollowup_contains_first_rationale: {}",
normalize_guardian_snapshot_paths(
context_snapshot::format_labeled_requests_snapshot(
"Guardian follow-up review request layout",
&[
("Initial Guardian Review Request", &requests[0]),
("Follow-up Guardian Review Request", &requests[1]),
],
&guardian_snapshot_options(),
)
),
first_body["prompt_cache_key"] == second_body["prompt_cache_key"],
second_body.to_string().contains(first_rationale),
)
snapshot
);
});

View File

@@ -0,0 +1,6 @@
load("//:defs.bzl", "codex_rust_crate")
codex_rust_crate(
name = "file-watcher",
crate_name = "codex_file_watcher",
)

View File

@@ -0,0 +1,22 @@
[package]
name = "codex-file-watcher"
version.workspace = true
edition.workspace = true
license.workspace = true
[lib]
name = "codex_file_watcher"
path = "src/lib.rs"
[lints]
workspace = true
[dependencies]
notify = { workspace = true }
tokio = { workspace = true, features = ["rt", "sync", "time"] }
tracing = { workspace = true }
[dev-dependencies]
pretty_assertions = { workspace = true }
tempfile = { workspace = true }
tokio = { workspace = true, features = ["macros", "rt", "time"] }

View File

@@ -0,0 +1,589 @@
//! Watches subscribed files or directories and routes coarse-grained change
//! notifications to the subscribers that own matching watched paths.
use std::collections::BTreeSet;
use std::collections::HashMap;
use std::path::Path;
use std::path::PathBuf;
use std::sync::Arc;
use std::sync::Mutex;
use std::sync::RwLock;
use std::sync::atomic::AtomicUsize;
use std::sync::atomic::Ordering;
use std::time::Duration;
use notify::Event;
use notify::EventKind;
use notify::RecommendedWatcher;
use notify::RecursiveMode;
use notify::Watcher;
use tokio::runtime::Handle;
use tokio::sync::Mutex as AsyncMutex;
use tokio::sync::Notify;
use tokio::sync::mpsc;
use tokio::time::Instant;
use tokio::time::sleep_until;
use tracing::warn;
#[derive(Debug, Clone, PartialEq, Eq)]
/// Coalesced file change notification for a subscriber.
///
/// One event may merge many raw filesystem notifications that arrived since
/// the subscriber last received an event.
pub struct FileWatcherEvent {
    /// Changed paths delivered in sorted order with duplicates removed.
    pub paths: Vec<PathBuf>,
}
#[derive(Clone, Debug, Eq, Hash, PartialEq)]
/// Path subscription registered by a [`FileWatcherSubscriber`].
pub struct WatchPath {
    /// Root path to watch.
    pub path: PathBuf,
    /// Whether events below `path` should match recursively.
    pub recursive: bool,
}
/// Monotonically increasing identifier assigned to each subscriber.
type SubscriberId = u64;
#[derive(Default)]
/// Shared registry of subscribers and per-path watch reference counts.
struct WatchState {
    // Next id handed out by `add_subscriber`; incremented on every add and
    // never reused.
    next_subscriber_id: SubscriberId,
    // Aggregate recursive/non-recursive registration counts per watched root,
    // across all subscribers.
    path_ref_counts: HashMap<PathBuf, PathWatchCounts>,
    // Per-subscriber bookkeeping, keyed by subscriber id.
    subscribers: HashMap<SubscriberId, SubscriberState>,
}
/// Bookkeeping for a single subscriber: its registrations and event channel.
struct SubscriberState {
    // Reference count per watched path for this subscriber (the same path may
    // be registered by multiple independent `WatchRegistration`s).
    watched_paths: HashMap<WatchPath, usize>,
    // Sending half used to deliver changed paths to this subscriber.
    tx: WatchSender,
}
/// Receives coalesced change notifications for a single subscriber.
pub struct Receiver {
    inner: Arc<ReceiverInner>,
}
/// Sending half paired with a [`Receiver`]; held in `SubscriberState`.
struct WatchSender {
    inner: Arc<ReceiverInner>,
}
/// State shared between a [`WatchSender`] and its [`Receiver`].
struct ReceiverInner {
    // Pending changed paths; `BTreeSet` keeps them sorted and de-duplicated.
    changed_paths: AsyncMutex<BTreeSet<PathBuf>>,
    // Wakes the receiver when new paths arrive or the last sender drops.
    notify: Notify,
    // Number of live `WatchSender` clones; zero means the channel is closed.
    sender_count: AtomicUsize,
}
impl Receiver {
    /// Waits for the next batch of changed paths, or returns `None` once the
    /// corresponding subscriber has been removed and no more events can arrive.
    pub async fn recv(&mut self) -> Option<FileWatcherEvent> {
        loop {
            // Create the `Notified` future *before* inspecting state so a
            // notification sent between the check below and the `.await` at
            // the bottom is not lost.
            let notified = self.inner.notify.notified();
            {
                let mut changed_paths = self.inner.changed_paths.lock().await;
                if !changed_paths.is_empty() {
                    // Drain the whole pending set as one coalesced event;
                    // `BTreeSet` iteration yields sorted, de-duplicated paths.
                    return Some(FileWatcherEvent {
                        paths: std::mem::take(&mut *changed_paths).into_iter().collect(),
                    });
                }
                // Nothing pending and no senders left: the channel is closed.
                if self.inner.sender_count.load(Ordering::Acquire) == 0 {
                    return None;
                }
            }
            notified.await;
        }
    }
}
impl WatchSender {
    /// Merges `paths` into the pending change set, waking the receiver only
    /// when at least one path was not already pending.
    async fn add_changed_paths(&self, paths: &[PathBuf]) {
        if paths.is_empty() {
            return;
        }
        let mut pending = self.inner.changed_paths.lock().await;
        let len_before = pending.len();
        for path in paths {
            pending.insert(path.clone());
        }
        // The set only grows when something genuinely new arrived; skip the
        // wakeup for pure duplicates.
        if pending.len() != len_before {
            self.inner.notify.notify_one();
        }
    }
}
impl Clone for WatchSender {
    fn clone(&self) -> Self {
        // Track the number of live senders so the receiver can detect channel
        // closure. This mirrors the refcount pattern: the increment uses
        // Relaxed while the final decrement in `Drop` synchronizes (AcqRel).
        self.inner.sender_count.fetch_add(1, Ordering::Relaxed);
        Self {
            inner: Arc::clone(&self.inner),
        }
    }
}
impl Drop for WatchSender {
    fn drop(&mut self) {
        // `fetch_sub` returns the previous value, so `== 1` means this was the
        // last live sender; wake every waiting receiver so `recv` can observe
        // the zero count and return `None`.
        if self.inner.sender_count.fetch_sub(1, Ordering::AcqRel) == 1 {
            self.inner.notify.notify_waiters();
        }
    }
}
/// Builds a connected sender/receiver pair sharing one `ReceiverInner`.
fn watch_channel() -> (WatchSender, Receiver) {
    let inner = Arc::new(ReceiverInner {
        changed_paths: AsyncMutex::new(BTreeSet::new()),
        notify: Notify::new(),
        // Starts at one to account for the sender constructed just below.
        sender_count: AtomicUsize::new(1),
    });
    let sender = WatchSender {
        inner: Arc::clone(&inner),
    };
    let receiver = Receiver { inner };
    (sender, receiver)
}
#[derive(Clone, Copy, Debug, Default, PartialEq, Eq)]
/// Recursive and non-recursive registration counts for one watched root.
struct PathWatchCounts {
    non_recursive: usize,
    recursive: usize,
}
impl PathWatchCounts {
    /// Adds `amount` registrations of the given kind.
    fn increment(&mut self, recursive: bool, amount: usize) {
        let slot = if recursive {
            &mut self.recursive
        } else {
            &mut self.non_recursive
        };
        *slot += amount;
    }
    /// Removes up to `amount` registrations of the given kind, saturating at
    /// zero rather than underflowing.
    fn decrement(&mut self, recursive: bool, amount: usize) {
        let slot = if recursive {
            &mut self.recursive
        } else {
            &mut self.non_recursive
        };
        *slot = slot.saturating_sub(amount);
    }
    /// The watch mode this path needs right now: recursive wins over
    /// non-recursive, and `None` means the path no longer needs watching.
    fn effective_mode(self) -> Option<RecursiveMode> {
        if self.recursive > 0 {
            return Some(RecursiveMode::Recursive);
        }
        if self.non_recursive > 0 {
            return Some(RecursiveMode::NonRecursive);
        }
        None
    }
    /// True when no registrations of either kind remain.
    fn is_empty(self) -> bool {
        self == Self::default()
    }
}
/// The live `notify` watcher plus the OS-level watches currently installed.
struct FileWatcherInner {
    watcher: RecommendedWatcher,
    // Each installed watch and the recursion mode it was installed with.
    watched_paths: HashMap<PathBuf, RecursiveMode>,
}
/// Coalesces bursts of watch notifications and emits at most once per interval.
pub struct ThrottledWatchReceiver {
    rx: Receiver,
    interval: Duration,
    next_allowed: Option<Instant>,
}
impl ThrottledWatchReceiver {
    /// Creates a throttling wrapper around a raw watcher [`Receiver`].
    pub fn new(rx: Receiver, interval: Duration) -> Self {
        Self {
            rx,
            interval,
            next_allowed: None,
        }
    }
    /// Receives the next event, enforcing the configured minimum delay after
    /// the previous emission.
    pub async fn recv(&mut self) -> Option<FileWatcherEvent> {
        // Changed paths accumulate in the underlying channel while we sleep,
        // so throttling coalesces rather than drops notifications.
        if let Some(deadline) = self.next_allowed {
            sleep_until(deadline).await;
        }
        match self.rx.recv().await {
            Some(event) => {
                self.next_allowed = Some(Instant::now() + self.interval);
                Some(event)
            }
            None => None,
        }
    }
}
/// Handle used to register watched paths for one logical consumer.
pub struct FileWatcherSubscriber {
    id: SubscriberId,
    file_watcher: Arc<FileWatcher>,
}
impl FileWatcherSubscriber {
    /// Registers a single path for this subscriber and returns an RAII guard
    /// that unregisters it on drop.
    pub fn register_path(&self, path: PathBuf, recursive: bool) -> WatchRegistration {
        let watched = WatchPath { path, recursive };
        self.register_paths(vec![watched])
    }
    /// Registers the provided paths for this subscriber and returns an RAII
    /// guard that unregisters them on drop.
    pub fn register_paths(&self, watched_paths: Vec<WatchPath>) -> WatchRegistration {
        let deduped = dedupe_watched_paths(watched_paths);
        self.file_watcher.register_paths(self.id, &deduped);
        // The guard holds only a weak reference so it never keeps the watcher
        // itself alive.
        WatchRegistration {
            file_watcher: Arc::downgrade(&self.file_watcher),
            subscriber_id: self.id,
            watched_paths: deduped,
        }
    }
}
impl Drop for FileWatcherSubscriber {
    fn drop(&mut self) {
        self.file_watcher.remove_subscriber(self.id);
    }
}
/// RAII guard for a set of active path registrations.
pub struct WatchRegistration {
    file_watcher: std::sync::Weak<FileWatcher>,
    subscriber_id: SubscriberId,
    watched_paths: Vec<WatchPath>,
}
impl Default for WatchRegistration {
    /// An inert registration: the weak handle never upgrades, so dropping it
    /// is a no-op.
    fn default() -> Self {
        Self {
            file_watcher: std::sync::Weak::new(),
            subscriber_id: 0,
            watched_paths: Vec::new(),
        }
    }
}
impl Drop for WatchRegistration {
    fn drop(&mut self) {
        // If the watcher itself is already gone there is nothing to unwind.
        let Some(file_watcher) = self.file_watcher.upgrade() else {
            return;
        };
        file_watcher.unregister_paths(self.subscriber_id, &self.watched_paths);
    }
}
/// Multi-subscriber file watcher built on top of `notify`.
pub struct FileWatcher {
    // `None` for the inert `noop()` variant, which installs no OS watches.
    inner: Option<Mutex<FileWatcherInner>>,
    // Subscriber registry shared with the background event loop.
    state: Arc<RwLock<WatchState>>,
}
impl FileWatcher {
/// Creates a live filesystem watcher and starts its background event loop
/// on the current Tokio runtime.
pub fn new() -> notify::Result<Self> {
let (raw_tx, raw_rx) = mpsc::unbounded_channel();
let raw_tx_clone = raw_tx;
let watcher = notify::recommended_watcher(move |res| {
let _ = raw_tx_clone.send(res);
})?;
let inner = FileWatcherInner {
watcher,
watched_paths: HashMap::new(),
};
let state = Arc::new(RwLock::new(WatchState::default()));
let file_watcher = Self {
inner: Some(Mutex::new(inner)),
state,
};
file_watcher.spawn_event_loop(raw_rx);
Ok(file_watcher)
}
/// Creates an inert watcher that only supports test-driven synthetic
/// notifications.
pub fn noop() -> Self {
Self {
inner: None,
state: Arc::new(RwLock::new(WatchState::default())),
}
}
    /// Adds a new subscriber and returns both its registration handle and its
    /// dedicated event receiver.
    pub fn add_subscriber(self: &Arc<Self>) -> (FileWatcherSubscriber, Receiver) {
        let (tx, rx) = watch_channel();
        let mut state = self
            .state
            .write()
            // Recover from a poisoned lock: the bookkeeping here remains
            // internally consistent even if another thread panicked mid-write.
            .unwrap_or_else(std::sync::PoisonError::into_inner);
        // Allocate the next id; ids are monotonic and never reused.
        let subscriber_id = state.next_subscriber_id;
        state.next_subscriber_id += 1;
        state.subscribers.insert(
            subscriber_id,
            SubscriberState {
                watched_paths: HashMap::new(),
                tx,
            },
        );
        let subscriber = FileWatcherSubscriber {
            id: subscriber_id,
            file_watcher: self.clone(),
        };
        (subscriber, rx)
    }
    // Records `watched_paths` for `subscriber_id` and installs or upgrades the
    // OS-level watch for any path whose aggregate mode changed.
    fn register_paths(&self, subscriber_id: SubscriberId, watched_paths: &[WatchPath]) {
        let mut state = self
            .state
            .write()
            .unwrap_or_else(std::sync::PoisonError::into_inner);
        // Lazily-acquired guard over the OS watcher; taken only when a path's
        // effective watch mode actually changes.
        let mut inner_guard: Option<std::sync::MutexGuard<'_, FileWatcherInner>> = None;
        for watched_path in watched_paths {
            {
                // Subscriber already removed: stop registering further paths.
                let Some(subscriber) = state.subscribers.get_mut(&subscriber_id) else {
                    return;
                };
                // Count duplicate registrations of the same path so each
                // guard's later unregister only undoes its own registration.
                *subscriber
                    .watched_paths
                    .entry(watched_path.clone())
                    .or_default() += 1;
            }
            let counts = state
                .path_ref_counts
                .entry(watched_path.path.clone())
                .or_default();
            let previous_mode = counts.effective_mode();
            counts.increment(watched_path.recursive, /*amount*/ 1);
            let next_mode = counts.effective_mode();
            // Touch the OS watcher only when the aggregate mode changed (first
            // registration, or non-recursive upgraded to recursive).
            if previous_mode != next_mode {
                self.reconfigure_watch(&watched_path.path, next_mode, &mut inner_guard);
            }
        }
    }
// Bridge `notify`'s callback-based events into the Tokio runtime and
// notify the matching subscribers.
fn spawn_event_loop(&self, mut raw_rx: mpsc::UnboundedReceiver<notify::Result<Event>>) {
if let Ok(handle) = Handle::try_current() {
let state = Arc::clone(&self.state);
handle.spawn(async move {
loop {
match raw_rx.recv().await {
Some(Ok(event)) => {
if !is_mutating_event(&event) {
continue;
}
if event.paths.is_empty() {
continue;
}
Self::notify_subscribers(&state, &event.paths).await;
}
Some(Err(err)) => {
warn!("file watcher error: {err}");
}
None => break,
}
}
});
} else {
warn!("file watcher loop skipped: no Tokio runtime available");
}
}
fn unregister_paths(&self, subscriber_id: SubscriberId, watched_paths: &[WatchPath]) {
let mut state = self
.state
.write()
.unwrap_or_else(std::sync::PoisonError::into_inner);
let mut inner_guard: Option<std::sync::MutexGuard<'_, FileWatcherInner>> = None;
for watched_path in watched_paths {
{
let Some(subscriber) = state.subscribers.get_mut(&subscriber_id) else {
return;
};
let Some(subscriber_count) = subscriber.watched_paths.get_mut(watched_path) else {
continue;
};
*subscriber_count = subscriber_count.saturating_sub(1);
if *subscriber_count == 0 {
subscriber.watched_paths.remove(watched_path);
}
}
let Some(counts) = state.path_ref_counts.get_mut(&watched_path.path) else {
continue;
};
let previous_mode = counts.effective_mode();
counts.decrement(watched_path.recursive, /*amount*/ 1);
let next_mode = counts.effective_mode();
if counts.is_empty() {
state.path_ref_counts.remove(&watched_path.path);
}
if previous_mode != next_mode {
self.reconfigure_watch(&watched_path.path, next_mode, &mut inner_guard);
}
}
}
fn remove_subscriber(&self, subscriber_id: SubscriberId) {
let mut state = self
.state
.write()
.unwrap_or_else(std::sync::PoisonError::into_inner);
let Some(subscriber) = state.subscribers.remove(&subscriber_id) else {
return;
};
let mut inner_guard: Option<std::sync::MutexGuard<'_, FileWatcherInner>> = None;
for (watched_path, count) in subscriber.watched_paths {
let Some(path_counts) = state.path_ref_counts.get_mut(&watched_path.path) else {
continue;
};
let previous_mode = path_counts.effective_mode();
path_counts.decrement(watched_path.recursive, count);
let next_mode = path_counts.effective_mode();
if path_counts.is_empty() {
state.path_ref_counts.remove(&watched_path.path);
}
if previous_mode != next_mode {
self.reconfigure_watch(&watched_path.path, next_mode, &mut inner_guard);
}
}
}
fn reconfigure_watch<'a>(
&'a self,
path: &Path,
next_mode: Option<RecursiveMode>,
inner_guard: &mut Option<std::sync::MutexGuard<'a, FileWatcherInner>>,
) {
let Some(inner) = &self.inner else {
return;
};
if inner_guard.is_none() {
let guard = inner
.lock()
.unwrap_or_else(std::sync::PoisonError::into_inner);
*inner_guard = Some(guard);
}
let Some(guard) = inner_guard.as_mut() else {
return;
};
let existing_mode = guard.watched_paths.get(path).copied();
if existing_mode == next_mode {
return;
}
if existing_mode.is_some() {
if let Err(err) = guard.watcher.unwatch(path) {
warn!("failed to unwatch {}: {err}", path.display());
}
guard.watched_paths.remove(path);
}
let Some(next_mode) = next_mode else {
return;
};
if !path.exists() {
return;
}
if let Err(err) = guard.watcher.watch(path, next_mode) {
warn!("failed to watch {}: {err}", path.display());
return;
}
guard.watched_paths.insert(path.to_path_buf(), next_mode);
}
async fn notify_subscribers(state: &RwLock<WatchState>, event_paths: &[PathBuf]) {
let subscribers_to_notify: Vec<(WatchSender, Vec<PathBuf>)> = {
let state = state
.read()
.unwrap_or_else(std::sync::PoisonError::into_inner);
state
.subscribers
.values()
.filter_map(|subscriber| {
let changed_paths: Vec<PathBuf> = event_paths
.iter()
.filter(|event_path| {
subscriber.watched_paths.keys().any(|watched_path| {
watch_path_matches_event(watched_path, event_path)
})
})
.cloned()
.collect();
(!changed_paths.is_empty()).then_some((subscriber.tx.clone(), changed_paths))
})
.collect()
};
for (subscriber, changed_paths) in subscribers_to_notify {
subscriber.add_changed_paths(&changed_paths).await;
}
}
#[doc(hidden)]
pub async fn send_paths_for_test(&self, paths: Vec<PathBuf>) {
Self::notify_subscribers(&self.state, &paths).await;
}
#[cfg(test)]
pub(crate) fn spawn_event_loop_for_test(
&self,
raw_rx: mpsc::UnboundedReceiver<notify::Result<Event>>,
) {
self.spawn_event_loop(raw_rx);
}
#[cfg(test)]
pub(crate) fn watch_counts_for_test(&self, path: &Path) -> Option<(usize, usize)> {
let state = self
.state
.read()
.unwrap_or_else(std::sync::PoisonError::into_inner);
state
.path_ref_counts
.get(path)
.map(|counts| (counts.non_recursive, counts.recursive))
}
}
fn is_mutating_event(event: &Event) -> bool {
matches!(
event.kind,
EventKind::Create(_) | EventKind::Modify(_) | EventKind::Remove(_)
)
}
/// Sorts `watched_paths` by `(path, recursive)` and drops adjacent
/// duplicates, returning the normalized list.
fn dedupe_watched_paths(mut watched_paths: Vec<WatchPath>) -> Vec<WatchPath> {
    // Lexicographic tuple ordering: path bytes first, then the recursive
    // flag, so equal entries end up adjacent for `dedup`.
    watched_paths.sort_unstable_by(|lhs, rhs| {
        (lhs.path.as_os_str(), lhs.recursive).cmp(&(rhs.path.as_os_str(), rhs.recursive))
    });
    watched_paths.dedup();
    watched_paths
}
/// Decides whether a filesystem event at `event_path` is relevant to
/// `watched_path`.
fn watch_path_matches_event(watched_path: &WatchPath, event_path: &Path) -> bool {
    let target = watched_path.path.as_path();
    // An exact hit, or an event on an ancestor of the watched path, always
    // matches.
    if event_path == target || target.starts_with(event_path) {
        return true;
    }
    // Otherwise the event must sit underneath the watched path: anywhere in
    // the subtree for recursive watches, or directly inside for flat ones.
    if event_path.starts_with(target) {
        watched_path.recursive || event_path.parent() == Some(target)
    } else {
        false
    }
}
#[cfg(test)]
#[path = "file_watcher_tests.rs"]
mod tests;

View File

@@ -0,0 +1,11 @@
# Bazel target for the JS REPL crate, built via the shared codex_rust_crate macro.
load("//:defs.bzl", "codex_rust_crate")

codex_rust_crate(
    name = "js-repl",
    crate_name = "codex_js_repl",
    # Non-Rust assets the crate embeds at compile time (via include_str!)
    # must be listed as compile_data so Bazel makes them available.
    compile_data = [
        "//codex-rs:node-version.txt",
        "src/kernel.js",
        "src/meriyah.umd.min.js",
    ],
)

View File

@@ -0,0 +1,13 @@
# Manifest for the codex-js-repl crate.
[package]
# Shared settings are inherited from the workspace root.
edition.workspace = true
license.workspace = true
name = "codex-js-repl"
version.workspace = true

[lib]
# No runnable examples in the docs; skip doctest compilation.
doctest = false
name = "codex_js_repl"
path = "src/lib.rs"

[lints]
# Inherit the workspace-wide lint configuration.
workspace = true

View File

@@ -0,0 +1,3 @@
/// JavaScript source of the REPL kernel, embedded at compile time.
pub const KERNEL_SOURCE: &str = include_str!("kernel.js");
/// Bundled `meriyah.umd.min.js` asset (minified UMD build of the meriyah
/// JavaScript parser), embedded at compile time.
pub const MERIYAH_UMD: &str = include_str!("meriyah.umd.min.js");
/// Contents of the repo-level `node-version.txt` pin; presumably the minimum
/// supported Node.js version — TODO confirm against its consumers.
pub const MIN_NODE_VERSION: &str = include_str!("../../node-version.txt");

Some files were not shown because too many files have changed in this diff Show More