Compare commits


2 Commits

Author       SHA1        Message                                Date
zhao-oai     27018edc50  Merge branch 'main' into patch-guard   2025-10-15 09:38:48 -07:00
kevin zhao   0144fb4fab  initial commit                         2025-10-14 14:45:15 -07:00
33 changed files with 562 additions and 224 deletions

1
.gitignore vendored
View File

@@ -30,7 +30,6 @@ result
# cli tools
CLAUDE.md
.claude/
AGENTS.override.md
# caches
.cache/

2
codex-rs/Cargo.lock generated
View File

@@ -1051,6 +1051,7 @@ dependencies = [
"escargot",
"eventsource-stream",
"futures",
"ignore",
"indexmap 2.10.0",
"landlock",
"libc",
@@ -1069,6 +1070,7 @@ dependencies = [
"serde_json",
"serial_test",
"sha1",
"sha2",
"shlex",
"similar",
"strum_macros 0.27.2",

View File

@@ -1050,7 +1050,6 @@ impl CodexMessageProcessor {
effort,
summary,
final_output_json_schema: None,
disabled_tools: None,
})
.await;

View File

@@ -33,6 +33,7 @@ env-flags = { workspace = true }
eventsource-stream = { workspace = true }
futures = { workspace = true }
indexmap = { workspace = true }
ignore = { workspace = true }
libc = { workspace = true }
mcp-types = { workspace = true }
os_info = { workspace = true }
@@ -43,6 +44,7 @@ reqwest = { workspace = true, features = ["json", "stream"] }
serde = { workspace = true, features = ["derive"] }
serde_json = { workspace = true }
sha1 = { workspace = true }
sha2 = { workspace = true }
shlex = { workspace = true }
similar = { workspace = true }
strum_macros = { workspace = true }

View File

@@ -43,7 +43,6 @@ use crate::model_family::ModelFamily;
use crate::model_provider_info::ModelProviderInfo;
use crate::model_provider_info::WireApi;
use crate::openai_model_info::get_model_info;
use crate::openai_tools::create_allowed_tools_json_for_responses_api;
use crate::openai_tools::create_tools_json_for_responses_api;
use crate::protocol::RateLimitSnapshot;
use crate::protocol::RateLimitWindow;
@@ -55,7 +54,6 @@ use codex_otel::otel_event_manager::OtelEventManager;
use codex_protocol::config_types::ReasoningEffort as ReasoningEffortConfig;
use codex_protocol::config_types::ReasoningSummary as ReasoningSummaryConfig;
use codex_protocol::models::ResponseItem;
use std::collections::HashSet;
use std::sync::Arc;
#[derive(Debug, Deserialize)]
@@ -235,39 +233,13 @@ impl ModelClient {
//
// For Azure, we send `store: true` and preserve reasoning item IDs.
let azure_workaround = self.provider.is_azure_responses_endpoint();
let tool_choice = if let Some(disabled) = &prompt.disabled_tools {
if disabled.is_empty() {
serde_json::json!("auto")
} else {
let allowed = create_allowed_tools_json_for_responses_api(&prompt.tools, disabled);
let total_unique = prompt
.tools
.iter()
.map(super::client_common::tools::ToolSpec::name)
.collect::<HashSet<_>>()
.len();
if allowed.is_empty() {
serde_json::json!("none")
} else if allowed.len() == total_unique {
serde_json::json!("auto")
} else {
serde_json::json!({
"type": "allowed_tools",
"mode": "auto",
"tools": allowed,
})
}
}
} else {
serde_json::json!("auto")
};
let payload = ResponsesApiRequest {
model: &self.config.model,
instructions: &full_instructions,
input: &input_with_instructions,
tools: &tools_json,
-tool_choice,
+tool_choice: "auto",
parallel_tool_calls: prompt.parallel_tool_calls,
reasoning,
store: azure_workaround,
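
For context, a small illustrative sketch (assumed shapes, inferred only from the code removed here and from create_allowed_tools_json_for_responses_api further down) of the two tool_choice values in play: the allowed_tools object the removed branch could build when some tools were disabled, and the plain string the request now always sends.

// Illustrative only; the tool name "shell" is an example, not taken from a real request.
let old_restricted = serde_json::json!({
    "type": "allowed_tools",
    "mode": "auto",
    "tools": [{ "type": "function", "name": "shell" }],
});
// With this change the request always serializes the literal string instead.
let always_auto = serde_json::json!("auto");
let _ = (old_restricted, always_auto);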

View File

@@ -41,9 +41,6 @@ pub struct Prompt {
/// Optional the output schema for the model's response.
pub output_schema: Option<Value>,
/// Optional list of tool names to disable for this prompt.
pub disabled_tools: Option<Vec<String>>,
}
impl Prompt {
@@ -271,7 +268,7 @@ pub(crate) struct ResponsesApiRequest<'a> {
// separate enum for serialization.
pub(crate) input: &'a Vec<ResponseItem>,
pub(crate) tools: &'a [serde_json::Value],
-pub(crate) tool_choice: Value,
+pub(crate) tool_choice: &'static str,
pub(crate) parallel_tool_calls: bool,
pub(crate) reasoning: Option<Reasoning>,
pub(crate) store: bool,
@@ -460,7 +457,7 @@ mod tests {
instructions: "i",
input: &input,
tools: &tools,
-tool_choice: serde_json::json!("auto"),
+tool_choice: "auto",
parallel_tool_calls: true,
reasoning: None,
store: false,
@@ -501,7 +498,7 @@ mod tests {
instructions: "i",
input: &input,
tools: &tools,
-tool_choice: serde_json::json!("auto"),
+tool_choice: "auto",
parallel_tool_calls: true,
reasoning: None,
store: false,
@@ -537,7 +534,7 @@ mod tests {
instructions: "i",
input: &input,
tools: &tools,
-tool_choice: serde_json::json!("auto"),
+tool_choice: "auto",
parallel_tool_calls: true,
reasoning: None,
store: false,

View File

@@ -0,0 +1,168 @@
use std::fmt::Write;
use codex_protocol::models::ContentItem;
use codex_protocol::models::ResponseItem;
use crate::codebase_snapshot::SnapshotDiff;
pub(crate) const CODEBASE_CHANGE_NOTICE_MAX_PATHS: usize = 40;
#[derive(Clone, Debug, PartialEq, Eq)]
pub(crate) struct CodebaseChangeNotice {
added: Vec<String>,
removed: Vec<String>,
modified: Vec<String>,
truncated: bool,
}
impl CodebaseChangeNotice {
pub(crate) fn new(diff: SnapshotDiff, limit: usize) -> Self {
let mut remaining = limit;
let mut truncated = false;
let added = take_paths(diff.added, &mut remaining, &mut truncated);
let removed = take_paths(diff.removed, &mut remaining, &mut truncated);
let modified = take_paths(diff.modified, &mut remaining, &mut truncated);
Self {
added,
removed,
modified,
truncated,
}
}
pub(crate) fn is_empty(&self) -> bool {
self.added.is_empty() && self.removed.is_empty() && self.modified.is_empty()
}
pub(crate) fn serialize_to_xml(&self) -> String {
let mut output = String::new();
if self.truncated {
let _ = writeln!(output, "<codebase_changes truncated=\"true\">");
} else {
let _ = writeln!(output, "<codebase_changes>");
}
let mut summary_parts = Vec::new();
if !self.added.is_empty() {
summary_parts.push(format!("added {}", self.added.len()));
}
if !self.removed.is_empty() {
summary_parts.push(format!("removed {}", self.removed.len()));
}
if !self.modified.is_empty() {
summary_parts.push(format!("modified {}", self.modified.len()));
}
if summary_parts.is_empty() {
let _ = writeln!(output, " <summary>no changes</summary>");
} else {
let summary = summary_parts.join(", ");
let _ = writeln!(output, " <summary>{summary}</summary>");
}
serialize_section(&mut output, "added", &self.added);
serialize_section(&mut output, "removed", &self.removed);
serialize_section(&mut output, "modified", &self.modified);
if self.truncated {
let _ = writeln!(output, " <note>additional paths omitted</note>");
}
let _ = writeln!(output, "</codebase_changes>");
output
}
}
fn take_paths(mut paths: Vec<String>, remaining: &mut usize, truncated: &mut bool) -> Vec<String> {
if *remaining == 0 {
if !paths.is_empty() {
*truncated = true;
}
return Vec::new();
}
if paths.len() > *remaining {
paths.truncate(*remaining);
*truncated = true;
}
*remaining -= paths.len();
paths
}
fn serialize_section(output: &mut String, tag: &str, paths: &[String]) {
if paths.is_empty() {
return;
}
let _ = writeln!(output, " <{tag}>");
for path in paths {
let _ = writeln!(output, " <path>{}</path>", escape_xml(path));
}
let _ = writeln!(output, " </{tag}>");
}
fn escape_xml(value: &str) -> String {
let mut escaped = String::with_capacity(value.len());
for ch in value.chars() {
match ch {
'&' => escaped.push_str("&amp;"),
'<' => escaped.push_str("&lt;"),
'>' => escaped.push_str("&gt;"),
'"' => escaped.push_str("&quot;"),
'\'' => escaped.push_str("&apos;"),
other => escaped.push(other),
}
}
escaped
}
impl From<CodebaseChangeNotice> for ResponseItem {
fn from(notice: CodebaseChangeNotice) -> Self {
ResponseItem::Message {
id: None,
role: "user".to_string(),
content: vec![ContentItem::InputText {
text: notice.serialize_to_xml(),
}],
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use pretty_assertions::assert_eq;
#[test]
fn constructs_notice_with_limit() {
let diff = SnapshotDiff {
added: vec!["a.rs".to_string(), "b.rs".to_string()],
removed: vec!["c.rs".to_string()],
modified: vec!["d.rs".to_string(), "e.rs".to_string()],
};
let notice = CodebaseChangeNotice::new(diff, 3);
assert!(notice.truncated);
assert_eq!(
notice.added.len() + notice.removed.len() + notice.modified.len(),
3
);
}
#[test]
fn serializes_notice() {
let diff = SnapshotDiff {
added: vec!["src/lib.rs".to_string()],
removed: Vec::new(),
modified: vec!["src/main.rs".to_string()],
};
let notice = CodebaseChangeNotice::new(diff, CODEBASE_CHANGE_NOTICE_MAX_PATHS);
let xml = notice.serialize_to_xml();
assert!(xml.contains("<added>"));
assert!(xml.contains("<modified>"));
assert!(xml.contains("src/lib.rs"));
assert!(xml.contains("src/main.rs"));
}
}
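
To make the shape of the payload concrete, here is a minimal illustrative test-style sketch of the XML this notice serializes to; it assumes it runs inside codex-core (the types above are pub(crate)), and the paths are made up.

#[test]
fn example_notice_xml() {
    // Hypothetical diff; field names mirror SnapshotDiff above.
    let diff = SnapshotDiff {
        added: vec!["src/new_module.rs".to_string()],
        removed: Vec::new(),
        modified: vec!["src/main.rs".to_string()],
    };
    let notice = CodebaseChangeNotice::new(diff, CODEBASE_CHANGE_NOTICE_MAX_PATHS);
    let xml = notice.serialize_to_xml();
    // Expected shape (indentation approximate):
    // <codebase_changes>
    //   <summary>added 1, modified 1</summary>
    //   <added>
    //     <path>src/new_module.rs</path>
    //   </added>
    //   <modified>
    //     <path>src/main.rs</path>
    //   </modified>
    // </codebase_changes>
    assert!(xml.contains("<summary>added 1, modified 1</summary>"));
}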

View File

@@ -0,0 +1,278 @@
use std::borrow::Cow;
use std::collections::BTreeMap;
use std::fs::File;
use std::io::Read;
use std::path::Path;
use std::path::PathBuf;
use std::time::SystemTime;
use anyhow::Context;
use anyhow::Result;
use ignore::WalkBuilder;
use sha2::Digest;
use sha2::Sha256;
use tokio::task;
use tracing::warn;
#[derive(Clone, Debug, PartialEq, Eq)]
pub(crate) struct CodebaseSnapshot {
root: PathBuf,
entries: BTreeMap<String, EntryFingerprint>,
root_digest: DigestBytes,
}
#[derive(Clone, Debug, PartialEq, Eq)]
pub(crate) struct EntryFingerprint {
pub kind: EntryKind,
pub digest: DigestBytes,
pub size: u64,
pub modified_millis: Option<u128>,
}
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
#[repr(u8)]
pub(crate) enum EntryKind {
File,
Symlink,
}
#[derive(Clone, Debug, PartialEq, Eq, Default)]
pub(crate) struct SnapshotDiff {
pub added: Vec<String>,
pub removed: Vec<String>,
pub modified: Vec<String>,
}
impl SnapshotDiff {
pub fn is_empty(&self) -> bool {
self.added.is_empty() && self.removed.is_empty() && self.modified.is_empty()
}
}
pub(crate) type DigestBytes = [u8; 32];
impl CodebaseSnapshot {
pub(crate) async fn capture(root: PathBuf) -> Result<Self> {
task::spawn_blocking(move || Self::from_disk(&root))
.await
.map_err(|e| anyhow::anyhow!("codebase snapshot task failed: {e}"))?
}
pub(crate) fn from_disk(root: &Path) -> Result<Self> {
if !root.exists() {
return Ok(Self::empty(root));
}
let mut entries: BTreeMap<String, EntryFingerprint> = BTreeMap::new();
let mut walker = WalkBuilder::new(root);
walker
.hidden(false)
.git_ignore(true)
.git_exclude(true)
.parents(true)
.ignore(true)
.follow_links(false);
for result in walker.build() {
let entry = match result {
Ok(entry) => entry,
Err(err) => {
warn!("codebase snapshot failed to read entry: {err}");
continue;
}
};
let path = entry.path();
if entry.depth() == 0 {
continue;
}
let relative = match path.strip_prefix(root) {
Ok(rel) => rel,
Err(_) => continue,
};
if relative.as_os_str().is_empty() {
continue;
}
let rel_string = normalize_rel_path(relative);
let file_type = match entry.file_type() {
Some(file_type) => file_type,
None => continue,
};
if file_type.is_dir() {
continue;
}
if file_type.is_file() {
match fingerprint_file(path) {
Ok(fp) => {
entries.insert(rel_string, fp);
}
Err(err) => {
warn!(
"codebase snapshot failed to hash file {}: {err}",
path.display()
);
}
}
continue;
}
if file_type.is_symlink() {
match fingerprint_symlink(path) {
Ok(fp) => {
entries.insert(rel_string, fp);
}
Err(err) => {
warn!(
"codebase snapshot failed to hash symlink {}: {err}",
path.display()
);
}
}
continue;
}
}
let root_digest = compute_root_digest(&entries);
Ok(Self {
root: root.to_path_buf(),
entries,
root_digest,
})
}
pub(crate) fn diff(&self, newer: &CodebaseSnapshot) -> SnapshotDiff {
let mut diff = SnapshotDiff::default();
for (path, fingerprint) in &newer.entries {
match self.entries.get(path) {
None => diff.added.push(path.clone()),
Some(existing) if existing != fingerprint => diff.modified.push(path.clone()),
_ => {}
}
}
for path in self.entries.keys() {
if !newer.entries.contains_key(path) {
diff.removed.push(path.clone());
}
}
diff
}
pub(crate) fn root(&self) -> &Path {
&self.root
}
fn empty(root: &Path) -> Self {
Self {
root: root.to_path_buf(),
entries: BTreeMap::new(),
root_digest: Sha256::digest(b"").into(),
}
}
}
fn fingerprint_file(path: &Path) -> Result<EntryFingerprint> {
let metadata = path
.metadata()
.with_context(|| format!("metadata {}", path.display()))?;
let mut file = File::open(path).with_context(|| format!("open {}", path.display()))?;
let mut hasher = Sha256::new();
let mut buf = [0u8; 64 * 1024];
loop {
let read = file.read(&mut buf)?;
if read == 0 {
break;
}
hasher.update(&buf[..read]);
}
Ok(EntryFingerprint {
kind: EntryKind::File,
digest: hasher.finalize().into(),
size: metadata.len(),
modified_millis: metadata.modified().ok().and_then(system_time_to_millis),
})
}
fn fingerprint_symlink(path: &Path) -> Result<EntryFingerprint> {
let target =
std::fs::read_link(path).with_context(|| format!("read_link {}", path.display()))?;
let mut hasher = Sha256::new();
let target_str = normalize_rel_path(&target);
hasher.update(target_str.as_bytes());
Ok(EntryFingerprint {
kind: EntryKind::Symlink,
digest: hasher.finalize().into(),
size: 0,
modified_millis: None,
})
}
fn compute_root_digest(entries: &BTreeMap<String, EntryFingerprint>) -> DigestBytes {
let mut hasher = Sha256::new();
for (path, fingerprint) in entries {
hasher.update(path.as_bytes());
hasher.update(fingerprint.digest);
hasher.update([fingerprint.kind as u8]);
hasher.update(fingerprint.size.to_le_bytes());
if let Some(modified) = fingerprint.modified_millis {
hasher.update(modified.to_le_bytes());
}
}
hasher.finalize().into()
}
fn normalize_rel_path(path: &Path) -> String {
let s = path_to_cow(path);
if s.is_empty() {
String::new()
} else {
s.replace('\\', "/")
}
}
fn path_to_cow(path: &Path) -> Cow<'_, str> {
path.to_string_lossy()
}
fn system_time_to_millis(ts: SystemTime) -> Option<u128> {
ts.duration_since(SystemTime::UNIX_EPOCH)
.map(|duration| duration.as_millis())
.ok()
}
#[cfg(test)]
mod tests {
use super::*;
use pretty_assertions::assert_eq;
use tempfile::tempdir;
#[test]
fn diff_tracks_added_modified_removed() {
let dir = tempdir().unwrap();
let root = dir.path();
std::fs::write(root.join("file_a.txt"), "alpha").unwrap();
std::fs::write(root.join("file_b.txt"), "bravo").unwrap();
let snapshot_one = CodebaseSnapshot::from_disk(root).unwrap();
std::fs::write(root.join("file_a.txt"), "alpha-updated").unwrap();
std::fs::remove_file(root.join("file_b.txt")).unwrap();
std::fs::write(root.join("file_c.txt"), "charlie").unwrap();
let snapshot_two = CodebaseSnapshot::from_disk(root).unwrap();
let diff = snapshot_one.diff(&snapshot_two);
assert_eq!(diff.added, vec!["file_c.txt".to_string()]);
assert_eq!(diff.modified, vec!["file_a.txt".to_string()]);
assert_eq!(diff.removed, vec!["file_b.txt".to_string()]);
}
}
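
Putting the two new modules together, the intended flow is: capture a baseline snapshot of the working directory, re-capture later, diff the two, and turn a non-empty diff into a codebase_changes notice. A minimal sketch under the assumption that it runs inside codex-core, mirroring (not reproducing) the Session wiring shown further down in codex.rs:

// Illustrative helper; the root path and the edits in between are hypothetical.
async fn example_codebase_delta(root: std::path::PathBuf) -> anyhow::Result<()> {
    // Baseline before anything runs.
    let before = CodebaseSnapshot::capture(root.clone()).await?;
    // ... files under `root` are edited here, inside or outside the session ...
    let after = CodebaseSnapshot::capture(root).await?;
    let diff = before.diff(&after);
    if !diff.is_empty() {
        let notice = CodebaseChangeNotice::new(diff, CODEBASE_CHANGE_NOTICE_MAX_PATHS);
        // In the real Session this becomes a user ResponseItem recorded into history.
        println!("{}", notice.serialize_to_xml());
    }
    Ok(())
}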

View File

@@ -1,5 +1,6 @@
use std::borrow::Cow;
use std::fmt::Debug;
use std::path::Path;
use std::path::PathBuf;
use std::sync::Arc;
use std::sync::atomic::AtomicU64;
@@ -43,6 +44,9 @@ use crate::apply_patch::convert_apply_patch_to_protocol;
use crate::client::ModelClient;
use crate::client_common::Prompt;
use crate::client_common::ResponseEvent;
use crate::codebase_change_notice::CODEBASE_CHANGE_NOTICE_MAX_PATHS;
use crate::codebase_change_notice::CodebaseChangeNotice;
use crate::codebase_snapshot::CodebaseSnapshot;
use crate::config::Config;
use crate::config_types::ShellEnvironmentPolicy;
use crate::conversation_history::ConversationHistory;
@@ -265,7 +269,6 @@ pub(crate) struct TurnContext {
pub(crate) tools_config: ToolsConfig,
pub(crate) is_review_mode: bool,
pub(crate) final_output_json_schema: Option<Value>,
pub(crate) disabled_tools: Option<Vec<String>>,
}
impl TurnContext {
@@ -489,7 +492,6 @@ impl Session {
cwd,
is_review_mode: false,
final_output_json_schema: None,
disabled_tools: None,
};
let services = SessionServices {
mcp_connection_manager,
@@ -748,6 +750,73 @@ impl Session {
self.persist_rollout_items(&rollout_items).await;
}
async fn stored_snapshot_for_root(&self, root: &Path) -> Option<CodebaseSnapshot> {
let state = self.state.lock().await;
state
.codebase_snapshot
.as_ref()
.filter(|snapshot| snapshot.root() == root)
.cloned()
}
async fn set_codebase_snapshot(&self, snapshot: CodebaseSnapshot) {
let mut state = self.state.lock().await;
state.codebase_snapshot = Some(snapshot);
}
pub(crate) async fn emit_codebase_delta_if_changed(
&self,
turn_context: &TurnContext,
sub_id: &str,
) -> anyhow::Result<()> {
let cwd = turn_context.cwd.clone();
let previous = self.stored_snapshot_for_root(&cwd).await;
let latest = CodebaseSnapshot::capture(cwd.clone()).await?;
if let Some(previous_snapshot) = previous {
let diff = previous_snapshot.diff(&latest);
if diff.is_empty() {
self.set_codebase_snapshot(latest).await;
return Ok(());
}
let notice = CodebaseChangeNotice::new(diff, CODEBASE_CHANGE_NOTICE_MAX_PATHS);
if notice.is_empty() {
self.set_codebase_snapshot(latest).await;
return Ok(());
}
let response_item: ResponseItem = notice.into();
self.record_conversation_items(std::slice::from_ref(&response_item))
.await;
for msg in
map_response_item_to_event_messages(&response_item, self.show_raw_agent_reasoning())
{
let event = Event {
id: sub_id.to_string(),
msg,
};
self.send_event(event).await;
}
self.set_codebase_snapshot(latest).await;
return Ok(());
}
self.set_codebase_snapshot(latest).await;
Ok(())
}
pub(crate) async fn refresh_codebase_snapshot(
&self,
turn_context: &TurnContext,
) -> anyhow::Result<()> {
let snapshot = CodebaseSnapshot::capture(turn_context.cwd.clone()).await?;
self.set_codebase_snapshot(snapshot).await;
Ok(())
}
pub(crate) fn build_initial_context(&self, turn_context: &TurnContext) -> Vec<ResponseItem> {
let mut items = Vec::<ResponseItem>::with_capacity(2);
if let Some(user_instructions) = turn_context.user_instructions.as_deref() {
@@ -1173,7 +1242,6 @@ async fn submission_loop(
model,
effort,
summary,
disabled_tools,
} => {
// Recalculate the persistent turn context with provided overrides.
let prev = Arc::clone(&turn_context);
@@ -1222,9 +1290,6 @@ async fn submission_loop(
.clone()
.unwrap_or(prev.sandbox_policy.clone());
let new_cwd = cwd.clone().unwrap_or_else(|| prev.cwd.clone());
let new_disabled_tools = disabled_tools
.clone()
.or_else(|| prev.disabled_tools.clone());
let tools_config = ToolsConfig::new(&ToolsConfigParams {
model_family: &effective_family,
@@ -1242,7 +1307,6 @@ async fn submission_loop(
cwd: new_cwd.clone(),
is_review_mode: false,
final_output_json_schema: None,
disabled_tools: new_disabled_tools,
};
// Install the new persistent context for subsequent tasks/turns.
@@ -1281,7 +1345,6 @@ async fn submission_loop(
effort,
summary,
final_output_json_schema,
disabled_tools,
} => {
turn_context
.client
@@ -1322,9 +1385,6 @@ async fn submission_loop(
summary,
sess.conversation_id,
);
let effective_disabled_tools = disabled_tools
.clone()
.or_else(|| turn_context.disabled_tools.clone());
let fresh_turn_context = TurnContext {
client,
@@ -1340,7 +1400,6 @@ async fn submission_loop(
cwd,
is_review_mode: false,
final_output_json_schema,
disabled_tools: effective_disabled_tools.clone(),
};
// if the environment context has changed, record it in the conversation history
@@ -1620,7 +1679,6 @@ async fn spawn_review_thread(
cwd: parent_turn_context.cwd.clone(),
is_review_mode: true,
final_output_json_schema: None,
disabled_tools: parent_turn_context.disabled_tools.clone(),
};
// Seed the child task with the review prompt as the initial user message.
@@ -1691,6 +1749,14 @@ pub(crate) async fn run_task(
.await;
}
if !is_review_mode
&& let Err(err) = sess
.emit_codebase_delta_if_changed(turn_context.as_ref(), &sub_id)
.await
{
warn!(error = ?err, "failed to compute codebase changes");
}
let mut last_agent_message: Option<String> = None;
// Although from the perspective of codex.rs, TurnDiffTracker has the lifecycle of a Task which contains
// many turns, from the perspective of the user, it is a single turn.
@@ -1924,6 +1990,11 @@ pub(crate) async fn run_task(
}
}
if !is_review_mode && let Err(err) = sess.refresh_codebase_snapshot(turn_context.as_ref()).await
{
warn!(error = ?err, "failed to refresh codebase snapshot");
}
// If this was a review thread and we have a final assistant message,
// try to parse it as a ReviewOutput.
//
@@ -1993,7 +2064,6 @@ async fn run_turn(
parallel_tool_calls,
base_instructions_override: turn_context.base_instructions.clone(),
output_schema: turn_context.final_output_json_schema.clone(),
disabled_tools: turn_context.disabled_tools.clone(),
};
let mut retries = 0;
@@ -2801,7 +2871,6 @@ mod tests {
tools_config,
is_review_mode: false,
final_output_json_schema: None,
disabled_tools: None,
};
let services = SessionServices {
mcp_connection_manager: McpConnectionManager::default(),
@@ -2870,7 +2939,6 @@ mod tests {
tools_config,
is_review_mode: false,
final_output_json_schema: None,
disabled_tools: None,
});
let services = SessionServices {
mcp_connection_manager: McpConnectionManager::default(),

View File

@@ -92,7 +92,6 @@ async fn run_compact_task_inner(
loop {
let prompt = Prompt {
input: turn_input.clone(),
disabled_tools: turn_context.disabled_tools.clone(),
..Default::default()
};
let attempt_result =

View File

@@ -28,8 +28,6 @@ use crate::model_family::find_family_for_model;
use crate::model_provider_info::ModelProviderInfo;
use crate::model_provider_info::built_in_model_providers;
use crate::openai_model_info::get_model_info;
use crate::project_doc::DEFAULT_PROJECT_DOC_FILENAME;
use crate::project_doc::LOCAL_PROJECT_DOC_FILENAME;
use crate::protocol::AskForApproval;
use crate::protocol::SandboxPolicy;
use anyhow::Context;
@@ -1219,18 +1217,20 @@ impl Config {
}
fn load_instructions(codex_dir: Option<&Path>) -> Option<String> {
-let base = codex_dir?;
-for candidate in [LOCAL_PROJECT_DOC_FILENAME, DEFAULT_PROJECT_DOC_FILENAME] {
-let mut path = base.to_path_buf();
-path.push(candidate);
-if let Ok(contents) = std::fs::read_to_string(&path) {
-let trimmed = contents.trim();
-if !trimmed.is_empty() {
-return Some(trimmed.to_string());
-}
-}
-}
-None
+let mut p = match codex_dir {
+Some(p) => p.to_path_buf(),
+None => return None,
+};
+p.push("AGENTS.md");
+std::fs::read_to_string(&p).ok().and_then(|s| {
+let s = s.trim();
+if s.is_empty() {
+None
+} else {
+Some(s.to_string())
+}
+})
}
fn get_base_instructions(

View File

@@ -11,6 +11,8 @@ pub mod bash;
mod chat_completions;
mod client;
mod client_common;
mod codebase_change_notice;
mod codebase_snapshot;
pub mod codex;
mod codex_conversation;
pub mod token_data;

View File

@@ -21,8 +21,6 @@ use tracing::error;
/// Default filename scanned for project-level docs.
pub const DEFAULT_PROJECT_DOC_FILENAME: &str = "AGENTS.md";
/// Preferred local override for project-level docs.
pub const LOCAL_PROJECT_DOC_FILENAME: &str = "AGENTS.override.md";
/// When both `Config::instructions` and the project doc are present, they will
/// be concatenated with the following separator.
@@ -180,8 +178,7 @@ pub fn discover_project_doc_paths(config: &Config) -> std::io::Result<Vec<PathBu
fn candidate_filenames<'a>(config: &'a Config) -> Vec<&'a str> {
let mut names: Vec<&'a str> =
-Vec::with_capacity(2 + config.project_doc_fallback_filenames.len());
-names.push(LOCAL_PROJECT_DOC_FILENAME);
+Vec::with_capacity(1 + config.project_doc_fallback_filenames.len());
names.push(DEFAULT_PROJECT_DOC_FILENAME);
for candidate in &config.project_doc_fallback_filenames {
let candidate = candidate.as_str();
@@ -384,29 +381,6 @@ mod tests {
assert_eq!(res, "root doc\n\ncrate doc");
}
/// AGENTS.override.md is preferred over AGENTS.md when both are present.
#[tokio::test]
async fn agents_local_md_preferred() {
let tmp = tempfile::tempdir().expect("tempdir");
fs::write(tmp.path().join(DEFAULT_PROJECT_DOC_FILENAME), "versioned").unwrap();
fs::write(tmp.path().join(LOCAL_PROJECT_DOC_FILENAME), "local").unwrap();
let cfg = make_config(&tmp, 4096, None);
let res = get_user_instructions(&cfg)
.await
.expect("local doc expected");
assert_eq!(res, "local");
let discovery = discover_project_doc_paths(&cfg).expect("discover paths");
assert_eq!(discovery.len(), 1);
assert_eq!(
discovery[0].file_name().unwrap().to_string_lossy(),
LOCAL_PROJECT_DOC_FILENAME
);
}
/// When AGENTS.md is absent but a configured fallback exists, the fallback is used.
#[tokio::test]
async fn uses_configured_fallback_when_agents_missing() {

View File

@@ -2,6 +2,7 @@
use codex_protocol::models::ResponseItem;
use crate::codebase_snapshot::CodebaseSnapshot;
use crate::conversation_history::ConversationHistory;
use crate::protocol::RateLimitSnapshot;
use crate::protocol::TokenUsage;
@@ -13,6 +14,7 @@ pub(crate) struct SessionState {
pub(crate) history: ConversationHistory,
pub(crate) token_info: Option<TokenUsageInfo>,
pub(crate) latest_rate_limits: Option<RateLimitSnapshot>,
pub(crate) codebase_snapshot: Option<CodebaseSnapshot>,
}
impl SessionState {

View File

@@ -145,22 +145,6 @@ impl ToolRouter {
let payload_outputs_custom = matches!(payload, ToolPayload::Custom { .. });
let failure_call_id = call_id.clone();
if turn
.disabled_tools
.as_ref()
.map(|tools| tools.iter().any(|name| name == &tool_name))
.unwrap_or(false)
{
let err = FunctionCallError::RespondToModel(format!(
"tool {tool_name} is disabled for this turn"
));
return Ok(Self::failure_response(
failure_call_id,
payload_outputs_custom,
err,
));
}
let invocation = ToolInvocation {
session,
turn,

View File

@@ -14,7 +14,6 @@ use serde_json::Value as JsonValue;
use serde_json::json;
use std::collections::BTreeMap;
use std::collections::HashMap;
use std::collections::HashSet;
#[derive(Debug, Clone)]
pub enum ConfigShellToolType {
@@ -533,40 +532,6 @@ pub fn create_tools_json_for_responses_api(
Ok(tools_json)
}
pub fn create_allowed_tools_json_for_responses_api(
tools: &[ToolSpec],
disabled_names: &[String],
) -> Vec<serde_json::Value> {
let disabled_set: HashSet<&str> = disabled_names
.iter()
.map(std::string::String::as_str)
.collect();
let mut seen = HashSet::new();
let mut allowed = Vec::new();
for spec in tools {
let name = spec.name();
if disabled_set.contains(name) || !seen.insert(name) {
continue;
}
let value = match spec {
ToolSpec::Function(tool) => json!({
"type": "function",
"name": tool.name,
}),
ToolSpec::Freeform(tool) => json!({
"type": "custom",
"name": tool.name,
}),
ToolSpec::LocalShell {} => json!({ "type": "local_shell" }),
ToolSpec::WebSearch {} => json!({ "type": "web_search" }),
};
allowed.push(value);
}
allowed
}
/// Returns JSON values that are compatible with Function Calling in the
/// Chat Completions API:
/// https://platform.openai.com/docs/guides/function-calling?api-mode=chat
@@ -1128,33 +1093,6 @@ mod tests {
);
}
#[test]
fn create_allowed_tools_excludes_disabled_entries() {
let shell = super::create_shell_tool();
let web_search = ToolSpec::WebSearch {};
let view_image = super::create_view_image_tool();
let specs = vec![shell, web_search, view_image];
let allowed = super::create_allowed_tools_json_for_responses_api(
&specs,
&[String::from("web_search")],
);
assert_eq!(allowed.len(), 2);
assert!(allowed.iter().any(|tool| {
tool.get("type") == Some(&serde_json::Value::String("function".into()))
&& tool.get("name") == Some(&serde_json::Value::String("shell".into()))
}));
assert!(allowed.iter().any(|tool| {
tool.get("type") == Some(&serde_json::Value::String("function".into()))
&& tool.get("name") == Some(&serde_json::Value::String("view_image".into()))
}));
assert!(allowed.iter().all(|tool| {
tool.get("name") != Some(&serde_json::Value::String("web_search".into()))
}));
}
#[test]
fn test_build_specs_mcp_tools_sorted_by_name() {
let model_family = find_family_for_model("o3").expect("o3 should be a valid model family");

View File

@@ -124,7 +124,6 @@ async fn compact_resume_and_fork_preserve_model_history_view() {
.unwrap_or_default()
.to_string();
let tool_calls = json!(requests[0]["tools"].as_array());
let tool_choice_request_1 = requests[0]["tool_choice"].clone();
let prompt_cache_key = requests[0]["prompt_cache_key"]
.as_str()
.unwrap_or_default()
@@ -133,10 +132,6 @@ async fn compact_resume_and_fork_preserve_model_history_view() {
.as_str()
.unwrap_or_default()
.to_string();
let tool_choice_compact_1 = requests[1]["tool_choice"].clone();
let tool_choice_after_compact = requests[2]["tool_choice"].clone();
let tool_choice_after_resume = requests[3]["tool_choice"].clone();
let tool_choice_after_fork = requests[4]["tool_choice"].clone();
let expected_model = OPENAI_DEFAULT_MODEL;
let user_turn_1 = json!(
{
@@ -175,7 +170,7 @@ async fn compact_resume_and_fork_preserve_model_history_view() {
}
],
"tools": tool_calls,
"tool_choice": tool_choice_request_1,
"tool_choice": "auto",
"parallel_tool_calls": false,
"reasoning": {
"summary": "auto"
@@ -244,7 +239,7 @@ async fn compact_resume_and_fork_preserve_model_history_view() {
}
],
"tools": [],
"tool_choice": tool_choice_compact_1,
"tool_choice": "auto",
"parallel_tool_calls": false,
"reasoning": {
"summary": "auto"
@@ -309,7 +304,7 @@ SUMMARY_ONLY_CONTEXT"
}
],
"tools": tool_calls,
"tool_choice": tool_choice_after_compact,
"tool_choice": "auto",
"parallel_tool_calls": false,
"reasoning": {
"summary": "auto"
@@ -394,7 +389,7 @@ SUMMARY_ONLY_CONTEXT"
}
],
"tools": tool_calls,
"tool_choice": tool_choice_after_resume,
"tool_choice": "auto",
"parallel_tool_calls": false,
"reasoning": {
"summary": "auto"
@@ -479,7 +474,7 @@ SUMMARY_ONLY_CONTEXT"
}
],
"tools": tool_calls,
"tool_choice": tool_choice_after_fork,
"tool_choice": "auto",
"parallel_tool_calls": false,
"reasoning": {
"summary": "auto"

View File

@@ -159,7 +159,6 @@ async fn submit_turn(test: &TestCodex, prompt: &str) -> Result<()> {
model: session_model,
effort: None,
summary: ReasoningSummary::Auto,
disabled_tools: None,
})
.await?;

View File

@@ -84,7 +84,6 @@ async fn codex_returns_json_result(model: String) -> anyhow::Result<()> {
model,
effort: None,
summary: ReasoningSummary::Auto,
disabled_tools: None,
})
.await?;

View File

@@ -76,7 +76,6 @@ async fn list_dir_tool_returns_entries() -> anyhow::Result<()> {
model: session_model,
effort: None,
summary: ReasoningSummary::Auto,
disabled_tools: None,
})
.await?;
@@ -182,7 +181,6 @@ async fn list_dir_tool_depth_one_omits_children() -> anyhow::Result<()> {
model: session_model,
effort: None,
summary: ReasoningSummary::Auto,
disabled_tools: None,
})
.await?;
@@ -295,7 +293,6 @@ async fn list_dir_tool_depth_two_includes_children_only() -> anyhow::Result<()>
model: session_model,
effort: None,
summary: ReasoningSummary::Auto,
disabled_tools: None,
})
.await?;
@@ -411,7 +408,6 @@ async fn list_dir_tool_depth_three_includes_grandchildren() -> anyhow::Result<()
model: session_model,
effort: None,
summary: ReasoningSummary::Auto,
disabled_tools: None,
})
.await?;

View File

@@ -38,7 +38,6 @@ async fn override_turn_context_does_not_persist_when_config_exists() {
model: Some("o3".to_string()),
effort: Some(Some(ReasoningEffort::High)),
summary: None,
disabled_tools: None,
})
.await
.expect("submit override");
@@ -79,7 +78,6 @@ async fn override_turn_context_does_not_create_config_file() {
model: Some("o3".to_string()),
effort: Some(Some(ReasoningEffort::Medium)),
summary: None,
disabled_tools: None,
})
.await
.expect("submit override");

View File

@@ -443,7 +443,6 @@ async fn overrides_turn_context_but_keeps_cached_prefix_and_key_constant() {
model: Some("o3".to_string()),
effort: Some(Some(ReasoningEffort::High)),
summary: Some(ReasoningSummary::Detailed),
disabled_tools: None,
})
.await
.unwrap();
@@ -578,7 +577,6 @@ async fn per_turn_overrides_keep_cached_prefix_and_key_constant() {
effort: Some(ReasoningEffort::High),
summary: ReasoningSummary::Detailed,
final_output_json_schema: None,
disabled_tools: None,
})
.await
.unwrap();
@@ -690,7 +688,6 @@ async fn send_user_turn_with_no_changes_does_not_send_environment_context() {
effort: default_effort,
summary: default_summary,
final_output_json_schema: None,
disabled_tools: None,
})
.await
.unwrap();
@@ -708,7 +705,6 @@ async fn send_user_turn_with_no_changes_does_not_send_environment_context() {
effort: default_effort,
summary: default_summary,
final_output_json_schema: None,
disabled_tools: None,
})
.await
.unwrap();
@@ -806,7 +802,6 @@ async fn send_user_turn_with_changes_sends_environment_context() {
effort: default_effort,
summary: default_summary,
final_output_json_schema: None,
disabled_tools: None,
})
.await
.unwrap();
@@ -824,7 +819,6 @@ async fn send_user_turn_with_changes_sends_environment_context() {
effort: Some(ReasoningEffort::High),
summary: ReasoningSummary::Detailed,
final_output_json_schema: None,
disabled_tools: None,
})
.await
.unwrap();

View File

@@ -74,7 +74,6 @@ async fn read_file_tool_returns_requested_lines() -> anyhow::Result<()> {
model: session_model,
effort: None,
summary: ReasoningSummary::Auto,
disabled_tools: None,
})
.await?;

View File

@@ -110,7 +110,6 @@ async fn stdio_server_round_trip() -> anyhow::Result<()> {
model: session_model,
effort: None,
summary: ReasoningSummary::Auto,
disabled_tools: None,
})
.await?;
@@ -260,7 +259,6 @@ async fn streamable_http_tool_call_round_trip() -> anyhow::Result<()> {
model: session_model,
effort: None,
summary: ReasoningSummary::Auto,
disabled_tools: None,
})
.await?;
@@ -442,7 +440,6 @@ async fn streamable_http_with_oauth_round_trip() -> anyhow::Result<()> {
model: session_model,
effort: None,
summary: ReasoningSummary::Auto,
disabled_tools: None,
})
.await?;

View File

@@ -45,7 +45,6 @@ async fn submit_turn(test: &TestCodex, prompt: &str, sandbox_policy: SandboxPoli
model: session_model,
effort: None,
summary: ReasoningSummary::Auto,
disabled_tools: None,
})
.await?;

View File

@@ -84,7 +84,6 @@ async fn shell_tool_executes_command_and_streams_output() -> anyhow::Result<()>
model: session_model,
effort: None,
summary: ReasoningSummary::Auto,
disabled_tools: None,
})
.await?;
@@ -154,7 +153,6 @@ async fn update_plan_tool_emits_plan_update_event() -> anyhow::Result<()> {
model: session_model,
effort: None,
summary: ReasoningSummary::Auto,
disabled_tools: None,
})
.await?;
@@ -238,7 +236,6 @@ async fn update_plan_tool_rejects_malformed_payload() -> anyhow::Result<()> {
model: session_model,
effort: None,
summary: ReasoningSummary::Auto,
disabled_tools: None,
})
.await?;
@@ -337,7 +334,6 @@ async fn apply_patch_tool_executes_and_emits_patch_events() -> anyhow::Result<()
model: session_model,
effort: None,
summary: ReasoningSummary::Auto,
disabled_tools: None,
})
.await?;
@@ -439,7 +435,6 @@ async fn apply_patch_reports_parse_diagnostics() -> anyhow::Result<()> {
model: session_model,
effort: None,
summary: ReasoningSummary::Auto,
disabled_tools: None,
})
.await?;

View File

@@ -38,7 +38,6 @@ async fn run_turn(test: &TestCodex, prompt: &str) -> anyhow::Result<()> {
model: session_model,
effort: None,
summary: ReasoningSummary::Auto,
disabled_tools: None,
})
.await?;

View File

@@ -48,7 +48,6 @@ async fn submit_turn(
model: session_model,
effort: None,
summary: ReasoningSummary::Auto,
disabled_tools: None,
})
.await?;

View File

@@ -128,7 +128,6 @@ async fn unified_exec_reuses_session_via_stdin() -> Result<()> {
model: session_model,
effort: None,
summary: ReasoningSummary::Auto,
disabled_tools: None,
})
.await?;
@@ -265,7 +264,6 @@ PY
model: session_model,
effort: None,
summary: ReasoningSummary::Auto,
disabled_tools: None,
})
.await?;
@@ -372,7 +370,6 @@ async fn unified_exec_timeout_and_followup_poll() -> Result<()> {
model: session_model,
effort: None,
summary: ReasoningSummary::Auto,
disabled_tools: None,
})
.await?;

View File

@@ -100,7 +100,6 @@ async fn view_image_tool_attaches_local_image() -> anyhow::Result<()> {
model: session_model,
effort: None,
summary: ReasoningSummary::Auto,
disabled_tools: None,
})
.await?;
@@ -200,7 +199,6 @@ async fn view_image_tool_errors_when_path_is_directory() -> anyhow::Result<()> {
model: session_model,
effort: None,
summary: ReasoningSummary::Auto,
disabled_tools: None,
})
.await?;
@@ -266,7 +264,6 @@ async fn view_image_tool_errors_when_file_missing() -> anyhow::Result<()> {
model: session_model,
effort: None,
summary: ReasoningSummary::Auto,
disabled_tools: None,
})
.await?;

View File

@@ -348,7 +348,6 @@ pub async fn run_main(cli: Cli, codex_linux_sandbox_exe: Option<PathBuf>) -> any
effort: default_effort,
summary: default_summary,
final_output_json_schema: output_schema,
disabled_tools: None,
})
.await?;
info!("Sent prompt with event ID: {initial_prompt_task_id}");

View File

@@ -90,9 +90,6 @@ pub enum Op {
summary: ReasoningSummaryConfig,
// The JSON schema to use for the final assistant message
final_output_json_schema: Option<Value>,
/// Optional list of tool names to disable for this turn.
#[serde(skip_serializing_if = "Option::is_none")]
disabled_tools: Option<Vec<String>>,
},
/// Override parts of the persistent turn context for subsequent turns.
@@ -128,9 +125,6 @@ pub enum Op {
/// Updated reasoning summary preference (honored only for reasoning-capable models).
#[serde(skip_serializing_if = "Option::is_none")]
summary: Option<ReasoningSummaryConfig>,
/// Updated default disabled tool list for subsequent turns.
#[serde(skip_serializing_if = "Option::is_none")]
disabled_tools: Option<Vec<String>>,
},
/// Approve a command execution

View File

@@ -1751,7 +1751,6 @@ impl ChatWidget {
model: Some(model_for_action.clone()),
effort: Some(effort_for_action),
summary: None,
disabled_tools: None,
}));
tx.send(AppEvent::UpdateModel(model_for_action.clone()));
tx.send(AppEvent::UpdateReasoningEffort(effort_for_action));
@@ -1808,7 +1807,6 @@ impl ChatWidget {
model: None,
effort: None,
summary: None,
disabled_tools: None,
}));
tx.send(AppEvent::UpdateAskForApprovalPolicy(approval));
tx.send(AppEvent::UpdateSandboxPolicy(sandbox.clone()));