mirror of https://github.com/openai/codex.git
synced 2026-02-08 09:53:39 +00:00

Compare commits: codex--app ... mcp (2 commits)

| Author | SHA1 | Date |
|---|---|---|
|  | 4243c44b22 |  |
|  | 021d9beebd |  |
@@ -2,7 +2,7 @@

In the codex-rs folder where the rust code lives:

- Crate names are prefixed with `codex-`. For example, the `core` folder's crate is named `codex-core`
- Crate names are prefixed with `codex-`. For examole, the `core` folder's crate is named `codex-core`
- When using format! and you can inline variables into {}, always do that.
- Never add or modify any code related to `CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR` or `CODEX_SANDBOX_ENV_VAR`.
- You operate in a sandbox where `CODEX_SANDBOX_NETWORK_DISABLED=1` will be set whenever you use the `shell` tool. Any existing code that uses `CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR` was authored with this fact in mind. It is often used to early exit out of tests that the author knew you would not be able to run given your sandbox limitations.
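The format! rule and the sandbox early-exit convention from the list above can be seen together in a small sketch. This is illustrative only and not part of the diff; the test name, host, and port are made up:

```rust
#[test]
fn fetches_release_metadata() {
    // Early exit: the sandbox sets CODEX_SANDBOX_NETWORK_DISABLED=1 for `shell`
    // tool invocations, so network-dependent tests cannot run there.
    if std::env::var("CODEX_SANDBOX_NETWORK_DISABLED").is_ok() {
        println!("skipping test: network is disabled in this sandbox");
        return;
    }

    let host = "api.example.com";
    let port = 443;
    // Inline the variables directly into the braces, per the guideline,
    // rather than writing format!("https://{}:{}/v1", host, port).
    let url = format!("https://{host}:{port}/v1");
    assert_eq!(url, "https://api.example.com:443/v1");
}
```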
90 codex-rs/Cargo.lock (generated)
@@ -635,7 +635,6 @@ name = "codex-apply-patch"
version = "0.0.0"
dependencies = [
 "anyhow",
 "assert_cmd",
 "pretty_assertions",
 "similar",
 "tempfile",
@@ -653,7 +652,6 @@ dependencies = [
 "codex-core",
 "codex-linux-sandbox",
 "dotenvy",
 "tempfile",
 "tokio",
]

@@ -733,7 +731,6 @@ dependencies = [
 "mime_guess",
 "openssl-sys",
 "os_info",
 "portable-pty",
 "predicates",
 "pretty_assertions",
 "rand 0.9.2",
@@ -754,7 +751,7 @@ dependencies = [
 "tokio-test",
 "tokio-util",
 "toml 0.9.5",
 "toml_edit 0.23.4",
 "toml_edit 0.23.3",
 "tracing",
 "tree-sitter",
 "tree-sitter-bash",
@@ -1482,12 +1479,6 @@ version = "0.15.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1aaf95b3e5c8f23aa320147307562d361db0ae0d51242340f558153b4eb2439b"

[[package]]
name = "downcast-rs"
version = "1.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "75b325c5dbd37f80359721ad39aca5a29fb04c89279657cffdda8736d0c0b9d2"

[[package]]
name = "dupe"
version = "0.9.1"
@@ -1733,17 +1724,6 @@ dependencies = [
 "simd-adler32",
]

[[package]]
name = "filedescriptor"
version = "0.8.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e40758ed24c9b2eeb76c35fb0aebc66c626084edd827e07e1552279814c6682d"
dependencies = [
 "libc",
 "thiserror 1.0.69",
 "winapi",
]

[[package]]
name = "fixedbitset"
version = "0.4.2"
@@ -2722,7 +2702,6 @@ checksum = "4488594b9328dee448adb906d8b126d9b7deb7cf5c22161ee591610bb1be83c0"
dependencies = [
 "bitflags 2.9.1",
 "libc",
 "redox_syscall",
]

[[package]]
@@ -3460,27 +3439,6 @@ dependencies = [
 "portable-atomic",
]

[[package]]
name = "portable-pty"
version = "0.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b4a596a2b3d2752d94f51fac2d4a96737b8705dddd311a32b9af47211f08671e"
dependencies = [
 "anyhow",
 "bitflags 1.3.2",
 "downcast-rs",
 "filedescriptor",
 "lazy_static",
 "libc",
 "log",
 "nix",
 "serial2",
 "shared_library",
 "shell-words",
 "winapi",
 "winreg",
]

[[package]]
name = "potential_utf"
version = "0.1.2"
@@ -4408,17 +4366,6 @@ dependencies = [
 "syn 2.0.104",
]

[[package]]
name = "serial2"
version = "0.2.31"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "26e1e5956803a69ddd72ce2de337b577898801528749565def03515f82bad5bb"
dependencies = [
 "cfg-if",
 "libc",
 "winapi",
]

[[package]]
name = "sha1"
version = "0.10.6"
@@ -4450,22 +4397,6 @@ dependencies = [
 "lazy_static",
]

[[package]]
name = "shared_library"
version = "0.1.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5a9e7e0f2bfae24d8a5b5a66c5b257a83c7412304311512a0c054cd5e619da11"
dependencies = [
 "lazy_static",
 "libc",
]

[[package]]
name = "shell-words"
version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "24188a676b6ae68c3b2cb3a01be17fbf7240ce009799bb56d5b1409051e78fde"

[[package]]
name = "shlex"
version = "1.3.0"
@@ -5195,9 +5126,9 @@ dependencies = [

[[package]]
name = "toml_edit"
version = "0.23.4"
version = "0.23.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7211ff1b8f0d3adae1663b7da9ffe396eabe1ca25f0b0bee42b0da29a9ddce93"
checksum = "17d3b47e6b7a040216ae5302712c94d1cf88c95b47efa80e2c59ce96c878267e"
dependencies = [
 "indexmap 2.10.0",
 "toml_datetime 0.7.0",
@@ -5778,11 +5709,11 @@ dependencies = [

[[package]]
name = "whoami"
version = "1.6.1"
version = "1.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5d4a4db5077702ca3015d3d02d74974948aba2ad9e12ab7df718ee64ccd7e97d"
checksum = "6994d13118ab492c3c80c1f81928718159254c53c472bf9ce36f8dae4add02a7"
dependencies = [
 "libredox",
 "redox_syscall",
 "wasite",
 "web-sys",
]
@@ -6245,15 +6176,6 @@ dependencies = [
 "memchr",
]

[[package]]
name = "winreg"
version = "0.10.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "80d0f4e272c85def139476380b12f9ac60926689dd2e01d4923222f40580869d"
dependencies = [
 "winapi",
]

[[package]]
name = "winsafe"
version = "0.0.19"
@@ -43,12 +43,6 @@ To run Codex non-interactively, run `codex exec PROMPT` (you can also pass the p

Typing `@` triggers a fuzzy-filename search over the workspace root. Use up/down to select among the results and Tab or Enter to replace the `@` with the selected path. You can use Esc to cancel the search.

### Esc–Esc to edit a previous message

When the chat composer is empty, press Esc to prime “backtrack” mode. Press Esc again to open a transcript preview highlighting the last user message; press Esc repeatedly to step to older user messages. Press Enter to confirm and Codex will fork the conversation from that point, trim the visible transcript accordingly, and pre‑fill the composer with the selected user message so you can edit and resubmit it.

In the transcript preview, the footer shows an `Esc edit prev` hint while editing is active.

### `--cd`/`-C` flag

Sometimes it is not convenient to `cd` to the directory you want Codex to use as the "working root" before running Codex. Fortunately, `codex` supports a `--cd` option so you can specify whatever folder you want. You can confirm that Codex is honoring `--cd` by double-checking the **workdir** it reports in the TUI at the start of a new session.
@@ -7,10 +7,6 @@ version = { workspace = true }
name = "codex_apply_patch"
path = "src/lib.rs"

[[bin]]
name = "apply_patch"
path = "src/main.rs"

[lints]
workspace = true

@@ -22,6 +18,5 @@ tree-sitter = "0.25.8"
tree-sitter-bash = "0.25.0"

[dev-dependencies]
assert_cmd = "2"
pretty_assertions = "1.4.1"
tempfile = "3.13.0"
@@ -1,6 +1,5 @@
mod parser;
mod seek_sequence;
mod standalone_executable;

use std::collections::HashMap;
use std::path::Path;
@@ -20,8 +19,6 @@ use tree_sitter::LanguageError;
use tree_sitter::Parser;
use tree_sitter_bash::LANGUAGE as BASH;

pub use standalone_executable::main;

/// Detailed instructions for gpt-4.1 on how to use the `apply_patch` tool.
pub const APPLY_PATCH_TOOL_INSTRUCTIONS: &str = include_str!("../apply_patch_tool_instructions.md");

@@ -532,51 +529,32 @@ fn compute_replacements(
    let mut line_index: usize = 0;

    for chunk in chunks {
        // If a chunk has context lines, we use seek_sequence to find each in order,
        // then adjust our `line_index` to continue from there.
        if !chunk.context_lines.is_empty() {
            let total = chunk.context_lines.len();
            for (i, ctx_line) in chunk.context_lines.iter().enumerate() {
                if let Some(idx) = seek_sequence::seek_sequence(
                    original_lines,
                    std::slice::from_ref(ctx_line),
                    line_index,
                    false,
                ) {
                    line_index = idx + 1;
                } else {
                    return Err(ApplyPatchError::ComputeReplacements(format!(
                        "Failed to find context {}/{}: '{}' in {}",
                        i + 1,
                        total,
                        ctx_line,
                        path.display()
                    )));
                }
        // If a chunk has a `change_context`, we use seek_sequence to find it, then
        // adjust our `line_index` to continue from there.
        if let Some(ctx_line) = &chunk.change_context {
            if let Some(idx) = seek_sequence::seek_sequence(
                original_lines,
                std::slice::from_ref(ctx_line),
                line_index,
                false,
            ) {
                line_index = idx + 1;
            } else {
                return Err(ApplyPatchError::ComputeReplacements(format!(
                    "Failed to find context '{}' in {}",
                    ctx_line,
                    path.display()
                )));
            }
        }

        if chunk.old_lines.is_empty() {
            // Pure addition (no old lines).
            // Prefer to insert at the matched context anchor if one exists and
            // the hunk is not explicitly marked as end-of-file.
            let insertion_idx = if chunk.is_end_of_file {
                if original_lines.last().is_some_and(|s| s.is_empty()) {
                    original_lines.len() - 1
                } else {
                    original_lines.len()
                }
            } else if !chunk.context_lines.is_empty() {
                // Insert immediately after the last matched context line.
                line_index
            // Pure addition (no old lines). We'll add them at the end or just
            // before the final empty line if one exists.
            let insertion_idx = if original_lines.last().is_some_and(|s| s.is_empty()) {
                original_lines.len() - 1
            } else {
                // No context provided: fall back to appending at the end (before
                // the trailing empty line if present).
                if original_lines.last().is_some_and(|s| s.is_empty()) {
                    original_lines.len() - 1
                } else {
                    original_lines.len()
                }
                original_lines.len()
            };
            replacements.push((insertion_idx, 0, chunk.new_lines.clone()));
            continue;
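The hunk above interleaves the removed multi-anchor loop (driven by `context_lines`) with the added single-anchor logic (driven by `change_context`); in both variants, `seek_sequence` locates the context line and the insertion lands just past it. A minimal, self-contained sketch of that anchor-then-insert idea, with a simplified `find_line` standing in for `seek_sequence::seek_sequence`:

```rust
/// Find `needle` in `lines`, scanning forward from `start`. A stand-in for
/// seek_sequence::seek_sequence, which matches a whole slice of lines.
fn find_line(lines: &[String], needle: &str, start: usize) -> Option<usize> {
    lines
        .iter()
        .skip(start)
        .position(|l| l.as_str() == needle)
        .map(|i| i + start)
}

/// Insert `new_lines` immediately after the anchor line, or append at the end
/// when no anchor is given (the "pure addition" fallback).
fn insert_after_anchor(lines: &mut Vec<String>, anchor: Option<&str>, new_lines: Vec<String>) {
    let idx = match anchor.and_then(|a| find_line(lines, a, 0)) {
        Some(i) => i + 1,    // insert right after the matched context line
        None => lines.len(), // no anchor: plain append
    };
    lines.splice(idx..idx, new_lines);
}

fn main() {
    let mut file: Vec<String> = vec![
        "class BaseClass:".into(),
        "    def method():".into(),
        "line1".into(),
    ];
    insert_after_anchor(&mut file, Some("class BaseClass:"), vec!["INSERTED".into()]);
    assert_eq!(file[1], "INSERTED");
}
```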
@@ -1289,57 +1267,6 @@ g
    );
}

#[test]
fn test_insert_addition_after_single_context_anchor() {
    let dir = tempdir().unwrap();
    let path = dir.path().join("single_ctx.txt");
    fs::write(&path, "class BaseClass:\n def method():\nline1\nline2\n").unwrap();

    let patch = wrap_patch(&format!(
        r#"*** Update File: {}
@@ class BaseClass:
+INSERTED
"#,
        path.display()
    ));

    let mut stdout = Vec::new();
    let mut stderr = Vec::new();
    apply_patch(&patch, &mut stdout, &mut stderr).unwrap();

    let contents = fs::read_to_string(path).unwrap();
    assert_eq!(
        contents,
        "class BaseClass:\nINSERTED\n def method():\nline1\nline2\n"
    );
}

#[test]
fn test_insert_addition_after_multi_context_anchor() {
    let dir = tempdir().unwrap();
    let path = dir.path().join("multi_ctx.txt");
    fs::write(&path, "class BaseClass:\n def method():\nline1\nline2\n").unwrap();

    let patch = wrap_patch(&format!(
        r#"*** Update File: {}
@@ class BaseClass:
@@ def method():
+INSERTED
"#,
        path.display()
    ));

    let mut stdout = Vec::new();
    let mut stderr = Vec::new();
    apply_patch(&patch, &mut stdout, &mut stderr).unwrap();

    let contents = fs::read_to_string(path).unwrap();
    assert_eq!(
        contents,
        "class BaseClass:\n def method():\nINSERTED\nline1\nline2\n"
    );
}

#[test]
fn test_apply_patch_should_resolve_absolute_paths_in_cwd() {
    let session_dir = tempdir().unwrap();
@@ -1,3 +0,0 @@
pub fn main() -> ! {
    codex_apply_patch::main()
}
@@ -69,7 +69,7 @@ pub enum Hunk {
        path: PathBuf,
        move_path: Option<PathBuf>,

        /// Chunks should be in order, i.e. the first context line of one chunk
        /// Chunks should be in order, i.e. the `change_context` of one chunk
        /// should occur later in the file than the previous chunk.
        chunks: Vec<UpdateFileChunk>,
    },
@@ -89,13 +89,12 @@ use Hunk::*;

#[derive(Debug, PartialEq, Clone)]
pub struct UpdateFileChunk {
    /// Context lines used to narrow down the position of the chunk.
    /// Each entry is searched sequentially to progressively restrict the
    /// search to the desired region (e.g. class → method).
    pub context_lines: Vec<String>,
    /// A single line of context used to narrow down the position of the chunk
    /// (this is usually a class, method, or function definition.)
    pub change_context: Option<String>,

    /// A contiguous block of lines that should be replaced with `new_lines`.
    /// `old_lines` must occur strictly after the context.
    /// `old_lines` must occur strictly after `change_context`.
    pub old_lines: Vec<String>,
    pub new_lines: Vec<String>,

@@ -345,38 +344,32 @@ fn parse_update_file_chunk(
            line_number,
        });
    }
    let mut context_lines = Vec::new();
    let mut start_index = 0;
    let mut saw_context_marker = false;
    while start_index < lines.len() {
        if lines[start_index] == EMPTY_CHANGE_CONTEXT_MARKER {
            saw_context_marker = true;
            start_index += 1;
        } else if let Some(context) = lines[start_index].strip_prefix(CHANGE_CONTEXT_MARKER) {
            saw_context_marker = true;
            context_lines.push(context.to_string());
            start_index += 1;
        } else {
            break;
    // If we see an explicit context marker @@ or @@ <context>, consume it; otherwise, optionally
    // allow treating the chunk as starting directly with diff lines.
    let (change_context, start_index) = if lines[0] == EMPTY_CHANGE_CONTEXT_MARKER {
        (None, 1)
    } else if let Some(context) = lines[0].strip_prefix(CHANGE_CONTEXT_MARKER) {
        (Some(context.to_string()), 1)
    } else {
        if !allow_missing_context {
            return Err(InvalidHunkError {
                message: format!(
                    "Expected update hunk to start with a @@ context marker, got: '{}'",
                    lines[0]
                ),
                line_number,
            });
        }
    }
    if !saw_context_marker && !allow_missing_context {
        return Err(InvalidHunkError {
            message: format!(
                "Expected update hunk to start with a @@ context marker, got: '{}'",
                lines[0]
            ),
            line_number,
        });
    }
        (None, 0)
    };
    if start_index >= lines.len() {
        return Err(InvalidHunkError {
            message: "Update hunk does not contain any lines".to_string(),
            line_number: line_number + start_index,
            line_number: line_number + 1,
        });
    }
    let mut chunk = UpdateFileChunk {
        context_lines,
        change_context,
        old_lines: Vec::new(),
        new_lines: Vec::new(),
        is_end_of_file: false,
@@ -388,7 +381,7 @@ fn parse_update_file_chunk(
    if parsed_lines == 0 {
        return Err(InvalidHunkError {
            message: "Update hunk does not contain any lines".to_string(),
            line_number: line_number + start_index,
            line_number: line_number + 1,
        });
    }
    chunk.is_end_of_file = true;
@@ -418,7 +411,7 @@ fn parse_update_file_chunk(
                message: format!(
                    "Unexpected line found in update hunk: '{line_contents}'. Every line should start with ' ' (context line), '+' (added line), or '-' (removed line)"
                ),
                line_number: line_number + start_index,
                line_number: line_number + 1,
            });
        }
        // Assume this is the start of the next hunk.
@@ -498,7 +491,7 @@ fn test_parse_patch() {
            path: PathBuf::from("path/update.py"),
            move_path: Some(PathBuf::from("path/update2.py")),
            chunks: vec![UpdateFileChunk {
                context_lines: vec!["def f():".to_string()],
                change_context: Some("def f():".to_string()),
                old_lines: vec![" pass".to_string()],
                new_lines: vec![" return 123".to_string()],
                is_end_of_file: false
@@ -525,7 +518,7 @@ fn test_parse_patch() {
            path: PathBuf::from("file.py"),
            move_path: None,
            chunks: vec![UpdateFileChunk {
                context_lines: Vec::new(),
                change_context: None,
                old_lines: vec![],
                new_lines: vec!["line".to_string()],
                is_end_of_file: false
@@ -555,7 +548,7 @@ fn test_parse_patch() {
            path: PathBuf::from("file2.py"),
            move_path: None,
            chunks: vec![UpdateFileChunk {
                context_lines: Vec::new(),
                change_context: None,
                old_lines: vec!["import foo".to_string()],
                new_lines: vec!["import foo".to_string(), "bar".to_string()],
                is_end_of_file: false,
@@ -575,7 +568,7 @@ fn test_parse_patch_lenient() {
            path: PathBuf::from("file2.py"),
            move_path: None,
            chunks: vec![UpdateFileChunk {
                context_lines: Vec::new(),
                change_context: None,
                old_lines: vec!["import foo".to_string()],
                new_lines: vec!["import foo".to_string(), "bar".to_string()],
                is_end_of_file: false,
@@ -708,7 +701,7 @@ fn test_update_file_chunk() {
        ),
        Ok((
            (UpdateFileChunk {
                context_lines: vec!["change_context".to_string()],
                change_context: Some("change_context".to_string()),
                old_lines: vec![
                    "".to_string(),
                    "context".to_string(),
@@ -730,7 +723,7 @@ fn test_update_file_chunk() {
        parse_update_file_chunk(&["@@", "+line", "*** End of File"], 123, false),
        Ok((
            (UpdateFileChunk {
                context_lines: Vec::new(),
                change_context: None,
                old_lines: vec![],
                new_lines: vec!["line".to_string()],
                is_end_of_file: true
@@ -738,29 +731,4 @@ fn test_update_file_chunk() {
            3
        ))
    );
    assert_eq!(
        parse_update_file_chunk(
            &[
                "@@ class BaseClass",
                "@@ def method()",
                " context",
                "-old",
                "+new",
            ],
            123,
            false
        ),
        Ok((
            (UpdateFileChunk {
                context_lines: vec![
                    "class BaseClass".to_string(),
                    " def method()".to_string()
                ],
                old_lines: vec!["context".to_string(), "old".to_string()],
                new_lines: vec!["context".to_string(), "new".to_string()],
                is_end_of_file: false
            }),
            5
        ))
    );
}
@@ -1,59 +0,0 @@
use std::io::Read;
use std::io::Write;

pub fn main() -> ! {
    let exit_code = run_main();
    std::process::exit(exit_code);
}

/// We would prefer to return `std::process::ExitCode`, but its `exit_process()`
/// method is still a nightly API and we want main() to return !.
pub fn run_main() -> i32 {
    // Expect either one argument (the full apply_patch payload) or read it from stdin.
    let mut args = std::env::args_os();
    let _argv0 = args.next();

    let patch_arg = match args.next() {
        Some(arg) => match arg.into_string() {
            Ok(s) => s,
            Err(_) => {
                eprintln!("Error: apply_patch requires a UTF-8 PATCH argument.");
                return 1;
            }
        },
        None => {
            // No argument provided; attempt to read the patch from stdin.
            let mut buf = String::new();
            match std::io::stdin().read_to_string(&mut buf) {
                Ok(_) => {
                    if buf.is_empty() {
                        eprintln!("Usage: apply_patch 'PATCH'\n echo 'PATCH' | apply-patch");
                        return 2;
                    }
                    buf
                }
                Err(err) => {
                    eprintln!("Error: Failed to read PATCH from stdin.\n{err}");
                    return 1;
                }
            }
        }
    };

    // Refuse extra args to avoid ambiguity.
    if args.next().is_some() {
        eprintln!("Error: apply_patch accepts exactly one argument.");
        return 2;
    }

    let mut stdout = std::io::stdout();
    let mut stderr = std::io::stderr();
    match crate::apply_patch(&patch_arg, &mut stdout, &mut stderr) {
        Ok(()) => {
            // Flush to ensure output ordering when used in pipelines.
            let _ = stdout.flush();
            0
        }
        Err(_) => 1,
    }
}
@@ -1,3 +0,0 @@
// Single integration test binary that aggregates all test modules.
// The submodules live in `tests/suite/`.
mod suite;
@@ -1,90 +0,0 @@
use assert_cmd::prelude::*;
use std::fs;
use std::process::Command;
use tempfile::tempdir;

#[test]
fn test_apply_patch_cli_add_and_update() -> anyhow::Result<()> {
    let tmp = tempdir()?;
    let file = "cli_test.txt";
    let absolute_path = tmp.path().join(file);

    // 1) Add a file
    let add_patch = format!(
        r#"*** Begin Patch
*** Add File: {file}
+hello
*** End Patch"#
    );
    Command::cargo_bin("apply_patch")
        .expect("should find apply_patch binary")
        .arg(add_patch)
        .current_dir(tmp.path())
        .assert()
        .success()
        .stdout(format!("Success. Updated the following files:\nA {file}\n"));
    assert_eq!(fs::read_to_string(&absolute_path)?, "hello\n");

    // 2) Update the file
    let update_patch = format!(
        r#"*** Begin Patch
*** Update File: {file}
@@
-hello
+world
*** End Patch"#
    );
    Command::cargo_bin("apply_patch")
        .expect("should find apply_patch binary")
        .arg(update_patch)
        .current_dir(tmp.path())
        .assert()
        .success()
        .stdout(format!("Success. Updated the following files:\nM {file}\n"));
    assert_eq!(fs::read_to_string(&absolute_path)?, "world\n");

    Ok(())
}

#[test]
fn test_apply_patch_cli_stdin_add_and_update() -> anyhow::Result<()> {
    let tmp = tempdir()?;
    let file = "cli_test_stdin.txt";
    let absolute_path = tmp.path().join(file);

    // 1) Add a file via stdin
    let add_patch = format!(
        r#"*** Begin Patch
*** Add File: {file}
+hello
*** End Patch"#
    );
    let mut cmd =
        assert_cmd::Command::cargo_bin("apply_patch").expect("should find apply_patch binary");
    cmd.current_dir(tmp.path());
    cmd.write_stdin(add_patch)
        .assert()
        .success()
        .stdout(format!("Success. Updated the following files:\nA {file}\n"));
    assert_eq!(fs::read_to_string(&absolute_path)?, "hello\n");

    // 2) Update the file via stdin
    let update_patch = format!(
        r#"*** Begin Patch
*** Update File: {file}
@@
-hello
+world
*** End Patch"#
    );
    let mut cmd =
        assert_cmd::Command::cargo_bin("apply_patch").expect("should find apply_patch binary");
    cmd.current_dir(tmp.path());
    cmd.write_stdin(update_patch)
        .assert()
        .success()
        .stdout(format!("Success. Updated the following files:\nM {file}\n"));
    assert_eq!(fs::read_to_string(&absolute_path)?, "world\n");

    Ok(())
}
@@ -1 +0,0 @@
mod cli;
@@ -16,5 +16,4 @@ codex-apply-patch = { path = "../apply-patch" }
codex-core = { path = "../core" }
codex-linux-sandbox = { path = "../linux-sandbox" }
dotenvy = "0.15.7"
tempfile = "3"
tokio = { version = "1", features = ["rt-multi-thread"] }
@@ -3,13 +3,6 @@ use std::path::Path;
use std::path::PathBuf;

use codex_core::CODEX_APPLY_PATCH_ARG1;
#[cfg(unix)]
use std::os::unix::fs::symlink;
use tempfile::TempDir;

const LINUX_SANDBOX_ARG0: &str = "codex-linux-sandbox";
const APPLY_PATCH_ARG0: &str = "apply_patch";
const MISSPELLED_APPLY_PATCH_ARG0: &str = "applypatch";

/// While we want to deploy the Codex CLI as a single executable for simplicity,
/// we also want to expose some of its functionality as distinct CLIs, so we use
@@ -46,11 +39,9 @@ where
        .and_then(|s| s.to_str())
        .unwrap_or("");

    if exe_name == LINUX_SANDBOX_ARG0 {
    if exe_name == "codex-linux-sandbox" {
        // Safety: [`run_main`] never returns.
        codex_linux_sandbox::run_main();
    } else if exe_name == APPLY_PATCH_ARG0 || exe_name == MISSPELLED_APPLY_PATCH_ARG0 {
        codex_apply_patch::main();
    }

    let argv1 = args.next().unwrap_or_default();
@@ -77,19 +68,6 @@ where
    // before creating any threads/the Tokio runtime.
    load_dotenv();

    // Retain the TempDir so it exists for the lifetime of the invocation of
    // this executable. Admittedly, we could invoke `keep()` on it, but it
    // would be nice to avoid leaving temporary directories behind, if possible.
    let _path_entry = match prepend_path_entry_for_apply_patch() {
        Ok(path_entry) => Some(path_entry),
        Err(err) => {
            // It is possible that Codex will proceed successfully even if
            // updating the PATH fails, so warn the user and move on.
            eprintln!("WARNING: proceeding, even though we could not update PATH: {err}");
            None
        }
    };

    // Regular invocation – create a Tokio runtime and execute the provided
    // async entry-point.
    let runtime = tokio::runtime::Runtime::new()?;
@@ -135,67 +113,3 @@ where
        }
    }
}

/// Creates a temporary directory with either:
///
/// - UNIX: `apply_patch` symlink to the current executable
/// - WINDOWS: `apply_patch.bat` batch script to invoke the current executable
///   with the "secret" --codex-run-as-apply-patch flag.
///
/// This temporary directory is prepended to the PATH environment variable so
/// that `apply_patch` can be on the PATH without requiring the user to
/// install a separate `apply_patch` executable, simplifying the deployment of
/// Codex CLI.
///
/// IMPORTANT: This function modifies the PATH environment variable, so it MUST
/// be called before multiple threads are spawned.
fn prepend_path_entry_for_apply_patch() -> std::io::Result<TempDir> {
    let temp_dir = TempDir::new()?;
    let path = temp_dir.path();

    for filename in &[APPLY_PATCH_ARG0, MISSPELLED_APPLY_PATCH_ARG0] {
        let exe = std::env::current_exe()?;

        #[cfg(unix)]
        {
            let link = path.join(filename);
            symlink(&exe, &link)?;
        }

        #[cfg(windows)]
        {
            let batch_script = path.join(format!("{filename}.bat"));
            std::fs::write(
                &batch_script,
                format!(
                    r#"@echo off
"{}" {CODEX_APPLY_PATCH_ARG1} %*
"#,
                    exe.display()
                ),
            )?;
        }
    }

    #[cfg(unix)]
    const PATH_SEPARATOR: &str = ":";

    #[cfg(windows)]
    const PATH_SEPARATOR: &str = ";";

    let path_element = path.display();
    let updated_path_env_var = match std::env::var("PATH") {
        Ok(existing_path) => {
            format!("{path_element}{PATH_SEPARATOR}{existing_path}")
        }
        Err(_) => {
            format!("{path_element}")
        }
    };

    unsafe {
        std::env::set_var("PATH", updated_path_env_var);
    }

    Ok(temp_dir)
}
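The constants and doc comments above describe a busybox-style dispatch: one binary, several CLIs, selected by the name it was invoked under (arg0). A stripped-down sketch of the idea, with placeholder run functions standing in for `codex_linux_sandbox::run_main()` and `codex_apply_patch::main()`:

```rust
use std::path::Path;

fn run_linux_sandbox() -> ! {
    // Placeholder for codex_linux_sandbox::run_main().
    std::process::exit(0);
}

fn run_apply_patch() -> ! {
    // Placeholder for codex_apply_patch::main().
    std::process::exit(0);
}

fn main() {
    // arg0 is whatever name the binary was invoked under: a symlink named
    // `apply_patch` (UNIX) or a wrapper script (Windows) makes the same
    // executable behave like a different tool.
    let exe_name = std::env::args_os()
        .next()
        .map(|arg0| {
            Path::new(&arg0)
                .file_stem()
                .and_then(|s| s.to_str())
                .unwrap_or("")
                .to_string()
        })
        .unwrap_or_default();

    match exe_name.as_str() {
        "codex-linux-sandbox" => run_linux_sandbox(),
        "apply_patch" | "applypatch" => run_apply_patch(),
        _ => println!("regular codex invocation"),
    }
}
```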
@@ -1,3 +0,0 @@
// Single integration test binary that aggregates all test modules.
// The submodules live in `tests/suite/`.
mod suite;
@@ -1,2 +0,0 @@
// Aggregates all former standalone integration tests as modules.
mod apply_command_e2e;
@@ -6,7 +6,6 @@ version = { workspace = true }
[lib]
name = "codex_core"
path = "src/lib.rs"
doctest = false

[lints]
workspace = true
@@ -29,7 +28,6 @@ libc = "0.2.175"
mcp-types = { path = "../mcp-types" }
mime_guess = "2.0"
os_info = "3.12.0"
portable-pty = "0.9.0"
rand = "0.9"
regex-lite = "0.1.6"
reqwest = { version = "0.12", features = ["json", "stream"] }
@@ -52,12 +50,12 @@ tokio = { version = "1", features = [
] }
tokio-util = "0.7.16"
toml = "0.9.5"
toml_edit = "0.23.4"
toml_edit = "0.23.3"
tracing = { version = "0.1.41", features = ["log"] }
tree-sitter = "0.25.8"
tree-sitter-bash = "0.25.0"
uuid = { version = "1", features = ["serde", "v4"] }
whoami = "1.6.1"
whoami = "1.6.0"
wildmatch = "2.4.0"
@@ -623,12 +623,6 @@ where
                Poll::Ready(Some(Ok(ResponseEvent::ReasoningSummaryPartAdded))) => {
                    continue;
                }
                Poll::Ready(Some(Ok(ResponseEvent::WebSearchCallBegin { .. }))) => {
                    return Poll::Ready(Some(Ok(ResponseEvent::WebSearchCallBegin {
                        call_id: String::new(),
                        query: None,
                    })));
                }
            }
        }
    }
@@ -149,21 +149,7 @@ impl ModelClient {
        let store = prompt.store && auth_mode != Some(AuthMode::ChatGPT);

        let full_instructions = prompt.get_full_instructions(&self.config.model_family);
        let mut tools_json = create_tools_json_for_responses_api(&prompt.tools)?;
        // ChatGPT backend expects the preview name for web search.
        if auth_mode == Some(AuthMode::ChatGPT) {
            for tool in &mut tools_json {
                if let Some(map) = tool.as_object_mut()
                    && map.get("type").and_then(|v| v.as_str()) == Some("web_search")
                {
                    map.insert(
                        "type".to_string(),
                        serde_json::Value::String("web_search_preview".to_string()),
                    );
                }
            }
        }

        let tools_json = create_tools_json_for_responses_api(&prompt.tools)?;
        let reasoning = create_reasoning_param_for_request(
            &self.config.model_family,
            self.effort,
@@ -480,8 +466,7 @@ async fn process_sse<S>(
            }
        };

        let raw = sse.data.clone();
        trace!("SSE event: {}", raw);
        trace!("SSE event: {}", sse.data);

        let event: SseEvent = match serde_json::from_str(&sse.data) {
            Ok(event) => event,
@@ -595,24 +580,8 @@ async fn process_sse<S>(
            | "response.in_progress"
            | "response.output_item.added"
            | "response.output_text.done" => {
                if event.kind == "response.output_item.added"
                    && let Some(item) = event.item.as_ref()
                {
                    // Detect web_search_call begin and forward a synthetic event upstream.
                    if let Some(ty) = item.get("type").and_then(|v| v.as_str())
                        && ty == "web_search_call"
                    {
                        let call_id = item
                            .get("id")
                            .and_then(|v| v.as_str())
                            .unwrap_or("")
                            .to_string();
                        let ev = ResponseEvent::WebSearchCallBegin { call_id, query: None };
                        if tx_event.send(Ok(ev)).await.is_err() {
                            return;
                        }
                    }
                }
                // Currently, we ignore this event, but we handle it
                // separately to skip the logging message in the `other` case.
            }
            "response.reasoning_summary_part.added" => {
                // Boundary between reasoning summary sections (e.g., titles).
@@ -622,7 +591,7 @@ async fn process_sse<S>(
                }
            }
            "response.reasoning_summary_text.done" => {}
            _ => {}
            other => debug!(other, "sse event"),
        }
    }
}
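For reference, the `response.output_item.added` arm above keys off the `type` field of the JSON item carried by the SSE event. A self-contained sketch of just that detection step using `serde_json`; the payload here is a made-up example:

```rust
use serde_json::Value;

fn main() {
    // A made-up `response.output_item.added` payload carrying a web search call.
    let data = r#"{"type":"response.output_item.added","item":{"type":"web_search_call","id":"ws_123"}}"#;
    let event: Value = serde_json::from_str(data).expect("valid JSON");

    if let Some(item) = event.get("item") {
        if item.get("type").and_then(|v| v.as_str()) == Some("web_search_call") {
            let call_id = item.get("id").and_then(|v| v.as_str()).unwrap_or("");
            // In process_sse this is where the synthetic
            // ResponseEvent::WebSearchCallBegin { call_id, query: None } is sent.
            println!("web search began: {call_id}");
        }
    }
}
```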
@@ -93,10 +93,6 @@ pub enum ResponseEvent {
    ReasoningSummaryDelta(String),
    ReasoningContentDelta(String),
    ReasoningSummaryPartAdded,
    WebSearchCallBegin {
        call_id: String,
        query: Option<String>,
    },
}

#[derive(Debug, Serialize)]
@@ -14,7 +14,6 @@ use codex_apply_patch::ApplyPatchAction;
use codex_apply_patch::MaybeApplyPatchVerified;
use codex_apply_patch::maybe_parse_apply_patch_verified;
use codex_login::AuthManager;
use codex_protocol::protocol::ConversationHistoryResponseEvent;
use codex_protocol::protocol::TurnAbortReason;
use codex_protocol::protocol::TurnAbortedEvent;
use futures::prelude::*;
@@ -53,18 +52,12 @@ use crate::exec::SandboxType;
use crate::exec::StdoutStream;
use crate::exec::StreamOutput;
use crate::exec::process_exec_tool_call;
use crate::exec_command::EXEC_COMMAND_TOOL_NAME;
use crate::exec_command::ExecCommandParams;
use crate::exec_command::ExecSessionManager;
use crate::exec_command::WRITE_STDIN_TOOL_NAME;
use crate::exec_command::WriteStdinParams;
use crate::exec_env::create_env;
use crate::mcp_connection_manager::McpConnectionManager;
use crate::mcp_tool_call::handle_mcp_tool_call;
use crate::model_family::find_family_for_model;
use crate::openai_tools::ApplyPatchToolArgs;
use crate::openai_tools::ToolsConfig;
use crate::openai_tools::ToolsConfigParams;
use crate::openai_tools::get_openai_tools;
use crate::parse_command::parse_command;
use crate::plan_tool::handle_update_plan;
@@ -97,7 +90,6 @@ use crate::protocol::StreamErrorEvent;
use crate::protocol::Submission;
use crate::protocol::TaskCompleteEvent;
use crate::protocol::TurnDiffEvent;
use crate::protocol::WebSearchBeginEvent;
use crate::rollout::RolloutRecorder;
use crate::safety::SafetyCheck;
use crate::safety::assess_command_safety;
@@ -149,23 +141,14 @@ pub struct CodexSpawnOk {
}

pub(crate) const INITIAL_SUBMIT_ID: &str = "";
pub(crate) const SUBMISSION_CHANNEL_CAPACITY: usize = 64;

// Model-formatting limits: clients get full streams; only content sent to the model is truncated.
pub(crate) const MODEL_FORMAT_MAX_BYTES: usize = 10 * 1024; // 10 KiB
pub(crate) const MODEL_FORMAT_MAX_LINES: usize = 256; // lines
pub(crate) const MODEL_FORMAT_HEAD_LINES: usize = MODEL_FORMAT_MAX_LINES / 2;
pub(crate) const MODEL_FORMAT_TAIL_LINES: usize = MODEL_FORMAT_MAX_LINES - MODEL_FORMAT_HEAD_LINES; // 128
pub(crate) const MODEL_FORMAT_HEAD_BYTES: usize = MODEL_FORMAT_MAX_BYTES / 2;

impl Codex {
    /// Spawn a new [`Codex`] and initialize the session.
    pub async fn spawn(
        config: Config,
        auth_manager: Arc<AuthManager>,
        initial_history: Option<Vec<ResponseItem>>,
    ) -> CodexResult<CodexSpawnOk> {
        let (tx_sub, rx_sub) = async_channel::bounded(SUBMISSION_CHANNEL_CAPACITY);
        let (tx_sub, rx_sub) = async_channel::bounded(64);
        let (tx_event, rx_event) = async_channel::unbounded();

        let user_instructions = get_user_instructions(&config).await;
@@ -194,7 +177,6 @@ impl Codex {
            config.clone(),
            auth_manager.clone(),
            tx_event.clone(),
            initial_history,
        )
        .await
        .map_err(|e| {
@@ -204,12 +186,7 @@ impl Codex {
        let session_id = session.session_id;

        // This task will run until Op::Shutdown is received.
        tokio::spawn(submission_loop(
            session.clone(),
            turn_context,
            config,
            rx_sub,
        ));
        tokio::spawn(submission_loop(session, turn_context, config, rx_sub));
        let codex = Codex {
            next_id: AtomicU64::new(0),
            tx_sub,
@@ -269,7 +246,6 @@ pub(crate) struct Session {

    /// Manager for external MCP servers/tools.
    mcp_connection_manager: McpConnectionManager,
    session_manager: ExecSessionManager,

    /// External notifier command (will be passed as args to exec()). When
    /// `None` this feature is disabled.
@@ -356,7 +332,6 @@ impl Session {
        config: Arc<Config>,
        auth_manager: Arc<AuthManager>,
        tx_event: Sender<Event>,
        initial_history: Option<Vec<ResponseItem>>,
    ) -> anyhow::Result<(Arc<Self>, TurnContext)> {
        let ConfigureSession {
            provider,
@@ -416,15 +391,14 @@ impl Session {
        }
        let rollout_result = match rollout_res {
            Ok((session_id, maybe_saved, recorder)) => {
                let restored_items: Option<Vec<ResponseItem>> = initial_history.or_else(|| {
                let restored_items: Option<Vec<ResponseItem>> =
                    maybe_saved.and_then(|saved_session| {
                        if saved_session.items.is_empty() {
                            None
                        } else {
                            Some(saved_session.items)
                        }
                    })
                });
                    });
                RolloutResult {
                    session_id,
                    rollout_recorder: Some(recorder),
@@ -508,15 +482,13 @@ impl Session {
        );
        let turn_context = TurnContext {
            client,
            tools_config: ToolsConfig::new(&ToolsConfigParams {
                model_family: &config.model_family,
            tools_config: ToolsConfig::new(
                &config.model_family,
                approval_policy,
                sandbox_policy: sandbox_policy.clone(),
                include_plan_tool: config.include_plan_tool,
                include_apply_patch_tool: config.include_apply_patch_tool,
                include_web_search_request: config.tools_web_search_request,
                use_streamable_shell_tool: config.use_experimental_streamable_shell_tool,
            }),
                sandbox_policy.clone(),
                config.include_plan_tool,
                config.include_apply_patch_tool,
            ),
            user_instructions,
            base_instructions,
            approval_policy,
@@ -529,7 +501,6 @@ impl Session {
            session_id,
            tx_event: tx_event.clone(),
            mcp_connection_manager,
            session_manager: ExecSessionManager::default(),
            notify,
            state: Mutex::new(state),
            rollout: Mutex::new(rollout_recorder),
@@ -730,6 +701,7 @@ impl Session {
        let _ = self.tx_event.send(event).await;
    }

    #[allow(clippy::too_many_arguments)]
    async fn on_exec_command_end(
        &self,
        turn_diff_tracker: &mut TurnDiffTracker,
@@ -741,15 +713,14 @@ impl Session {
        let ExecToolCallOutput {
            stdout,
            stderr,
            aggregated_output,
            duration,
            exit_code,
        } = output;
        // Send full stdout/stderr to clients; do not truncate.
        let stdout = stdout.text.clone();
        let stderr = stderr.text.clone();
        let formatted_output = format_exec_output_str(output);
        let aggregated_output: String = aggregated_output.text.clone();
        // Because stdout and stderr could each be up to 100 KiB, we send
        // truncated versions.
        const MAX_STREAM_OUTPUT: usize = 5 * 1024; // 5KiB
        let stdout = stdout.text.chars().take(MAX_STREAM_OUTPUT).collect();
        let stderr = stderr.text.chars().take(MAX_STREAM_OUTPUT).collect();

        let msg = if is_apply_patch {
            EventMsg::PatchApplyEnd(PatchApplyEndEvent {
@@ -763,10 +734,8 @@ impl Session {
                call_id: call_id.to_string(),
                stdout,
                stderr,
                aggregated_output,
                exit_code: *exit_code,
                duration: *duration,
                formatted_output,
                exit_code: *exit_code,
            })
        };

@@ -824,7 +793,6 @@ impl Session {
                    exit_code: -1,
                    stdout: StreamOutput::new(String::new()),
                    stderr: StreamOutput::new(get_error_message_ui(e)),
                    aggregated_output: StreamOutput::new(get_error_message_ui(e)),
                    duration: Duration::default(),
                };
                &output_stderr
@@ -1095,15 +1063,13 @@ async fn submission_loop(
                    .unwrap_or(prev.sandbox_policy.clone());
                let new_cwd = cwd.clone().unwrap_or_else(|| prev.cwd.clone());

                let tools_config = ToolsConfig::new(&ToolsConfigParams {
                    model_family: &effective_family,
                    approval_policy: new_approval_policy,
                    sandbox_policy: new_sandbox_policy.clone(),
                    include_plan_tool: config.include_plan_tool,
                    include_apply_patch_tool: config.include_apply_patch_tool,
                    include_web_search_request: config.tools_web_search_request,
                    use_streamable_shell_tool: config.use_experimental_streamable_shell_tool,
                });
                let tools_config = ToolsConfig::new(
                    &effective_family,
                    new_approval_policy,
                    new_sandbox_policy.clone(),
                    config.include_plan_tool,
                    config.include_apply_patch_tool,
                );

                let new_turn_context = TurnContext {
                    client,
@@ -1175,16 +1141,13 @@ async fn submission_loop(

                let fresh_turn_context = TurnContext {
                    client,
                    tools_config: ToolsConfig::new(&ToolsConfigParams {
                        model_family: &model_family,
                    tools_config: ToolsConfig::new(
                        &model_family,
                        approval_policy,
                        sandbox_policy: sandbox_policy.clone(),
                        include_plan_tool: config.include_plan_tool,
                        include_apply_patch_tool: config.include_apply_patch_tool,
                        include_web_search_request: config.tools_web_search_request,
                        use_streamable_shell_tool: config
                            .use_experimental_streamable_shell_tool,
                    }),
                        sandbox_policy.clone(),
                        config.include_plan_tool,
                        config.include_apply_patch_tool,
                    ),
                    user_instructions: turn_context.user_instructions.clone(),
                    base_instructions: turn_context.base_instructions.clone(),
                    approval_policy,
@@ -1322,21 +1285,6 @@ async fn submission_loop(
                }
                break;
            }
            Op::GetHistory => {
                let tx_event = sess.tx_event.clone();
                let sub_id = sub.id.clone();

                let event = Event {
                    id: sub_id.clone(),
                    msg: EventMsg::ConversationHistory(ConversationHistoryResponseEvent {
                        conversation_id: sess.session_id,
                        entries: sess.state.lock_unchecked().history.contents(),
                    }),
                };
                if let Err(e) = tx_event.send(event).await {
                    warn!("failed to send ConversationHistory event: {e}");
                }
            }
            _ => {
                // Ignore unknown ops; enum is non_exhaustive to allow extensions.
            }
@@ -1695,7 +1643,6 @@ async fn try_run_turn(
    let mut stream = turn_context.client.clone().stream(&prompt).await?;

    let mut output = Vec::new();

    loop {
        // Poll the next item from the model stream. We must inspect *both* Ok and Err
        // cases so that transient stream failures (e.g., dropped SSE connection before
@@ -1732,16 +1679,6 @@ async fn try_run_turn(
                .await?;
                output.push(ProcessedResponseItem { item, response });
            }
            ResponseEvent::WebSearchCallBegin { call_id, query } => {
                let q = query.unwrap_or_else(|| "Searching Web...".to_string());
                let _ = sess
                    .tx_event
                    .send(Event {
                        id: sub_id.to_string(),
                        msg: EventMsg::WebSearchBegin(WebSearchBeginEvent { call_id, query: q }),
                    })
                    .await;
            }
            ResponseEvent::Completed {
                response_id: _,
                token_usage,
@@ -2100,54 +2037,6 @@ async fn handle_function_call(
            .await
        }
        "update_plan" => handle_update_plan(sess, arguments, sub_id, call_id).await,
        EXEC_COMMAND_TOOL_NAME => {
            // TODO(mbolin): Sandbox check.
            let exec_params = match serde_json::from_str::<ExecCommandParams>(&arguments) {
                Ok(params) => params,
                Err(e) => {
                    return ResponseInputItem::FunctionCallOutput {
                        call_id,
                        output: FunctionCallOutputPayload {
                            content: format!("failed to parse function arguments: {e}"),
                            success: Some(false),
                        },
                    };
                }
            };
            let result = sess
                .session_manager
                .handle_exec_command_request(exec_params)
                .await;
            let function_call_output = crate::exec_command::result_into_payload(result);
            ResponseInputItem::FunctionCallOutput {
                call_id,
                output: function_call_output,
            }
        }
        WRITE_STDIN_TOOL_NAME => {
            let write_stdin_params = match serde_json::from_str::<WriteStdinParams>(&arguments) {
                Ok(params) => params,
                Err(e) => {
                    return ResponseInputItem::FunctionCallOutput {
                        call_id,
                        output: FunctionCallOutputPayload {
                            content: format!("failed to parse function arguments: {e}"),
                            success: Some(false),
                        },
                    };
                }
            };
            let result = sess
                .session_manager
                .handle_write_stdin_request(write_stdin_params)
                .await;
            let function_call_output: FunctionCallOutputPayload =
                crate::exec_command::result_into_payload(result);
            ResponseInputItem::FunctionCallOutput {
                call_id,
                output: function_call_output,
            }
        }
        _ => {
            match sess.mcp_connection_manager.parse_tool_name(&name) {
                Some((server, tool_name)) => {
@@ -2468,7 +2357,7 @@ async fn handle_container_exec_with_params(
    let ExecToolCallOutput { exit_code, .. } = &output;

    let is_success = *exit_code == 0;
    let content = format_exec_output(&output);
    let content = format_exec_output(output);
    ResponseInputItem::FunctionCallOutput {
        call_id: call_id.clone(),
        output: FunctionCallOutputPayload {
@@ -2601,7 +2490,7 @@ async fn handle_sandbox_error(
    let ExecToolCallOutput { exit_code, .. } = &retry_output;

    let is_success = *exit_code == 0;
    let content = format_exec_output(&retry_output);
    let content = format_exec_output(retry_output);

    ResponseInputItem::FunctionCallOutput {
        call_id: call_id.clone(),
@@ -2633,113 +2522,13 @@ async fn handle_sandbox_error(
    }
}

fn format_exec_output_str(exec_output: &ExecToolCallOutput) -> String {
    let ExecToolCallOutput {
        aggregated_output, ..
    } = exec_output;

    // Head+tail truncation for the model: show the beginning and end with an elision.
    // Clients still receive full streams; only this formatted summary is capped.

    let s = aggregated_output.text.as_str();
    let total_lines = s.lines().count();
    if s.len() <= MODEL_FORMAT_MAX_BYTES && total_lines <= MODEL_FORMAT_MAX_LINES {
        return s.to_string();
    }

    let lines: Vec<&str> = s.lines().collect();
    let head_take = MODEL_FORMAT_HEAD_LINES.min(lines.len());
    let tail_take = MODEL_FORMAT_TAIL_LINES.min(lines.len().saturating_sub(head_take));
    let omitted = lines.len().saturating_sub(head_take + tail_take);

    // Join head and tail blocks (lines() strips newlines; reinsert them)
    let head_block = lines
        .iter()
        .take(head_take)
        .cloned()
        .collect::<Vec<_>>()
        .join("\n");
    let tail_block = if tail_take > 0 {
        lines[lines.len() - tail_take..].join("\n")
    } else {
        String::new()
    };
    let marker = format!("\n[... omitted {omitted} of {total_lines} lines ...]\n\n");

    // Byte budgets for head/tail around the marker
    let mut head_budget = MODEL_FORMAT_HEAD_BYTES.min(MODEL_FORMAT_MAX_BYTES);
    let tail_budget = MODEL_FORMAT_MAX_BYTES.saturating_sub(head_budget + marker.len());
    if tail_budget == 0 && marker.len() >= MODEL_FORMAT_MAX_BYTES {
        // Degenerate case: marker alone exceeds budget; return a clipped marker
        return take_bytes_at_char_boundary(&marker, MODEL_FORMAT_MAX_BYTES).to_string();
    }
    if tail_budget == 0 {
        // Make room for the marker by shrinking head
        head_budget = MODEL_FORMAT_MAX_BYTES.saturating_sub(marker.len());
    }

    // Enforce line-count cap by trimming head/tail lines
    let head_lines_text = head_block;
    let tail_lines_text = tail_block;
    // Build final string respecting byte budgets
    let head_part = take_bytes_at_char_boundary(&head_lines_text, head_budget);
    let mut result = String::with_capacity(MODEL_FORMAT_MAX_BYTES.min(s.len()));
    result.push_str(head_part);
    result.push_str(&marker);

    let remaining = MODEL_FORMAT_MAX_BYTES.saturating_sub(result.len());
    let tail_budget_final = remaining;
    let tail_part = take_last_bytes_at_char_boundary(&tail_lines_text, tail_budget_final);
    result.push_str(tail_part);

    result
}

// Truncate a &str to a byte budget at a char boundary (prefix)
#[inline]
fn take_bytes_at_char_boundary(s: &str, maxb: usize) -> &str {
    if s.len() <= maxb {
        return s;
    }
    let mut last_ok = 0;
    for (i, ch) in s.char_indices() {
        let nb = i + ch.len_utf8();
        if nb > maxb {
            break;
        }
        last_ok = nb;
    }
    &s[..last_ok]
}

// Take a suffix of a &str within a byte budget at a char boundary
#[inline]
fn take_last_bytes_at_char_boundary(s: &str, maxb: usize) -> &str {
    if s.len() <= maxb {
        return s;
    }
    let mut start = s.len();
    let mut used = 0usize;
    for (i, ch) in s.char_indices().rev() {
        let nb = ch.len_utf8();
        if used + nb > maxb {
            break;
        }
        start = i;
        used += nb;
        if start == 0 {
            break;
        }
    }
    &s[start..]
}

/// Exec output is a pre-serialized JSON payload
fn format_exec_output(exec_output: &ExecToolCallOutput) -> String {
fn format_exec_output(exec_output: ExecToolCallOutput) -> String {
    let ExecToolCallOutput {
        exit_code,
        stdout,
        stderr,
        duration,
        ..
    } = exec_output;

    #[derive(Serialize)]
@@ -2757,12 +2546,20 @@ fn format_exec_output(exec_output: &ExecToolCallOutput) -> String {
    // round to 1 decimal place
    let duration_seconds = ((duration.as_secs_f32()) * 10.0).round() / 10.0;

    let formatted_output = format_exec_output_str(exec_output);
    let is_success = exit_code == 0;
    let output = if is_success { stdout } else { stderr };

    let mut formatted_output = output.text;
    if let Some(truncated_after_lines) = output.truncated_after_lines {
        formatted_output.push_str(&format!(
            "\n\n[Output truncated after {truncated_after_lines} lines: too many lines or bytes.]",
        ));
    }

    let payload = ExecOutput {
        output: &formatted_output,
        metadata: ExecMetadata {
            exit_code: *exit_code,
            exit_code,
            duration_seconds,
        },
    };
@@ -2816,9 +2613,15 @@ async fn drain_to_completed(
            response_id: _,
            token_usage,
        }) => {
            // some providers don't return token usage, so we default
            // TODO: consider approximate token usage
            let token_usage = token_usage.unwrap_or_default();
            let token_usage = match token_usage {
                Some(usage) => usage,
                None => {
                    return Err(CodexErr::Stream(
                        "token_usage was None in ResponseEvent::Completed".into(),
                        None,
                    ));
                }
            };
            sess.tx_event
                .send(Event {
                    id: sub_id.to_string(),
@@ -2826,7 +2629,6 @@ async fn drain_to_completed(
                })
                .await
                .ok();

            return Ok(());
        }
        Ok(_) => continue,
@@ -2877,7 +2679,6 @@ mod tests {
    use mcp_types::TextContent;
    use pretty_assertions::assert_eq;
    use serde_json::json;
    use std::time::Duration as StdDuration;

    fn text_block(s: &str) -> ContentBlock {
        ContentBlock::TextContent(TextContent {
@@ -2912,82 +2713,6 @@ mod tests {
        assert_eq!(expected, got);
    }

    #[test]
    fn model_truncation_head_tail_by_lines() {
        // Build 400 short lines so line-count limit, not byte budget, triggers truncation
        let lines: Vec<String> = (1..=400).map(|i| format!("line{i}")).collect();
        let full = lines.join("\n");

        let exec = ExecToolCallOutput {
            exit_code: 0,
            stdout: StreamOutput::new(String::new()),
            stderr: StreamOutput::new(String::new()),
            aggregated_output: StreamOutput::new(full.clone()),
            duration: StdDuration::from_secs(1),
        };

        let out = format_exec_output_str(&exec);

        // Expect elision marker with correct counts
        let omitted = 400 - MODEL_FORMAT_MAX_LINES; // 144
        let marker = format!("\n[... omitted {omitted} of 400 lines ...]\n\n");
        assert!(out.contains(&marker), "missing marker: {out}");

        // Validate head and tail
        let parts: Vec<&str> = out.split(&marker).collect();
        assert_eq!(parts.len(), 2, "expected one marker split");
        let head = parts[0];
        let tail = parts[1];

        let expected_head: String = (1..=MODEL_FORMAT_HEAD_LINES)
            .map(|i| format!("line{i}"))
            .collect::<Vec<_>>()
            .join("\n");
        assert!(head.starts_with(&expected_head), "head mismatch");

        let expected_tail: String = ((400 - MODEL_FORMAT_TAIL_LINES + 1)..=400)
            .map(|i| format!("line{i}"))
            .collect::<Vec<_>>()
            .join("\n");
        assert!(tail.ends_with(&expected_tail), "tail mismatch");
    }

    #[test]
    fn model_truncation_respects_byte_budget() {
        // Construct a large output (about 100kB) so byte budget dominates
        let big_line = "x".repeat(100);
        let full = std::iter::repeat_n(big_line.clone(), 1000)
            .collect::<Vec<_>>()
            .join("\n");

        let exec = ExecToolCallOutput {
            exit_code: 0,
            stdout: StreamOutput::new(String::new()),
            stderr: StreamOutput::new(String::new()),
            aggregated_output: StreamOutput::new(full.clone()),
            duration: StdDuration::from_secs(1),
        };

        let out = format_exec_output_str(&exec);
        assert!(out.len() <= MODEL_FORMAT_MAX_BYTES, "exceeds byte budget");
        assert!(out.contains("omitted"), "should contain elision marker");

        // Ensure head and tail are drawn from the original
        assert!(full.starts_with(out.chars().take(8).collect::<String>().as_str()));
        assert!(
            full.ends_with(
                out.chars()
                    .rev()
                    .take(8)
                    .collect::<String>()
                    .chars()
                    .rev()
                    .collect::<String>()
                    .as_str()
            )
        );
    }

    #[test]
    fn falls_back_to_content_when_structured_is_null() {
        let ctr = CallToolResult {

@@ -169,15 +169,11 @@ pub struct Config {
    /// model family's default preference.
    pub include_apply_patch_tool: bool,

    pub tools_web_search_request: bool,

    /// The value for the `originator` header included with Responses API requests.
    pub responses_originator_header: String,

    /// If set to `true`, the API key will be signed with the `originator` header.
    pub preferred_auth_method: AuthMode,

    pub use_experimental_streamable_shell_tool: bool,
}

impl Config {
@@ -473,8 +469,6 @@ pub struct ConfigToml {
    /// Experimental path to a file whose contents replace the built-in BASE_INSTRUCTIONS.
    pub experimental_instructions_file: Option<PathBuf>,

    pub experimental_use_exec_command_tool: Option<bool>,

    /// The value for the `originator` header included with Responses API requests.
    pub responses_originator_header_internal_override: Option<String>,

@@ -482,9 +476,6 @@ pub struct ConfigToml {

    /// If set to `true`, the API key will be signed with the `originator` header.
    pub preferred_auth_method: Option<AuthMode>,

    /// Nested tools section for feature toggles
    pub tools: Option<ToolsToml>,
}

#[derive(Deserialize, Debug, Clone, PartialEq, Eq)]
@@ -492,13 +483,6 @@ pub struct ProjectConfig {
    pub trust_level: Option<String>,
}

#[derive(Deserialize, Debug, Clone, Default)]
pub struct ToolsToml {
    // Renamed from `web_search_request`; keep alias for backwards compatibility.
    #[serde(default, alias = "web_search_request")]
    pub web_search: Option<bool>,
}
|
||||
|
||||
impl ConfigToml {
|
||||
/// Derive the effective sandbox policy from the configuration.
|
||||
fn derive_sandbox_policy(&self, sandbox_mode_override: Option<SandboxMode>) -> SandboxPolicy {
|
||||
@@ -588,7 +572,6 @@ pub struct ConfigOverrides {
|
||||
pub include_apply_patch_tool: Option<bool>,
|
||||
pub disable_response_storage: Option<bool>,
|
||||
pub show_raw_agent_reasoning: Option<bool>,
|
||||
pub tools_web_search_request: Option<bool>,
|
||||
}
|
||||
|
||||
impl Config {
|
||||
@@ -615,7 +598,6 @@ impl Config {
|
||||
include_apply_patch_tool,
|
||||
disable_response_storage,
|
||||
show_raw_agent_reasoning,
|
||||
tools_web_search_request: override_tools_web_search_request,
|
||||
} = overrides;
|
||||
|
||||
let config_profile = match config_profile_key.as_ref().or(cfg.profile.as_ref()) {
|
||||
@@ -654,7 +636,7 @@ impl Config {
|
||||
})?
|
||||
.clone();
|
||||
|
||||
let shell_environment_policy = cfg.shell_environment_policy.clone().into();
|
||||
let shell_environment_policy = cfg.shell_environment_policy.into();
|
||||
|
||||
let resolved_cwd = {
|
||||
use std::env;
|
||||
@@ -675,11 +657,7 @@ impl Config {
|
||||
}
|
||||
};
|
||||
|
||||
let history = cfg.history.clone().unwrap_or_default();
|
||||
|
||||
let tools_web_search_request = override_tools_web_search_request
|
||||
.or(cfg.tools.as_ref().and_then(|t| t.web_search))
|
||||
.unwrap_or(false);
|
||||
let history = cfg.history.unwrap_or_default();
|
||||
|
||||
let model = model
|
||||
.or(config_profile.model)
|
||||
@@ -753,7 +731,7 @@ impl Config {
|
||||
codex_home,
|
||||
history,
|
||||
file_opener: cfg.file_opener.unwrap_or(UriBasedFileOpener::VsCode),
|
||||
tui: cfg.tui.clone().unwrap_or_default(),
|
||||
tui: cfg.tui.unwrap_or_default(),
|
||||
codex_linux_sandbox_exe,
|
||||
|
||||
hide_agent_reasoning: cfg.hide_agent_reasoning.unwrap_or(false),
|
||||
@@ -772,18 +750,14 @@ impl Config {
|
||||
model_verbosity: config_profile.model_verbosity.or(cfg.model_verbosity),
|
||||
chatgpt_base_url: config_profile
|
||||
.chatgpt_base_url
|
||||
.or(cfg.chatgpt_base_url.clone())
|
||||
.or(cfg.chatgpt_base_url)
|
||||
.unwrap_or("https://chatgpt.com/backend-api/".to_string()),
|
||||
|
||||
experimental_resume,
|
||||
include_plan_tool: include_plan_tool.unwrap_or(false),
|
||||
include_apply_patch_tool: include_apply_patch_tool.unwrap_or(false),
|
||||
tools_web_search_request,
|
||||
responses_originator_header,
|
||||
preferred_auth_method: cfg.preferred_auth_method.unwrap_or(AuthMode::ChatGPT),
|
||||
use_experimental_streamable_shell_tool: cfg
|
||||
.experimental_use_exec_command_tool
|
||||
.unwrap_or(false),
|
||||
};
|
||||
Ok(config)
|
||||
}
|
||||
@@ -1148,10 +1122,8 @@ disable_response_storage = true
|
||||
base_instructions: None,
|
||||
include_plan_tool: false,
|
||||
include_apply_patch_tool: false,
|
||||
tools_web_search_request: false,
|
||||
responses_originator_header: "codex_cli_rs".to_string(),
|
||||
preferred_auth_method: AuthMode::ChatGPT,
|
||||
use_experimental_streamable_shell_tool: false,
|
||||
},
|
||||
o3_profile_config
|
||||
);
|
||||
@@ -1204,10 +1176,8 @@ disable_response_storage = true
|
||||
base_instructions: None,
|
||||
include_plan_tool: false,
|
||||
include_apply_patch_tool: false,
|
||||
tools_web_search_request: false,
|
||||
responses_originator_header: "codex_cli_rs".to_string(),
|
||||
preferred_auth_method: AuthMode::ChatGPT,
|
||||
use_experimental_streamable_shell_tool: false,
|
||||
};
|
||||
|
||||
assert_eq!(expected_gpt3_profile_config, gpt3_profile_config);
|
||||
@@ -1275,10 +1245,8 @@ disable_response_storage = true
|
||||
base_instructions: None,
|
||||
include_plan_tool: false,
|
||||
include_apply_patch_tool: false,
|
||||
tools_web_search_request: false,
|
||||
responses_originator_header: "codex_cli_rs".to_string(),
|
||||
preferred_auth_method: AuthMode::ChatGPT,
|
||||
use_experimental_streamable_shell_tool: false,
|
||||
};
|
||||
|
||||
assert_eq!(expected_zdr_profile_config, zdr_profile_config);
|
||||
|
||||
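The config hunks above all resolve settings through the same precedence chain: explicit override first, then the active profile, then the TOML value, then a hard-coded default. A self-contained sketch of the pattern (the function and values here are hypothetical, not crate code):

    // Hypothetical illustration of the Option-chaining precedence used above.
    fn resolve(override_val: Option<bool>, profile_val: Option<bool>, toml_val: Option<bool>) -> bool {
        override_val.or(profile_val).or(toml_val).unwrap_or(false)
    }

    fn main() {
        assert!(resolve(Some(true), None, None)); // explicit override wins
        assert!(!resolve(None, None, None)); // nothing set: hard-coded default
    }
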
@@ -16,7 +16,6 @@ use crate::error::Result as CodexResult;
use crate::protocol::Event;
use crate::protocol::EventMsg;
use crate::protocol::SessionConfiguredEvent;
use codex_protocol::models::ResponseItem;

/// Represents a newly created Codex conversation, including the first event
/// (which is [`EventMsg::SessionConfigured`]).
@@ -60,18 +59,8 @@ impl ConversationManager {
        let CodexSpawnOk {
            codex,
            session_id: conversation_id,
        } = {
            let initial_history = None;
            Codex::spawn(config, auth_manager, initial_history).await?
        };
        self.finalize_spawn(codex, conversation_id).await
    }
        } = Codex::spawn(config, auth_manager).await?;

    async fn finalize_spawn(
        &self,
        codex: Codex,
        conversation_id: Uuid,
    ) -> CodexResult<NewConversation> {
        // The first event must be `SessionInitialized`. Validate and forward it
        // to the caller so that they can display it in the conversation
        // history.
@@ -109,120 +98,4 @@ impl ConversationManager {
            .cloned()
            .ok_or_else(|| CodexErr::ConversationNotFound(conversation_id))
    }

    /// Fork an existing conversation by dropping the last `drop_last_messages`
    /// user/assistant messages from its transcript and starting a new
    /// conversation with identical configuration (unless overridden by the
    /// caller's `config`). The new conversation will have a fresh id.
    pub async fn fork_conversation(
        &self,
        conversation_history: Vec<ResponseItem>,
        num_messages_to_drop: usize,
        config: Config,
    ) -> CodexResult<NewConversation> {
        // Compute the prefix up to the cut point.
        let truncated_history =
            truncate_after_dropping_last_messages(conversation_history, num_messages_to_drop);

        // Spawn a new conversation with the computed initial history.
        let auth_manager = self.auth_manager.clone();
        let CodexSpawnOk {
            codex,
            session_id: conversation_id,
        } = Codex::spawn(config, auth_manager, Some(truncated_history)).await?;

        self.finalize_spawn(codex, conversation_id).await
    }
}

/// Return a prefix of `items` obtained by dropping the last `n` user messages
/// and all items that follow them.
fn truncate_after_dropping_last_messages(items: Vec<ResponseItem>, n: usize) -> Vec<ResponseItem> {
    if n == 0 || items.is_empty() {
        return items;
    }

    // Walk backwards counting only `user` Message items, find cut index.
    let mut count = 0usize;
    let mut cut_index = 0usize;
    for (idx, item) in items.iter().enumerate().rev() {
        if let ResponseItem::Message { role, .. } = item
            && role == "user"
        {
            count += 1;
            if count == n {
                // Cut everything from this user message to the end.
                cut_index = idx;
                break;
            }
        }
    }
    if count < n {
        // If fewer than n messages exist, drop everything.
        Vec::new()
    } else {
        items.into_iter().take(cut_index).collect()
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use codex_protocol::models::ContentItem;
    use codex_protocol::models::ReasoningItemReasoningSummary;
    use codex_protocol::models::ResponseItem;

    fn user_msg(text: &str) -> ResponseItem {
        ResponseItem::Message {
            id: None,
            role: "user".to_string(),
            content: vec![ContentItem::OutputText {
                text: text.to_string(),
            }],
        }
    }
    fn assistant_msg(text: &str) -> ResponseItem {
        ResponseItem::Message {
            id: None,
            role: "assistant".to_string(),
            content: vec![ContentItem::OutputText {
                text: text.to_string(),
            }],
        }
    }

    #[test]
    fn drops_from_last_user_only() {
        let items = vec![
            user_msg("u1"),
            assistant_msg("a1"),
            assistant_msg("a2"),
            user_msg("u2"),
            assistant_msg("a3"),
            ResponseItem::Reasoning {
                id: "r1".to_string(),
                summary: vec![ReasoningItemReasoningSummary::SummaryText {
                    text: "s".to_string(),
                }],
                content: None,
                encrypted_content: None,
            },
            ResponseItem::FunctionCall {
                id: None,
                name: "tool".to_string(),
                arguments: "{}".to_string(),
                call_id: "c1".to_string(),
            },
            assistant_msg("a4"),
        ];

        let truncated = truncate_after_dropping_last_messages(items.clone(), 1);
        assert_eq!(
            truncated,
            vec![items[0].clone(), items[1].clone(), items[2].clone()]
        );

        let truncated2 = truncate_after_dropping_last_messages(items, 2);
        assert!(truncated2.is_empty());
    }
}

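A hedged sketch of driving the fork API above; the manager, transcript, and config bindings are assumed to exist at the call site:

    // Hypothetical call site: drop the last user turn (and everything after it)
    // and branch the conversation under the same configuration.
    let forked = manager
        .fork_conversation(transcript.clone(), 1, config.clone())
        .await?;

Note the cut lands on the nth-from-last user message itself, so the assistant replies, reasoning items, and tool calls that follow it are discarded with it; the test above pins exactly that behavior.
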
@@ -28,17 +28,18 @@ use crate::spawn::StdioPolicy;
use crate::spawn::spawn_child_async;
use serde_bytes::ByteBuf;

// Maximum we send for each stream, which is either:
// - 10KiB OR
// - 256 lines
const MAX_STREAM_OUTPUT: usize = 10 * 1024;
const MAX_STREAM_OUTPUT_LINES: usize = 256;

const DEFAULT_TIMEOUT_MS: u64 = 10_000;

// Hardcode these since it does not seem worth including the libc crate just
// for these.
const SIGKILL_CODE: i32 = 9;
const TIMEOUT_CODE: i32 = 64;
const EXIT_CODE_SIGNAL_BASE: i32 = 128; // conventional shell: 128 + signal

// I/O buffer sizing
const READ_CHUNK_SIZE: usize = 8192; // bytes per read
const AGGREGATE_BUFFER_INITIAL_CAPACITY: usize = 8 * 1024; // 8 KiB

#[derive(Debug, Clone)]
pub struct ExecParams {
@@ -152,7 +153,6 @@ pub async fn process_exec_tool_call(
        exit_code,
        stdout,
        stderr,
        aggregated_output: raw_output.aggregated_output.from_utf8_lossy(),
        duration,
    })
}
@@ -189,11 +189,10 @@ pub struct StreamOutput<T> {
    pub truncated_after_lines: Option<u32>,
}
#[derive(Debug)]
struct RawExecToolCallOutput {
pub struct RawExecToolCallOutput {
    pub exit_status: ExitStatus,
    pub stdout: StreamOutput<Vec<u8>>,
    pub stderr: StreamOutput<Vec<u8>>,
    pub aggregated_output: StreamOutput<Vec<u8>>,
}

impl StreamOutput<String> {
@@ -214,17 +213,11 @@ impl StreamOutput<Vec<u8>> {
    }
}

#[inline]
fn append_all(dst: &mut Vec<u8>, src: &[u8]) {
    dst.extend_from_slice(src);
}

#[derive(Debug)]
pub struct ExecToolCallOutput {
    pub exit_code: i32,
    pub stdout: StreamOutput<String>,
    pub stderr: StreamOutput<String>,
    pub aggregated_output: StreamOutput<String>,
    pub duration: Duration,
}

@@ -260,7 +253,7 @@ async fn exec(

/// Consumes the output of a child process, truncating it so it is suitable for
/// use as the output of a `shell` tool call. Also enforces specified timeout.
async fn consume_truncated_output(
pub(crate) async fn consume_truncated_output(
    mut child: Child,
    timeout: Duration,
    stdout_stream: Option<StdoutStream>,
@@ -280,19 +273,19 @@ async fn consume_truncated_output(
        ))
    })?;

    let (agg_tx, agg_rx) = async_channel::unbounded::<Vec<u8>>();

    let stdout_handle = tokio::spawn(read_capped(
        BufReader::new(stdout_reader),
        MAX_STREAM_OUTPUT,
        MAX_STREAM_OUTPUT_LINES,
        stdout_stream.clone(),
        false,
        Some(agg_tx.clone()),
    ));
    let stderr_handle = tokio::spawn(read_capped(
        BufReader::new(stderr_reader),
        MAX_STREAM_OUTPUT,
        MAX_STREAM_OUTPUT_LINES,
        stdout_stream.clone(),
        true,
        Some(agg_tx.clone()),
    ));

    let exit_status = tokio::select! {
@@ -304,48 +297,38 @@ async fn consume_truncated_output(
                // timeout
                child.start_kill()?;
                // Debatable whether `child.wait().await` should be called here.
                synthetic_exit_status(EXIT_CODE_SIGNAL_BASE + TIMEOUT_CODE)
                synthetic_exit_status(128 + TIMEOUT_CODE)
            }
        }
    }
        _ = tokio::signal::ctrl_c() => {
            child.start_kill()?;
            synthetic_exit_status(EXIT_CODE_SIGNAL_BASE + SIGKILL_CODE)
            synthetic_exit_status(128 + SIGKILL_CODE)
        }
    };

    let stdout = stdout_handle.await??;
    let stderr = stderr_handle.await??;

    drop(agg_tx);

    let mut combined_buf = Vec::with_capacity(AGGREGATE_BUFFER_INITIAL_CAPACITY);
    while let Ok(chunk) = agg_rx.recv().await {
        append_all(&mut combined_buf, &chunk);
    }
    let aggregated_output = StreamOutput {
        text: combined_buf,
        truncated_after_lines: None,
    };

    Ok(RawExecToolCallOutput {
        exit_status,
        stdout,
        stderr,
        aggregated_output,
    })
}

async fn read_capped<R: AsyncRead + Unpin + Send + 'static>(
    mut reader: R,
    max_output: usize,
    max_lines: usize,
    stream: Option<StdoutStream>,
    is_stderr: bool,
    aggregate_tx: Option<Sender<Vec<u8>>>,
) -> io::Result<StreamOutput<Vec<u8>>> {
    let mut buf = Vec::with_capacity(AGGREGATE_BUFFER_INITIAL_CAPACITY);
    let mut tmp = [0u8; READ_CHUNK_SIZE];
    let mut buf = Vec::with_capacity(max_output.min(8 * 1024));
    let mut tmp = [0u8; 8192];

    // No caps: append all bytes
    let mut remaining_bytes = max_output;
    let mut remaining_lines = max_lines;

    loop {
        let n = reader.read(&mut tmp).await?;
@@ -372,17 +355,33 @@ async fn read_capped<R: AsyncRead + Unpin + Send + 'static>(
            let _ = stream.tx_event.send(event).await;
        }

        if let Some(tx) = &aggregate_tx {
            let _ = tx.send(tmp[..n].to_vec()).await;
        // Copy into the buffer only while we still have byte and line budget.
        if remaining_bytes > 0 && remaining_lines > 0 {
            let mut copy_len = 0;
            for &b in &tmp[..n] {
                if remaining_bytes == 0 || remaining_lines == 0 {
                    break;
                }
                copy_len += 1;
                remaining_bytes -= 1;
                if b == b'\n' {
                    remaining_lines -= 1;
                }
            }
            buf.extend_from_slice(&tmp[..copy_len]);
        }

        append_all(&mut buf, &tmp[..n]);
        // Continue reading to EOF to avoid back-pressure
        // Continue reading to EOF to avoid back-pressure, but discard once caps are hit.
    }

    let truncated = remaining_lines == 0 || remaining_bytes == 0;

    Ok(StreamOutput {
        text: buf,
        truncated_after_lines: None,
        truncated_after_lines: if truncated {
            Some((max_lines - remaining_lines) as u32)
        } else {
            None
        },
    })
}

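The budget walk in read_capped is easy to check in isolation; this standalone sketch reimplements the same counting rule with toy budgets (names and values hypothetical, not crate code):

    // Standalone copy of the budget rule: stop copying once either budget hits zero.
    fn copy_within_budget(chunk: &[u8], remaining_bytes: &mut usize, remaining_lines: &mut usize) -> usize {
        let mut copy_len = 0;
        for &b in chunk {
            if *remaining_bytes == 0 || *remaining_lines == 0 {
                break;
            }
            copy_len += 1;
            *remaining_bytes -= 1;
            if b == b'\n' {
                *remaining_lines -= 1;
            }
        }
        copy_len
    }

    fn main() {
        let (mut bytes, mut lines) = (1024, 2); // toy budgets
        let copied = copy_within_budget(b"a\nb\nc\n", &mut bytes, &mut lines);
        assert_eq!(copied, 4); // "a\nb\n": the second newline exhausts the line budget
        println!("copied {copied} bytes; {lines} lines remaining");
    }

Like the original loop, the newline that exhausts the line budget is itself copied, since the budget is decremented after the byte is counted.
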
@@ -1,57 +0,0 @@
use serde::Deserialize;
use serde::Serialize;

use crate::exec_command::session_id::SessionId;

#[derive(Debug, Clone, Deserialize)]
pub struct ExecCommandParams {
    pub(crate) cmd: String,

    #[serde(default = "default_yield_time")]
    pub(crate) yield_time_ms: u64,

    #[serde(default = "max_output_tokens")]
    pub(crate) max_output_tokens: u64,

    #[serde(default = "default_shell")]
    pub(crate) shell: String,

    #[serde(default = "default_login")]
    pub(crate) login: bool,
}

fn default_yield_time() -> u64 {
    10_000
}

fn max_output_tokens() -> u64 {
    10_000
}

fn default_login() -> bool {
    true
}

fn default_shell() -> String {
    "/bin/bash".to_string()
}

#[derive(Debug, Deserialize, Serialize)]
pub struct WriteStdinParams {
    pub(crate) session_id: SessionId,
    pub(crate) chars: String,

    #[serde(default = "write_stdin_default_yield_time_ms")]
    pub(crate) yield_time_ms: u64,

    #[serde(default = "write_stdin_default_max_output_tokens")]
    pub(crate) max_output_tokens: u64,
}

fn write_stdin_default_yield_time_ms() -> u64 {
    250
}

fn write_stdin_default_max_output_tokens() -> u64 {
    10_000
}
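Given the #[serde(default = ...)] attributes above, a request only has to carry cmd; the rest is filled in by the default functions. A hedged, test-style sketch (assumes serde_json is available, as elsewhere in the crate):

    #[test]
    fn defaults_fill_missing_fields() {
        // Only "cmd" is supplied; serde fills the rest from the default fns above.
        let params: ExecCommandParams = serde_json::from_str(r#"{"cmd":"ls -la"}"#).unwrap();
        assert_eq!(params.yield_time_ms, 10_000);
        assert_eq!(params.max_output_tokens, 10_000);
        assert_eq!(params.shell, "/bin/bash");
        assert!(params.login);
    }
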
@@ -1,83 +0,0 @@
use std::sync::Mutex as StdMutex;

use tokio::sync::broadcast;
use tokio::sync::mpsc;
use tokio::task::JoinHandle;

#[derive(Debug)]
pub(crate) struct ExecCommandSession {
    /// Queue for writing bytes to the process stdin (PTY master write side).
    writer_tx: mpsc::Sender<Vec<u8>>,
    /// Broadcast stream of output chunks read from the PTY. New subscribers
    /// receive only chunks emitted after they subscribe.
    output_tx: broadcast::Sender<Vec<u8>>,

    /// Child killer handle for termination on drop (can signal independently
    /// of a thread blocked in `.wait()`).
    killer: StdMutex<Option<Box<dyn portable_pty::ChildKiller + Send + Sync>>>,

    /// JoinHandle for the blocking PTY reader task.
    reader_handle: StdMutex<Option<JoinHandle<()>>>,

    /// JoinHandle for the stdin writer task.
    writer_handle: StdMutex<Option<JoinHandle<()>>>,

    /// JoinHandle for the child wait task.
    wait_handle: StdMutex<Option<JoinHandle<()>>>,
}

impl ExecCommandSession {
    pub(crate) fn new(
        writer_tx: mpsc::Sender<Vec<u8>>,
        output_tx: broadcast::Sender<Vec<u8>>,
        killer: Box<dyn portable_pty::ChildKiller + Send + Sync>,
        reader_handle: JoinHandle<()>,
        writer_handle: JoinHandle<()>,
        wait_handle: JoinHandle<()>,
    ) -> Self {
        Self {
            writer_tx,
            output_tx,
            killer: StdMutex::new(Some(killer)),
            reader_handle: StdMutex::new(Some(reader_handle)),
            writer_handle: StdMutex::new(Some(writer_handle)),
            wait_handle: StdMutex::new(Some(wait_handle)),
        }
    }

    pub(crate) fn writer_sender(&self) -> mpsc::Sender<Vec<u8>> {
        self.writer_tx.clone()
    }

    pub(crate) fn output_receiver(&self) -> broadcast::Receiver<Vec<u8>> {
        self.output_tx.subscribe()
    }
}

impl Drop for ExecCommandSession {
    fn drop(&mut self) {
        // Best-effort: terminate child first so blocking tasks can complete.
        if let Ok(mut killer_opt) = self.killer.lock()
            && let Some(mut killer) = killer_opt.take()
        {
            let _ = killer.kill();
        }

        // Abort background tasks; they may already have exited after kill.
        if let Ok(mut h) = self.reader_handle.lock()
            && let Some(handle) = h.take()
        {
            handle.abort();
        }
        if let Ok(mut h) = self.writer_handle.lock()
            && let Some(handle) = h.take()
        {
            handle.abort();
        }
        if let Ok(mut h) = self.wait_handle.lock()
            && let Some(handle) = h.take()
        {
            handle.abort();
        }
    }
}
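The doc comment on output_tx leans on a guarantee of tokio::sync::broadcast: a receiver returned by subscribe() only observes values sent after the subscription. A tiny self-contained demonstration:

    use tokio::sync::broadcast;

    #[tokio::main]
    async fn main() {
        let (tx, _keepalive) = broadcast::channel::<Vec<u8>>(16);
        tx.send(b"before".to_vec()).unwrap(); // sent before subscribing: never seen
        let mut rx = tx.subscribe();
        tx.send(b"after".to_vec()).unwrap();
        assert_eq!(rx.recv().await.unwrap(), b"after".to_vec());
    }

This is what lets write_stdin polls read output "from now" rather than replaying the whole session.
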
@@ -1,14 +0,0 @@
mod exec_command_params;
mod exec_command_session;
mod responses_api;
mod session_id;
mod session_manager;

pub use exec_command_params::ExecCommandParams;
pub use exec_command_params::WriteStdinParams;
pub use responses_api::EXEC_COMMAND_TOOL_NAME;
pub use responses_api::WRITE_STDIN_TOOL_NAME;
pub use responses_api::create_exec_command_tool_for_responses_api;
pub use responses_api::create_write_stdin_tool_for_responses_api;
pub use session_manager::SessionManager as ExecSessionManager;
pub use session_manager::result_into_payload;
@@ -1,98 +0,0 @@
use std::collections::BTreeMap;

use crate::openai_tools::JsonSchema;
use crate::openai_tools::ResponsesApiTool;

pub const EXEC_COMMAND_TOOL_NAME: &str = "exec_command";
pub const WRITE_STDIN_TOOL_NAME: &str = "write_stdin";

pub fn create_exec_command_tool_for_responses_api() -> ResponsesApiTool {
    let mut properties = BTreeMap::<String, JsonSchema>::new();
    properties.insert(
        "cmd".to_string(),
        JsonSchema::String {
            description: Some("The shell command to execute.".to_string()),
        },
    );
    properties.insert(
        "yield_time_ms".to_string(),
        JsonSchema::Number {
            description: Some("The maximum time in milliseconds to wait for output.".to_string()),
        },
    );
    properties.insert(
        "max_output_tokens".to_string(),
        JsonSchema::Number {
            description: Some("The maximum number of tokens to output.".to_string()),
        },
    );
    properties.insert(
        "shell".to_string(),
        JsonSchema::String {
            description: Some("The shell to use. Defaults to \"/bin/bash\".".to_string()),
        },
    );
    properties.insert(
        "login".to_string(),
        JsonSchema::Boolean {
            description: Some(
                "Whether to run the command as a login shell. Defaults to true.".to_string(),
            ),
        },
    );

    ResponsesApiTool {
        name: EXEC_COMMAND_TOOL_NAME.to_owned(),
        description: r#"Execute shell commands on the local machine with streaming output."#
            .to_string(),
        strict: false,
        parameters: JsonSchema::Object {
            properties,
            required: Some(vec!["cmd".to_string()]),
            additional_properties: Some(false),
        },
    }
}

pub fn create_write_stdin_tool_for_responses_api() -> ResponsesApiTool {
    let mut properties = BTreeMap::<String, JsonSchema>::new();
    properties.insert(
        "session_id".to_string(),
        JsonSchema::Number {
            description: Some("The ID of the exec_command session.".to_string()),
        },
    );
    properties.insert(
        "chars".to_string(),
        JsonSchema::String {
            description: Some("The characters to write to stdin.".to_string()),
        },
    );
    properties.insert(
        "yield_time_ms".to_string(),
        JsonSchema::Number {
            description: Some(
                "The maximum time in milliseconds to wait for output after writing.".to_string(),
            ),
        },
    );
    properties.insert(
        "max_output_tokens".to_string(),
        JsonSchema::Number {
            description: Some("The maximum number of tokens to output.".to_string()),
        },
    );

    ResponsesApiTool {
        name: WRITE_STDIN_TOOL_NAME.to_owned(),
        description: r#"Write characters to an exec session's stdin. Returns all stdout+stderr received within yield_time_ms.
Can write control characters (\u0003 for Ctrl-C), or an empty string to just poll stdout+stderr."#
            .to_string(),
        strict: false,
        parameters: JsonSchema::Object {
            properties,
            required: Some(vec!["session_id".to_string(), "chars".to_string()]),
            additional_properties: Some(false),
        },
    }
}
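To peek at the wire shape of the tool definitions above, a throwaway sketch; this assumes ResponsesApiTool implements Serialize, which this diff does not show:

    // Hypothetical: dump the generated schema as JSON.
    let tool = create_exec_command_tool_for_responses_api();
    println!("{}", serde_json::to_string_pretty(&tool).unwrap());

Using a BTreeMap (rather than a HashMap) for properties also means the serialized property list comes out in stable alphabetical order, a deliberate choice when the schema feeds deterministic prompts.
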
@@ -1,5 +0,0 @@
use serde::Deserialize;
use serde::Serialize;

#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub(crate) struct SessionId(pub u32);
@@ -1,674 +0,0 @@
use std::collections::HashMap;
use std::io::ErrorKind;
use std::io::Read;
use std::sync::Arc;
use std::sync::Mutex as StdMutex;
use std::sync::atomic::AtomicU32;

use portable_pty::CommandBuilder;
use portable_pty::PtySize;
use portable_pty::native_pty_system;
use tokio::sync::Mutex;
use tokio::sync::mpsc;
use tokio::sync::oneshot;
use tokio::time::Duration;
use tokio::time::Instant;
use tokio::time::timeout;

use crate::exec_command::exec_command_params::ExecCommandParams;
use crate::exec_command::exec_command_params::WriteStdinParams;
use crate::exec_command::exec_command_session::ExecCommandSession;
use crate::exec_command::session_id::SessionId;
use codex_protocol::models::FunctionCallOutputPayload;

#[derive(Debug, Default)]
pub struct SessionManager {
    next_session_id: AtomicU32,
    sessions: Mutex<HashMap<SessionId, ExecCommandSession>>,
}

#[derive(Debug)]
pub struct ExecCommandOutput {
    wall_time: Duration,
    exit_status: ExitStatus,
    original_token_count: Option<u64>,
    output: String,
}

impl ExecCommandOutput {
    fn to_text_output(&self) -> String {
        let wall_time_secs = self.wall_time.as_secs_f32();
        let termination_status = match self.exit_status {
            ExitStatus::Exited(code) => format!("Process exited with code {code}"),
            ExitStatus::Ongoing(session_id) => {
                format!("Process running with session ID {}", session_id.0)
            }
        };
        let truncation_status = match self.original_token_count {
            Some(tokens) => {
                format!("\nWarning: truncated output (original token count: {tokens})")
            }
            None => "".to_string(),
        };
        format!(
            r#"Wall time: {wall_time_secs:.3} seconds
{termination_status}{truncation_status}
Output:
{output}"#,
            output = self.output
        )
    }
}

#[derive(Debug)]
pub enum ExitStatus {
    Exited(i32),
    Ongoing(SessionId),
}

pub fn result_into_payload(result: Result<ExecCommandOutput, String>) -> FunctionCallOutputPayload {
    match result {
        Ok(output) => FunctionCallOutputPayload {
            content: output.to_text_output(),
            success: Some(true),
        },
        Err(err) => FunctionCallOutputPayload {
            content: err,
            success: Some(false),
        },
    }
}

impl SessionManager {
    /// Processes the request and is required to send a response via `outgoing`.
    pub async fn handle_exec_command_request(
        &self,
        params: ExecCommandParams,
    ) -> Result<ExecCommandOutput, String> {
        // Allocate a session id.
        let session_id = SessionId(
            self.next_session_id
                .fetch_add(1, std::sync::atomic::Ordering::SeqCst),
        );

        let (session, mut exit_rx) =
            create_exec_command_session(params.clone())
                .await
                .map_err(|err| {
                    format!(
                        "failed to create exec command session for session id {}: {err}",
                        session_id.0
                    )
                })?;

        // Insert into session map.
        let mut output_rx = session.output_receiver();
        self.sessions.lock().await.insert(session_id, session);

        // Collect output until either timeout expires or process exits.
        // Do not cap during collection; truncate at the end if needed.
        // Use a modest initial capacity to avoid large preallocation.
        let cap_bytes_u64 = params.max_output_tokens.saturating_mul(4);
        let cap_bytes: usize = cap_bytes_u64.min(usize::MAX as u64) as usize;
        let mut collected: Vec<u8> = Vec::with_capacity(4096);

        let start_time = Instant::now();
        let deadline = start_time + Duration::from_millis(params.yield_time_ms);
        let mut exit_code: Option<i32> = None;

        loop {
            if Instant::now() >= deadline {
                break;
            }
            let remaining = deadline.saturating_duration_since(Instant::now());
            tokio::select! {
                biased;
                exit = &mut exit_rx => {
                    exit_code = exit.ok();
                    // Small grace period to pull remaining buffered output
                    let grace_deadline = Instant::now() + Duration::from_millis(25);
                    while Instant::now() < grace_deadline {
                        match timeout(Duration::from_millis(1), output_rx.recv()).await {
                            Ok(Ok(chunk)) => {
                                collected.extend_from_slice(&chunk);
                            }
                            Ok(Err(tokio::sync::broadcast::error::RecvError::Lagged(_))) => {
                                // Skip missed messages; keep trying within grace period.
                                continue;
                            }
                            Ok(Err(tokio::sync::broadcast::error::RecvError::Closed)) => break,
                            Err(_) => break,
                        }
                    }
                    break;
                }
                chunk = timeout(remaining, output_rx.recv()) => {
                    match chunk {
                        Ok(Ok(chunk)) => {
                            collected.extend_from_slice(&chunk);
                        }
                        Ok(Err(tokio::sync::broadcast::error::RecvError::Lagged(_))) => {
                            // Skip missed messages; continue collecting fresh output.
                        }
                        Ok(Err(tokio::sync::broadcast::error::RecvError::Closed)) => { break; }
                        Err(_) => { break; }
                    }
                }
            }
        }

        let output = String::from_utf8_lossy(&collected).to_string();

        let exit_status = if let Some(code) = exit_code {
            ExitStatus::Exited(code)
        } else {
            ExitStatus::Ongoing(session_id)
        };

        // If output exceeds cap, truncate the middle and record original token estimate.
        let (output, original_token_count) = truncate_middle(&output, cap_bytes);
        Ok(ExecCommandOutput {
            wall_time: Instant::now().duration_since(start_time),
            exit_status,
            original_token_count,
            output,
        })
    }

    /// Write characters to a session's stdin and collect combined output for up to `yield_time_ms`.
    pub async fn handle_write_stdin_request(
        &self,
        params: WriteStdinParams,
    ) -> Result<ExecCommandOutput, String> {
        let WriteStdinParams {
            session_id,
            chars,
            yield_time_ms,
            max_output_tokens,
        } = params;

        // Grab handles without holding the sessions lock across await points.
        let (writer_tx, mut output_rx) = {
            let sessions = self.sessions.lock().await;
            match sessions.get(&session_id) {
                Some(session) => (session.writer_sender(), session.output_receiver()),
                None => {
                    return Err(format!("unknown session id {}", session_id.0));
                }
            }
        };

        // Write stdin if provided.
        if !chars.is_empty() && writer_tx.send(chars.into_bytes()).await.is_err() {
            return Err("failed to write to stdin".to_string());
        }

        // Collect output up to yield_time_ms, truncating to max_output_tokens bytes.
        let mut collected: Vec<u8> = Vec::with_capacity(4096);
        let start_time = Instant::now();
        let deadline = start_time + Duration::from_millis(yield_time_ms);
        loop {
            let now = Instant::now();
            if now >= deadline {
                break;
            }
            let remaining = deadline - now;
            match timeout(remaining, output_rx.recv()).await {
                Ok(Ok(chunk)) => {
                    // Collect all output within the time budget; truncate at the end.
                    collected.extend_from_slice(&chunk);
                }
                Ok(Err(tokio::sync::broadcast::error::RecvError::Lagged(_))) => {
                    // Skip missed messages; continue collecting fresh output.
                }
                Ok(Err(tokio::sync::broadcast::error::RecvError::Closed)) => break,
                Err(_) => break, // timeout
            }
        }

        // Return structured output, truncating middle if over cap.
        let output = String::from_utf8_lossy(&collected).to_string();
        let cap_bytes_u64 = max_output_tokens.saturating_mul(4);
        let cap_bytes: usize = cap_bytes_u64.min(usize::MAX as u64) as usize;
        let (output, original_token_count) = truncate_middle(&output, cap_bytes);
        Ok(ExecCommandOutput {
            wall_time: Instant::now().duration_since(start_time),
            exit_status: ExitStatus::Ongoing(session_id),
            original_token_count,
            output,
        })
    }
}

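Both handlers derive the byte cap from the token budget with the same 4-bytes-per-token heuristic, so max_output_tokens = 16 caps output at 64 bytes, which is exactly the bound the streaming test further down asserts. A one-line sketch:

    fn main() {
        // 4-bytes-per-token heuristic shared by both handlers above.
        let max_output_tokens: u64 = 16;
        let cap_bytes = max_output_tokens.saturating_mul(4);
        assert_eq!(cap_bytes, 64);
    }
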
/// Spawn PTY and child process per spawn_exec_command_session logic.
async fn create_exec_command_session(
    params: ExecCommandParams,
) -> anyhow::Result<(ExecCommandSession, oneshot::Receiver<i32>)> {
    let ExecCommandParams {
        cmd,
        yield_time_ms: _,
        max_output_tokens: _,
        shell,
        login,
    } = params;

    // Use the native pty implementation for the system
    let pty_system = native_pty_system();

    // Create a new pty
    let pair = pty_system.openpty(PtySize {
        rows: 24,
        cols: 80,
        pixel_width: 0,
        pixel_height: 0,
    })?;

    // Spawn a shell into the pty
    let mut command_builder = CommandBuilder::new(shell);
    let shell_mode_opt = if login { "-lc" } else { "-c" };
    command_builder.arg(shell_mode_opt);
    command_builder.arg(cmd);

    let mut child = pair.slave.spawn_command(command_builder)?;
    // Obtain a killer that can signal the process independently of `.wait()`.
    let killer = child.clone_killer();

    // Channel to forward write requests to the PTY writer.
    let (writer_tx, mut writer_rx) = mpsc::channel::<Vec<u8>>(128);
    // Broadcast for streaming PTY output to readers: subscribers receive from subscription time.
    let (output_tx, _) = tokio::sync::broadcast::channel::<Vec<u8>>(256);

    // Reader task: drain PTY and forward chunks to output channel.
    let mut reader = pair.master.try_clone_reader()?;
    let output_tx_clone = output_tx.clone();
    let reader_handle = tokio::task::spawn_blocking(move || {
        let mut buf = [0u8; 8192];
        loop {
            match reader.read(&mut buf) {
                Ok(0) => break, // EOF
                Ok(n) => {
                    // Forward to broadcast; best-effort if there are subscribers.
                    let _ = output_tx_clone.send(buf[..n].to_vec());
                }
                Err(ref e) if e.kind() == ErrorKind::Interrupted => {
                    // Retry on EINTR
                    continue;
                }
                Err(ref e) if e.kind() == ErrorKind::WouldBlock => {
                    // We're in a blocking thread; back off briefly and retry.
                    std::thread::sleep(Duration::from_millis(5));
                    continue;
                }
                Err(_) => break,
            }
        }
    });

    // Writer task: apply stdin writes to the PTY writer.
    let writer = pair.master.take_writer()?;
    let writer = Arc::new(StdMutex::new(writer));
    let writer_handle = tokio::spawn({
        let writer = writer.clone();
        async move {
            while let Some(bytes) = writer_rx.recv().await {
                let writer = writer.clone();
                // Perform blocking write on a blocking thread.
                let _ = tokio::task::spawn_blocking(move || {
                    if let Ok(mut guard) = writer.lock() {
                        use std::io::Write;
                        let _ = guard.write_all(&bytes);
                        let _ = guard.flush();
                    }
                })
                .await;
            }
        }
    });

    // Keep the child alive until it exits, then signal exit code.
    let (exit_tx, exit_rx) = oneshot::channel::<i32>();
    let wait_handle = tokio::task::spawn_blocking(move || {
        let code = match child.wait() {
            Ok(status) => status.exit_code() as i32,
            Err(_) => -1,
        };
        let _ = exit_tx.send(code);
    });

    // Create and store the session with channels.
    let session = ExecCommandSession::new(
        writer_tx,
        output_tx,
        killer,
        reader_handle,
        writer_handle,
        wait_handle,
    );
    Ok((session, exit_rx))
}

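One detail worth calling out in the spawn path above: login only toggles the flag string handed to the shell, so the effective argv (quoting informal) is:

    // login == true  -> /bin/bash -lc '<cmd>'
    // login == false -> /bin/bash -c  '<cmd>'

A login shell sources profile files, which matters for commands that depend on PATH or environment set up in ~/.profile or ~/.bash_profile.
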
/// Truncate the middle of a UTF-8 string to at most `max_bytes` bytes,
/// preserving the beginning and the end. Returns the possibly truncated
/// string and `Some(original_token_count)` (estimated at 4 bytes/token)
/// if truncation occurred; otherwise returns the original string and `None`.
fn truncate_middle(s: &str, max_bytes: usize) -> (String, Option<u64>) {
    // No truncation needed
    if s.len() <= max_bytes {
        return (s.to_string(), None);
    }
    let est_tokens = (s.len() as u64).div_ceil(4);
    if max_bytes == 0 {
        // Cannot keep any content; still return a full marker (never truncated).
        return (
            format!("…{} tokens truncated…", est_tokens),
            Some(est_tokens),
        );
    }

    // Helper to truncate a string to a given byte length on a char boundary.
    fn truncate_on_boundary(input: &str, max_len: usize) -> &str {
        if input.len() <= max_len {
            return input;
        }
        let mut end = max_len;
        while end > 0 && !input.is_char_boundary(end) {
            end -= 1;
        }
        &input[..end]
    }

    // Given a left/right budget, prefer newline boundaries; otherwise fall back
    // to UTF-8 char boundaries.
    fn pick_prefix_end(s: &str, left_budget: usize) -> usize {
        if let Some(head) = s.get(..left_budget)
            && let Some(i) = head.rfind('\n')
        {
            return i + 1; // keep the newline so suffix starts on a fresh line
        }
        truncate_on_boundary(s, left_budget).len()
    }

    fn pick_suffix_start(s: &str, right_budget: usize) -> usize {
        let start_tail = s.len().saturating_sub(right_budget);
        if let Some(tail) = s.get(start_tail..)
            && let Some(i) = tail.find('\n')
        {
            return start_tail + i + 1; // start after newline
        }
        // Fall back to a char boundary at or after start_tail.
        let mut idx = start_tail.min(s.len());
        while idx < s.len() && !s.is_char_boundary(idx) {
            idx += 1;
        }
        idx
    }

    // Refine marker length and budgets until stable. Marker is never truncated.
    let mut guess_tokens = est_tokens; // worst-case: everything truncated
    for _ in 0..4 {
        let marker = format!("…{} tokens truncated…", guess_tokens);
        let marker_len = marker.len();
        let keep_budget = max_bytes.saturating_sub(marker_len);
        if keep_budget == 0 {
            // No room for any content within the cap; return a full, untruncated marker
            // that reflects the entire truncated content.
            return (
                format!("…{} tokens truncated…", est_tokens),
                Some(est_tokens),
            );
        }

        let left_budget = keep_budget / 2;
        let right_budget = keep_budget - left_budget;
        let prefix_end = pick_prefix_end(s, left_budget);
        let mut suffix_start = pick_suffix_start(s, right_budget);
        if suffix_start < prefix_end {
            suffix_start = prefix_end;
        }
        let kept_content_bytes = prefix_end + (s.len() - suffix_start);
        let truncated_content_bytes = s.len().saturating_sub(kept_content_bytes);
        let new_tokens = (truncated_content_bytes as u64).div_ceil(4);
        if new_tokens == guess_tokens {
            let mut out = String::with_capacity(marker_len + kept_content_bytes + 1);
            out.push_str(&s[..prefix_end]);
            out.push_str(&marker);
            // Place marker on its own line for symmetry when we keep line boundaries.
            out.push('\n');
            out.push_str(&s[suffix_start..]);
            return (out, Some(est_tokens));
        }
        guess_tokens = new_tokens;
    }

    // Fallback: use last guess to build output.
    let marker = format!("…{} tokens truncated…", guess_tokens);
    let marker_len = marker.len();
    let keep_budget = max_bytes.saturating_sub(marker_len);
    if keep_budget == 0 {
        return (
            format!("…{} tokens truncated…", est_tokens),
            Some(est_tokens),
        );
    }
    let left_budget = keep_budget / 2;
    let right_budget = keep_budget - left_budget;
    let prefix_end = pick_prefix_end(s, left_budget);
    let suffix_start = pick_suffix_start(s, right_budget);
    let mut out = String::with_capacity(marker_len + prefix_end + (s.len() - suffix_start) + 1);
    out.push_str(&s[..prefix_end]);
    out.push_str(&marker);
    out.push('\n');
    out.push_str(&s[suffix_start..]);
    (out, Some(est_tokens))
}

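A worked pass through the fixed-point loop, using the 20-line input from the test further down (80 bytes, max_bytes = 64): the first guess is the worst case of 20 tokens, giving the 25-byte marker "…20 tokens truncated…" ("…" is 3 bytes in UTF-8), so keep_budget = 39 and the budgets split 19/20. Newline snapping keeps 4 full lines (16 bytes) on each side, so 48 bytes are dropped, i.e. 12 tokens. 12 differs from the 20-token guess, so the loop re-runs with a 12-token marker, which happens to be 25 bytes again; the budgets and kept lines are unchanged, the guess is now stable, and the output is prefix + marker + newline + suffix at 58 bytes, under the 64-byte cap. Note the returned count is the 20-token estimate for the whole input, while the marker reports the 12 tokens actually dropped.
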
#[cfg(test)]
mod tests {
    use super::*;
    use crate::exec_command::session_id::SessionId;

    /// Test that verifies that [`SessionManager::handle_exec_command_request()`]
    /// and [`SessionManager::handle_write_stdin_request()`] work as expected
    /// in the presence of a process that never terminates (but produces
    /// output continuously).
    #[cfg(unix)]
    #[allow(clippy::print_stderr)]
    #[tokio::test(flavor = "multi_thread", worker_threads = 4)]
    async fn session_manager_streams_and_truncates_from_now() {
        use crate::exec_command::exec_command_params::ExecCommandParams;
        use crate::exec_command::exec_command_params::WriteStdinParams;
        use tokio::time::sleep;

        let session_manager = SessionManager::default();
        // Long-running loop that prints an increasing counter every ~100ms.
        // Use Python for a portable, reliable sleep across shells/PTYs.
        let cmd = r#"python3 - <<'PY'
import sys, time
count = 0
while True:
    print(count)
    sys.stdout.flush()
    count += 100
    time.sleep(0.1)
PY"#
        .to_string();

        // Start the session and collect ~3s of output.
        let params = ExecCommandParams {
            cmd,
            yield_time_ms: 3_000,
            max_output_tokens: 1_000, // large enough to avoid truncation here
            shell: "/bin/bash".to_string(),
            login: false,
        };
        let initial_output = match session_manager
            .handle_exec_command_request(params.clone())
            .await
        {
            Ok(v) => v,
            Err(e) => {
                // PTY may be restricted in some sandboxes; skip in that case.
                if e.contains("openpty") || e.contains("Operation not permitted") {
                    eprintln!("skipping test due to restricted PTY: {e}");
                    return;
                }
                panic!("exec request failed unexpectedly: {e}");
            }
        };
        eprintln!("initial output: {initial_output:?}");

        // Should be ongoing (we launched a never-ending loop).
        let session_id = match initial_output.exit_status {
            ExitStatus::Ongoing(id) => id,
            _ => panic!("expected ongoing session"),
        };

        // Parse the numeric lines and get the max observed value in the first window.
        let first_nums = extract_monotonic_numbers(&initial_output.output);
        assert!(
            !first_nums.is_empty(),
            "expected some output from first window"
        );
        let first_max = *first_nums.iter().max().unwrap();

        // Wait ~4s so counters progress while we're not reading.
        sleep(Duration::from_millis(4_000)).await;

        // Now read ~3s of output "from now" only.
        // Use a small token cap so truncation occurs and we test middle truncation.
        let write_params = WriteStdinParams {
            session_id,
            chars: String::new(),
            yield_time_ms: 3_000,
            max_output_tokens: 16, // 16 tokens ~= 64 bytes -> likely truncation
        };
        let second = session_manager
            .handle_write_stdin_request(write_params)
            .await
            .expect("write stdin should succeed");

        // Verify truncation metadata and size bound (cap is tokens*4 bytes).
        assert!(second.original_token_count.is_some());
        let cap_bytes = (16u64 * 4) as usize;
        assert!(second.output.len() <= cap_bytes);
        // New middle marker should be present.
        assert!(
            second.output.contains("tokens truncated") && second.output.contains('…'),
            "expected truncation marker in output, got: {}",
            second.output
        );

        // Minimal freshness check: the earliest number we see in the second window
        // should be significantly larger than the last from the first window.
        let second_nums = extract_monotonic_numbers(&second.output);
        assert!(
            !second_nums.is_empty(),
            "expected some numeric output from second window"
        );
        let second_min = *second_nums.iter().min().unwrap();

        // We slept 4 seconds (~40 ticks at 100ms/tick, each +100), so expect
        // an increase of roughly 4000 or more. Allow a generous margin.
        assert!(
            second_min >= first_max + 2000,
            "second_min={second_min} first_max={first_max}",
        );
    }

    #[cfg(unix)]
    fn extract_monotonic_numbers(s: &str) -> Vec<i64> {
        s.lines()
            .filter_map(|line| {
                if !line.is_empty()
                    && line.chars().all(|c| c.is_ascii_digit())
                    && let Ok(n) = line.parse::<i64>()
                {
                    // Our generator increments by 100; ignore spurious fragments.
                    if n % 100 == 0 {
                        return Some(n);
                    }
                }
                None
            })
            .collect()
    }

    #[test]
    fn to_text_output_exited_no_truncation() {
        let out = ExecCommandOutput {
            wall_time: Duration::from_millis(1234),
            exit_status: ExitStatus::Exited(0),
            original_token_count: None,
            output: "hello".to_string(),
        };
        let text = out.to_text_output();
        let expected = r#"Wall time: 1.234 seconds
Process exited with code 0
Output:
hello"#;
        assert_eq!(expected, text);
    }

    #[test]
    fn to_text_output_ongoing_with_truncation() {
        let out = ExecCommandOutput {
            wall_time: Duration::from_millis(500),
            exit_status: ExitStatus::Ongoing(SessionId(42)),
            original_token_count: Some(1000),
            output: "abc".to_string(),
        };
        let text = out.to_text_output();
        let expected = r#"Wall time: 0.500 seconds
Process running with session ID 42
Warning: truncated output (original token count: 1000)
Output:
abc"#;
        assert_eq!(expected, text);
    }

    #[test]
    fn truncate_middle_no_newlines_fallback() {
        // A long string with no newlines that exceeds the cap.
        let s = "abcdefghijklmnopqrstuvwxyz0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ";
        let max_bytes = 16; // force truncation
        let (out, original) = truncate_middle(s, max_bytes);
        // For very small caps, we return the full, untruncated marker,
        // even if it exceeds the cap.
        assert_eq!(out, "…16 tokens truncated…");
        // Original string length is 62 bytes => ceil(62/4) = 16 tokens.
        assert_eq!(original, Some(16));
    }

    #[test]
    fn truncate_middle_prefers_newline_boundaries() {
        // Build a multi-line string of 20 numbered lines (each "NNN\n").
        let mut s = String::new();
        for i in 1..=20 {
            s.push_str(&format!("{i:03}\n"));
        }
        // Total length: 20 lines * 4 bytes per line = 80 bytes.
        assert_eq!(s.len(), 80);

        // Choose a cap that forces truncation while leaving room for
        // a few lines on each side after accounting for the marker.
        let max_bytes = 64;
        // Expect exact output: first 4 lines, marker, last 4 lines, and correct token estimate (80/4 = 20).
        assert_eq!(
            truncate_middle(&s, max_bytes),
            (
                r#"001
002
003
004
…12 tokens truncated…
017
018
019
020
"#
                .to_string(),
                Some(20)
            )
        );
    }
}

@@ -20,7 +20,6 @@ mod conversation_history;
mod environment_context;
pub mod error;
pub mod exec;
mod exec_command;
pub mod exec_env;
mod flags;
pub mod git_info;

@@ -90,6 +90,7 @@ pub fn find_family_for_model(slug: &str) -> Option<ModelFamily> {
        model_family!(
            slug, slug,
            supports_reasoning_summaries: true,
            apply_patch_tool_type: Some(ApplyPatchToolType::Freeform),
        )
    } else if slug.starts_with("gpt-4.1") {
        model_family!(
@@ -106,6 +107,7 @@ pub fn find_family_for_model(slug: &str) -> Option<ModelFamily> {
        model_family!(
            slug, "gpt-5",
            supports_reasoning_summaries: true,
            apply_patch_tool_type: Some(ApplyPatchToolType::Freeform),
        )
    } else {
        None

@@ -79,13 +79,13 @@ pub(crate) fn get_model_info(model_family: &ModelFamily) -> Option<ModelInfo> {
        }),

        "gpt-5" => Some(ModelInfo {
            context_window: 400_000,
            max_output_tokens: 128_000,
            context_window: 200_000,
            max_output_tokens: 100_000,
        }),

        _ if slug.starts_with("codex-") => Some(ModelInfo {
            context_window: 400_000,
            max_output_tokens: 128_000,
            context_window: 200_000,
            max_output_tokens: 100_000,
        }),

        _ => None,

@@ -47,8 +47,6 @@ pub(crate) enum OpenAiTool {
    Function(ResponsesApiTool),
    #[serde(rename = "local_shell")]
    LocalShell {},
    #[serde(rename = "web_search")]
    WebSearch {},
    #[serde(rename = "custom")]
    Freeform(FreeformTool),
}
@@ -58,46 +56,29 @@ pub enum ConfigShellToolType {
    DefaultShell,
    ShellWithRequest { sandbox_policy: SandboxPolicy },
    LocalShell,
    StreamableShell,
}

#[derive(Debug, Clone)]
pub(crate) struct ToolsConfig {
pub struct ToolsConfig {
    pub shell_type: ConfigShellToolType,
    pub plan_tool: bool,
    pub apply_patch_tool_type: Option<ApplyPatchToolType>,
    pub web_search_request: bool,
}

pub(crate) struct ToolsConfigParams<'a> {
    pub(crate) model_family: &'a ModelFamily,
    pub(crate) approval_policy: AskForApproval,
    pub(crate) sandbox_policy: SandboxPolicy,
    pub(crate) include_plan_tool: bool,
    pub(crate) include_apply_patch_tool: bool,
    pub(crate) include_web_search_request: bool,
    pub(crate) use_streamable_shell_tool: bool,
}

impl ToolsConfig {
    pub fn new(params: &ToolsConfigParams) -> Self {
        let ToolsConfigParams {
            model_family,
            approval_policy,
            sandbox_policy,
            include_plan_tool,
            include_apply_patch_tool,
            include_web_search_request,
            use_streamable_shell_tool,
        } = params;
        let mut shell_type = if *use_streamable_shell_tool {
            ConfigShellToolType::StreamableShell
        } else if model_family.uses_local_shell_tool {
    pub fn new(
        model_family: &ModelFamily,
        approval_policy: AskForApproval,
        sandbox_policy: SandboxPolicy,
        include_plan_tool: bool,
        include_apply_patch_tool: bool,
    ) -> Self {
        let mut shell_type = if model_family.uses_local_shell_tool {
            ConfigShellToolType::LocalShell
        } else {
            ConfigShellToolType::DefaultShell
        };
        if matches!(approval_policy, AskForApproval::OnRequest) && !use_streamable_shell_tool {
        if matches!(approval_policy, AskForApproval::OnRequest) {
            shell_type = ConfigShellToolType::ShellWithRequest {
                sandbox_policy: sandbox_policy.clone(),
            }
@@ -107,7 +88,7 @@ impl ToolsConfig {
            Some(ApplyPatchToolType::Freeform) => Some(ApplyPatchToolType::Freeform),
            Some(ApplyPatchToolType::Function) => Some(ApplyPatchToolType::Function),
            None => {
                if *include_apply_patch_tool {
                if include_apply_patch_tool {
                    Some(ApplyPatchToolType::Freeform)
                } else {
                    None
@@ -117,9 +98,8 @@ impl ToolsConfig {

        Self {
            shell_type,
            plan_tool: *include_plan_tool,
            plan_tool: include_plan_tool,
            apply_patch_tool_type,
            web_search_request: *include_web_search_request,
        }
    }
}

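The params-struct side of this hunk trades five positional arguments for named fields; a call site under that API reads like the tests below:

    // Call site under the ToolsConfigParams constructor shown above.
    let config = ToolsConfig::new(&ToolsConfigParams {
        model_family: &model_family,
        approval_policy: AskForApproval::OnRequest,
        sandbox_policy: SandboxPolicy::ReadOnly,
        include_plan_tool: false,
        include_apply_patch_tool: false,
        include_web_search_request: true,
        use_streamable_shell_tool: false,
    });

Named fields make it much harder to transpose the boolean toggles than the positional form on the other side of the hunk.
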
@@ -512,14 +492,6 @@ pub(crate) fn get_openai_tools(
        ConfigShellToolType::LocalShell => {
            tools.push(OpenAiTool::LocalShell {});
        }
        ConfigShellToolType::StreamableShell => {
            tools.push(OpenAiTool::Function(
                crate::exec_command::create_exec_command_tool_for_responses_api(),
            ));
            tools.push(OpenAiTool::Function(
                crate::exec_command::create_write_stdin_tool_for_responses_api(),
            ));
        }
    }

    if config.plan_tool {
@@ -537,17 +509,8 @@ pub(crate) fn get_openai_tools(
        }
    }

    if config.web_search_request {
        tools.push(OpenAiTool::WebSearch {});
    }

    if let Some(mcp_tools) = mcp_tools {
        // Ensure deterministic ordering to maximize prompt cache hits.
        // HashMap iteration order is non-deterministic, so sort by fully-qualified tool name.
        let mut entries: Vec<(String, mcp_types::Tool)> = mcp_tools.into_iter().collect();
        entries.sort_by(|a, b| a.0.cmp(&b.0));

        for (name, tool) in entries.into_iter() {
        for (name, tool) in mcp_tools {
            match mcp_tool_to_openai_tool(name.clone(), tool.clone()) {
                Ok(converted_tool) => tools.push(OpenAiTool::Function(converted_tool)),
                Err(e) => {
@@ -574,7 +537,6 @@ mod tests {
            .map(|tool| match tool {
                OpenAiTool::Function(ResponsesApiTool { name, .. }) => name,
                OpenAiTool::LocalShell {} => "local_shell",
                OpenAiTool::WebSearch {} => "web_search",
                OpenAiTool::Freeform(FreeformTool { name, .. }) => name,
            })
            .collect::<Vec<_>>();
@@ -596,49 +558,43 @@ mod tests {
fn test_get_openai_tools() {
let model_family = find_family_for_model("codex-mini-latest")
.expect("codex-mini-latest should be a valid model family");
let config = ToolsConfig::new(&ToolsConfigParams {
model_family: &model_family,
approval_policy: AskForApproval::Never,
sandbox_policy: SandboxPolicy::ReadOnly,
include_plan_tool: true,
include_apply_patch_tool: false,
include_web_search_request: true,
use_streamable_shell_tool: false,
});
let config = ToolsConfig::new(
&model_family,
AskForApproval::Never,
SandboxPolicy::ReadOnly,
true,
false,
);
let tools = get_openai_tools(&config, Some(HashMap::new()));

assert_eq_tool_names(&tools, &["local_shell", "update_plan", "web_search"]);
assert_eq_tool_names(&tools, &["local_shell", "update_plan"]);
}

#[test]
fn test_get_openai_tools_default_shell() {
let model_family = find_family_for_model("o3").expect("o3 should be a valid model family");
let config = ToolsConfig::new(&ToolsConfigParams {
model_family: &model_family,
approval_policy: AskForApproval::Never,
sandbox_policy: SandboxPolicy::ReadOnly,
include_plan_tool: true,
include_apply_patch_tool: false,
include_web_search_request: true,
use_streamable_shell_tool: false,
});
let config = ToolsConfig::new(
&model_family,
AskForApproval::Never,
SandboxPolicy::ReadOnly,
true,
false,
);
let tools = get_openai_tools(&config, Some(HashMap::new()));

assert_eq_tool_names(&tools, &["shell", "update_plan", "web_search"]);
assert_eq_tool_names(&tools, &["shell", "update_plan"]);
}

#[test]
fn test_get_openai_tools_mcp_tools() {
let model_family = find_family_for_model("o3").expect("o3 should be a valid model family");
let config = ToolsConfig::new(&ToolsConfigParams {
model_family: &model_family,
approval_policy: AskForApproval::Never,
sandbox_policy: SandboxPolicy::ReadOnly,
include_plan_tool: false,
include_apply_patch_tool: false,
include_web_search_request: true,
use_streamable_shell_tool: false,
});
let config = ToolsConfig::new(
&model_family,
AskForApproval::Never,
SandboxPolicy::ReadOnly,
false,
false,
);
let tools = get_openai_tools(
&config,
Some(HashMap::from([(
@@ -660,8 +616,8 @@ mod tests {
"number_property": { "type": "number" },
},
"required": [
"string_property".to_string(),
"number_property".to_string()
"string_property",
"number_property"
],
"additionalProperties": Some(false),
},
@@ -677,13 +633,10 @@ mod tests {
)])),
);

assert_eq_tool_names(
&tools,
&["shell", "web_search", "test_server/do_something_cool"],
);
assert_eq_tool_names(&tools, &["shell", "test_server/do_something_cool"]);

assert_eq!(
tools[2],
tools[1],
OpenAiTool::Function(ResponsesApiTool {
name: "test_server/do_something_cool".to_string(),
parameters: JsonSchema::Object {
@@ -726,93 +679,16 @@ mod tests {
);
}

#[test]
fn test_get_openai_tools_mcp_tools_sorted_by_name() {
let model_family = find_family_for_model("o3").expect("o3 should be a valid model family");
let config = ToolsConfig::new(&ToolsConfigParams {
model_family: &model_family,
approval_policy: AskForApproval::Never,
sandbox_policy: SandboxPolicy::ReadOnly,
include_plan_tool: false,
include_apply_patch_tool: false,
include_web_search_request: false,
use_streamable_shell_tool: false,
});

// Intentionally construct a map with keys that would sort alphabetically.
let tools_map: HashMap<String, mcp_types::Tool> = HashMap::from([
(
"test_server/do".to_string(),
mcp_types::Tool {
name: "a".to_string(),
input_schema: ToolInputSchema {
properties: Some(serde_json::json!({})),
required: None,
r#type: "object".to_string(),
},
output_schema: None,
title: None,
annotations: None,
description: Some("a".to_string()),
},
),
(
"test_server/something".to_string(),
mcp_types::Tool {
name: "b".to_string(),
input_schema: ToolInputSchema {
properties: Some(serde_json::json!({})),
required: None,
r#type: "object".to_string(),
},
output_schema: None,
title: None,
annotations: None,
description: Some("b".to_string()),
},
),
(
"test_server/cool".to_string(),
mcp_types::Tool {
name: "c".to_string(),
input_schema: ToolInputSchema {
properties: Some(serde_json::json!({})),
required: None,
r#type: "object".to_string(),
},
output_schema: None,
title: None,
annotations: None,
description: Some("c".to_string()),
},
),
]);

let tools = get_openai_tools(&config, Some(tools_map));
// Expect shell first, followed by MCP tools sorted by fully-qualified name.
assert_eq_tool_names(
&tools,
&[
"shell",
"test_server/cool",
"test_server/do",
"test_server/something",
],
);
}
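The sort exercised by the test above exists purely to make tool ordering deterministic across requests, since HashMap iteration order is unspecified. A minimal sketch of the idea, detached from the crate's types:

use std::collections::HashMap;

fn sorted_tool_names(tools: &HashMap<String, String>) -> Vec<String> {
    // Sorting by key yields a stable order regardless of HashMap iteration order,
    // which keeps the serialized prompt byte-identical between turns.
    let mut names: Vec<String> = tools.keys().cloned().collect();
    names.sort();
    names
}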

#[test]
fn test_mcp_tool_property_missing_type_defaults_to_string() {
let model_family = find_family_for_model("o3").expect("o3 should be a valid model family");
let config = ToolsConfig::new(&ToolsConfigParams {
model_family: &model_family,
approval_policy: AskForApproval::Never,
sandbox_policy: SandboxPolicy::ReadOnly,
include_plan_tool: false,
include_apply_patch_tool: false,
include_web_search_request: true,
use_streamable_shell_tool: false,
});
let config = ToolsConfig::new(
&model_family,
AskForApproval::Never,
SandboxPolicy::ReadOnly,
false,
false,
);

let tools = get_openai_tools(
&config,
@@ -837,10 +713,10 @@ mod tests {
)])),
);

assert_eq_tool_names(&tools, &["shell", "web_search", "dash/search"]);
assert_eq_tool_names(&tools, &["shell", "dash/search"]);

assert_eq!(
tools[2],
tools[1],
OpenAiTool::Function(ResponsesApiTool {
name: "dash/search".to_string(),
parameters: JsonSchema::Object {
@@ -862,15 +738,13 @@ mod tests {
#[test]
fn test_mcp_tool_integer_normalized_to_number() {
let model_family = find_family_for_model("o3").expect("o3 should be a valid model family");
let config = ToolsConfig::new(&ToolsConfigParams {
model_family: &model_family,
approval_policy: AskForApproval::Never,
sandbox_policy: SandboxPolicy::ReadOnly,
include_plan_tool: false,
include_apply_patch_tool: false,
include_web_search_request: true,
use_streamable_shell_tool: false,
});
let config = ToolsConfig::new(
&model_family,
AskForApproval::Never,
SandboxPolicy::ReadOnly,
false,
false,
);

let tools = get_openai_tools(
&config,
@@ -893,9 +767,9 @@ mod tests {
)])),
);

assert_eq_tool_names(&tools, &["shell", "web_search", "dash/paginate"]);
assert_eq_tool_names(&tools, &["shell", "dash/paginate"]);
assert_eq!(
tools[2],
tools[1],
OpenAiTool::Function(ResponsesApiTool {
name: "dash/paginate".to_string(),
parameters: JsonSchema::Object {
@@ -915,15 +789,13 @@ mod tests {
#[test]
fn test_mcp_tool_array_without_items_gets_default_string_items() {
let model_family = find_family_for_model("o3").expect("o3 should be a valid model family");
let config = ToolsConfig::new(&ToolsConfigParams {
model_family: &model_family,
approval_policy: AskForApproval::Never,
sandbox_policy: SandboxPolicy::ReadOnly,
include_plan_tool: false,
include_apply_patch_tool: false,
include_web_search_request: true,
use_streamable_shell_tool: false,
});
let config = ToolsConfig::new(
&model_family,
AskForApproval::Never,
SandboxPolicy::ReadOnly,
false,
false,
);

let tools = get_openai_tools(
&config,
@@ -946,9 +818,9 @@ mod tests {
)])),
);

assert_eq_tool_names(&tools, &["shell", "web_search", "dash/tags"]);
assert_eq_tool_names(&tools, &["shell", "dash/tags"]);
assert_eq!(
tools[2],
tools[1],
OpenAiTool::Function(ResponsesApiTool {
name: "dash/tags".to_string(),
parameters: JsonSchema::Object {
@@ -971,15 +843,13 @@ mod tests {
#[test]
fn test_mcp_tool_anyof_defaults_to_string() {
let model_family = find_family_for_model("o3").expect("o3 should be a valid model family");
let config = ToolsConfig::new(&ToolsConfigParams {
model_family: &model_family,
approval_policy: AskForApproval::Never,
sandbox_policy: SandboxPolicy::ReadOnly,
include_plan_tool: false,
include_apply_patch_tool: false,
include_web_search_request: true,
use_streamable_shell_tool: false,
});
let config = ToolsConfig::new(
&model_family,
AskForApproval::Never,
SandboxPolicy::ReadOnly,
false,
false,
);

let tools = get_openai_tools(
&config,
@@ -1002,9 +872,9 @@ mod tests {
)])),
);

assert_eq_tool_names(&tools, &["shell", "web_search", "dash/value"]);
assert_eq_tool_names(&tools, &["shell", "dash/value"]);
assert_eq!(
tools[2],
tools[1],
OpenAiTool::Function(ResponsesApiTool {
name: "dash/value".to_string(),
parameters: JsonSchema::Object {

@@ -1,3 +0,0 @@
// Single integration test binary that aggregates all test modules.
// The submodules live in `tests/all/`.
mod suite;
@@ -70,12 +70,12 @@ async fn truncates_output_lines() {

let output = run_test_cmd(tmp, cmd).await.unwrap();

let expected_output = (1..=300)
let expected_output = (1..=256)
.map(|i| format!("{i}\n"))
.collect::<Vec<_>>()
.join("");
assert_eq!(output.stdout.text, expected_output);
assert_eq!(output.stdout.truncated_after_lines, None);
assert_eq!(output.stdout.truncated_after_lines, Some(256));
}

/// Command succeeds with exit code 0 normally
@@ -91,8 +91,8 @@ async fn truncates_output_bytes() {

let output = run_test_cmd(tmp, cmd).await.unwrap();

assert!(output.stdout.text.len() >= 15000);
assert_eq!(output.stdout.truncated_after_lines, None);
assert_eq!(output.stdout.text.len(), 10240);
assert_eq!(output.stdout.truncated_after_lines, Some(10));
}
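Per the updated assertions above, exec output is now capped rather than passed through (256 lines in the line test, 10,240 bytes in the byte test). A minimal sketch of line-based truncation under those assumptions, returning the kept text plus a truncated_after_lines-style marker:

fn truncate_lines(text: &str, max_lines: usize) -> (String, Option<usize>) {
    let lines: Vec<&str> = text.lines().collect();
    if lines.len() <= max_lines {
        return (text.to_string(), None);
    }
    // Keep the first `max_lines` lines and record where truncation happened.
    let kept = lines[..max_lines].join("\n") + "\n";
    (kept, Some(max_lines))
}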

/// Command not found returns exit code 127, this is not considered a sandbox error
@@ -139,34 +139,3 @@ async fn test_exec_stderr_stream_events_echo() {
}
assert_eq!(String::from_utf8_lossy(&err), "oops\n");
}

#[tokio::test]
async fn test_aggregated_output_interleaves_in_order() {
// Spawn a shell that alternates stdout and stderr with sleeps to enforce order.
let cmd = vec![
"/bin/sh".to_string(),
"-c".to_string(),
"printf 'O1\\n'; sleep 0.01; printf 'E1\\n' 1>&2; sleep 0.01; printf 'O2\\n'; sleep 0.01; printf 'E2\\n' 1>&2".to_string(),
];

let params = ExecParams {
command: cmd,
cwd: std::env::current_dir().unwrap_or_else(|_| PathBuf::from(".")),
timeout_ms: Some(5_000),
env: HashMap::new(),
with_escalated_permissions: None,
justification: None,
};

let policy = SandboxPolicy::new_read_only_policy();

let result = process_exec_tool_call(params, SandboxType::None, &policy, &None, None)
.await
.expect("process_exec_tool_call");

assert_eq!(result.exit_code, 0);
assert_eq!(result.stdout.text, "O1\nO2\n");
assert_eq!(result.stderr.text, "E1\nE2\n");
assert_eq!(result.aggregated_output.text, "O1\nE1\nO2\nE2\n");
assert_eq!(result.aggregated_output.truncated_after_lines, None);
}
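The interleaving test above depends on the aggregated stream recording chunks in arrival order across both pipes. A minimal sketch of that idea (not the crate's implementation): have the stdout and stderr readers send into one channel, so the aggregate naturally preserves arrival order.

use tokio::sync::mpsc;

async fn aggregate(mut rx: mpsc::Receiver<String>) -> String {
    // Both readers send into the same channel; recv() yields chunks
    // in the order they arrived, regardless of which stream they came from.
    let mut out = String::new();
    while let Some(chunk) = rx.recv().await {
        out.push_str(&chunk);
    }
    out
}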
@@ -107,8 +107,8 @@ async fn codex_mini_latest_tools() {
assert_eq!(requests.len(), 2, "expected two POST requests");

let expected_instructions = [
include_str!("../../prompt.md"),
include_str!("../../../apply-patch/apply_patch_tool_instructions.md"),
include_str!("../prompt.md"),
include_str!("../../apply-patch/apply_patch_tool_instructions.md"),
]
.join("\n");

@@ -188,7 +188,7 @@ async fn prompt_tools_are_consistent_across_requests() {
let requests = server.received_requests().await.unwrap();
assert_eq!(requests.len(), 2, "expected two POST requests");

let expected_instructions: &str = include_str!("../../prompt.md");
let expected_instructions: &str = include_str!("../prompt.md");
// our internal implementation is responsible for keeping tools in sync
// with the OpenAI schema, so we just verify the tool presence here
let expected_tools_names: &[&str] = &["shell", "update_plan", "apply_patch"];
@@ -1,12 +0,0 @@
// Aggregates all former standalone integration tests as modules.

mod cli_stream;
mod client;
mod compact;
mod exec;
mod exec_stream_events;
mod live_cli;
mod prompt_caching;
mod seatbelt;
mod stream_error_allows_next_turn;
mod stream_no_completed;
@@ -24,7 +24,6 @@ use codex_core::protocol::StreamErrorEvent;
use codex_core::protocol::TaskCompleteEvent;
use codex_core::protocol::TurnAbortReason;
use codex_core::protocol::TurnDiffEvent;
use codex_core::protocol::WebSearchBeginEvent;
use owo_colors::OwoColorize;
use owo_colors::Style;
use shlex::try_join;
@@ -288,10 +287,10 @@ impl EventProcessor for EventProcessorWithHumanOutput {
EventMsg::ExecCommandOutputDelta(_) => {}
EventMsg::ExecCommandEnd(ExecCommandEndEvent {
call_id,
aggregated_output,
stdout,
stderr,
duration,
exit_code,
..
}) => {
let exec_command = self.call_id_to_command.remove(&call_id);
let (duration, call) = if let Some(ExecCommandBegin { command, .. }) = exec_command
@@ -304,7 +303,8 @@ impl EventProcessor for EventProcessorWithHumanOutput {
("".to_string(), format!("exec('{call_id}')"))
};

let truncated_output = aggregated_output
let output = if exit_code == 0 { stdout } else { stderr };
let truncated_output = output
.lines()
.take(MAX_OUTPUT_LINES_FOR_EXEC_TOOL_CALL)
.collect::<Vec<_>>()
@@ -362,9 +362,6 @@ impl EventProcessor for EventProcessorWithHumanOutput {
}
}
}
EventMsg::WebSearchBegin(WebSearchBeginEvent { call_id: _, query }) => {
ts_println!(self, "🌐 {query}");
}
EventMsg::PatchApplyBegin(PatchApplyBeginEvent {
call_id,
auto_approved,
@@ -542,7 +539,6 @@ impl EventProcessor for EventProcessorWithHumanOutput {
}
},
EventMsg::ShutdownComplete => return CodexStatus::Shutdown,
EventMsg::ConversationHistory(_) => {}
}
CodexStatus::Running
}
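The event-processor change above switches what gets echoed for a finished command: it previously showed the aggregated output, and now shows stdout on success or stderr on failure, still truncated to a fixed number of lines. A compact sketch of that selection (the constant's value is assumed for illustration):

const MAX_OUTPUT_LINES_FOR_EXEC_TOOL_CALL: usize = 20; // assumed value

fn output_preview(exit_code: i32, stdout: &str, stderr: &str) -> String {
    // Show stdout for successful commands and stderr for failing ones.
    let output = if exit_code == 0 { stdout } else { stderr };
    output
        .lines()
        .take(MAX_OUTPUT_LINES_FOR_EXEC_TOOL_CALL)
        .collect::<Vec<_>>()
        .join("\n")
}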
@@ -150,7 +150,6 @@ pub async fn run_main(cli: Cli, codex_linux_sandbox_exe: Option<PathBuf>) -> any
include_apply_patch_tool: None,
disable_response_storage: oss.then_some(true),
show_raw_agent_reasoning: oss.then_some(true),
tools_web_search_request: None,
};
// Parse `-c` overrides.
let cli_kv_overrides = match config_overrides.parse_overrides() {

@@ -1,3 +0,0 @@
// Single integration test binary that aggregates all test modules.
// The submodules live in `tests/suite/`.
mod suite;
codex-rs/exec/tests/apply_patch.rs (new file, 339 lines)
@@ -0,0 +1,339 @@
#![allow(clippy::expect_used, clippy::unwrap_used)]

use anyhow::Context;
use assert_cmd::prelude::*;
use codex_core::CODEX_APPLY_PATCH_ARG1;
use std::fs;
use std::process::Command;
use tempfile::tempdir;

/// While we may add an `apply-patch` subcommand to the `codex` CLI multitool
/// at some point, we must ensure that the smaller `codex-exec` CLI can still
/// emulate the `apply_patch` CLI.
#[test]
fn test_standalone_exec_cli_can_use_apply_patch() -> anyhow::Result<()> {
let tmp = tempdir()?;
let relative_path = "source.txt";
let absolute_path = tmp.path().join(relative_path);
fs::write(&absolute_path, "original content\n")?;

Command::cargo_bin("codex-exec")
.context("should find binary for codex-exec")?
.arg(CODEX_APPLY_PATCH_ARG1)
.arg(
r#"*** Begin Patch
*** Update File: source.txt
@@
-original content
+modified by apply_patch
*** End Patch"#,
)
.current_dir(tmp.path())
.assert()
.success()
.stdout("Success. Updated the following files:\nM source.txt\n")
.stderr(predicates::str::is_empty());
assert_eq!(
fs::read_to_string(absolute_path)?,
"modified by apply_patch\n"
);
Ok(())
}

#[cfg(not(target_os = "windows"))]
#[tokio::test]
async fn test_apply_patch_tool() -> anyhow::Result<()> {
use core_test_support::load_sse_fixture_with_id_from_str;
use tempfile::TempDir;
use wiremock::Mock;
use wiremock::MockServer;
use wiremock::ResponseTemplate;
use wiremock::matchers::method;
use wiremock::matchers::path;

const SSE_TOOL_CALL_ADD: &str = r#"[
{
"type": "response.output_item.done",
"item": {
"type": "function_call",
"name": "apply_patch",
"arguments": "{\n \"input\": \"*** Begin Patch\\n*** Add File: test.md\\n+Hello world\\n*** End Patch\"\n}",
"call_id": "__ID__"
}
},
{
"type": "response.completed",
"response": {
"id": "__ID__",
"usage": {
"input_tokens": 0,
"input_tokens_details": null,
"output_tokens": 0,
"output_tokens_details": null,
"total_tokens": 0
},
"output": []
}
}
]"#;

const SSE_TOOL_CALL_UPDATE: &str = r#"[
{
"type": "response.output_item.done",
"item": {
"type": "function_call",
"name": "apply_patch",
"arguments": "{\n \"input\": \"*** Begin Patch\\n*** Update File: test.md\\n@@\\n-Hello world\\n+Final text\\n*** End Patch\"\n}",
"call_id": "__ID__"
}
},
{
"type": "response.completed",
"response": {
"id": "__ID__",
"usage": {
"input_tokens": 0,
"input_tokens_details": null,
"output_tokens": 0,
"output_tokens_details": null,
"total_tokens": 0
},
"output": []
}
}
]"#;

const SSE_TOOL_CALL_COMPLETED: &str = r#"[
{
"type": "response.completed",
"response": {
"id": "__ID__",
"usage": {
"input_tokens": 0,
"input_tokens_details": null,
"output_tokens": 0,
"output_tokens_details": null,
"total_tokens": 0
},
"output": []
}
}
]"#;

// Start a mock model server
let server = MockServer::start().await;

// First response: model calls apply_patch to create test.md
let first = ResponseTemplate::new(200)
.insert_header("content-type", "text/event-stream")
.set_body_raw(
load_sse_fixture_with_id_from_str(SSE_TOOL_CALL_ADD, "call1"),
"text/event-stream",
);

Mock::given(method("POST"))
// .and(path("/v1/responses"))
.respond_with(first)
.up_to_n_times(1)
.mount(&server)
.await;

// Second response: model calls apply_patch to update test.md
let second = ResponseTemplate::new(200)
.insert_header("content-type", "text/event-stream")
.set_body_raw(
load_sse_fixture_with_id_from_str(SSE_TOOL_CALL_UPDATE, "call2"),
"text/event-stream",
);

Mock::given(method("POST"))
.and(path("/v1/responses"))
.respond_with(second)
.up_to_n_times(1)
.mount(&server)
.await;

let final_completed = ResponseTemplate::new(200)
.insert_header("content-type", "text/event-stream")
.set_body_raw(
load_sse_fixture_with_id_from_str(SSE_TOOL_CALL_COMPLETED, "resp3"),
"text/event-stream",
);

Mock::given(method("POST"))
.and(path("/v1/responses"))
.respond_with(final_completed)
.expect(1)
.mount(&server)
.await;

let tmp_cwd = TempDir::new().unwrap();
Command::cargo_bin("codex-exec")
.context("should find binary for codex-exec")?
.current_dir(tmp_cwd.path())
.env("CODEX_HOME", tmp_cwd.path())
.env("OPENAI_API_KEY", "dummy")
.env("OPENAI_BASE_URL", format!("{}/v1", server.uri()))
.arg("--skip-git-repo-check")
.arg("-s")
.arg("workspace-write")
.arg("foo")
.assert()
.success();

// Verify final file contents
let final_path = tmp_cwd.path().join("test.md");
let contents = std::fs::read_to_string(&final_path)
.unwrap_or_else(|e| panic!("failed reading {}: {e}", final_path.display()));
assert_eq!(contents, "Final text\n");
Ok(())
}

#[cfg(not(target_os = "windows"))]
#[tokio::test]
async fn test_apply_patch_freeform_tool() -> anyhow::Result<()> {
use core_test_support::load_sse_fixture_with_id_from_str;
use tempfile::TempDir;
use wiremock::Mock;
use wiremock::MockServer;
use wiremock::ResponseTemplate;
use wiremock::matchers::method;
use wiremock::matchers::path;

const SSE_TOOL_CALL_ADD: &str = r#"[
{
"type": "response.output_item.done",
"item": {
"type": "custom_tool_call",
"name": "apply_patch",
"input": "*** Begin Patch\n*** Add File: test.md\n+Hello world\n*** End Patch",
"call_id": "__ID__"
}
},
{
"type": "response.completed",
"response": {
"id": "__ID__",
"usage": {
"input_tokens": 0,
"input_tokens_details": null,
"output_tokens": 0,
"output_tokens_details": null,
"total_tokens": 0
},
"output": []
}
}
]"#;

const SSE_TOOL_CALL_UPDATE: &str = r#"[
{
"type": "response.output_item.done",
"item": {
"type": "custom_tool_call",
"name": "apply_patch",
"input": "*** Begin Patch\n*** Update File: test.md\n@@\n-Hello world\n+Final text\n*** End Patch",
"call_id": "__ID__"
}
},
{
"type": "response.completed",
"response": {
"id": "__ID__",
"usage": {
"input_tokens": 0,
"input_tokens_details": null,
"output_tokens": 0,
"output_tokens_details": null,
"total_tokens": 0
},
"output": []
}
}
]"#;

const SSE_TOOL_CALL_COMPLETED: &str = r#"[
{
"type": "response.completed",
"response": {
"id": "__ID__",
"usage": {
"input_tokens": 0,
"input_tokens_details": null,
"output_tokens": 0,
"output_tokens_details": null,
"total_tokens": 0
},
"output": []
}
}
]"#;

// Start a mock model server
let server = MockServer::start().await;

// First response: model calls apply_patch to create test.md
let first = ResponseTemplate::new(200)
.insert_header("content-type", "text/event-stream")
.set_body_raw(
load_sse_fixture_with_id_from_str(SSE_TOOL_CALL_ADD, "call1"),
"text/event-stream",
);

Mock::given(method("POST"))
// .and(path("/v1/responses"))
.respond_with(first)
.up_to_n_times(1)
.mount(&server)
.await;

// Second response: model calls apply_patch to update test.md
let second = ResponseTemplate::new(200)
.insert_header("content-type", "text/event-stream")
.set_body_raw(
load_sse_fixture_with_id_from_str(SSE_TOOL_CALL_UPDATE, "call2"),
"text/event-stream",
);

Mock::given(method("POST"))
.and(path("/v1/responses"))
.respond_with(second)
.up_to_n_times(1)
.mount(&server)
.await;

let final_completed = ResponseTemplate::new(200)
.insert_header("content-type", "text/event-stream")
.set_body_raw(
load_sse_fixture_with_id_from_str(SSE_TOOL_CALL_COMPLETED, "resp3"),
"text/event-stream",
);

Mock::given(method("POST"))
// .and(path("/v1/responses"))
.respond_with(final_completed)
.expect(1)
.mount(&server)
.await;

let tmp_cwd = TempDir::new().unwrap();
Command::cargo_bin("codex-exec")
.context("should find binary for codex-exec")?
.current_dir(tmp_cwd.path())
.env("CODEX_HOME", tmp_cwd.path())
.env("OPENAI_API_KEY", "dummy")
.env("OPENAI_BASE_URL", format!("{}/v1", server.uri()))
.arg("--skip-git-repo-check")
.arg("-s")
.arg("workspace-write")
.arg("foo")
.assert()
.success();

// Verify final file contents
let final_path = tmp_cwd.path().join("test.md");
let contents = std::fs::read_to_string(&final_path)
.unwrap_or_else(|e| panic!("failed reading {}: {e}", final_path.display()));
assert_eq!(contents, "Final text\n");
Ok(())
}
@@ -1,4 +0,0 @@
class BaseClass:
def method():

return True
@@ -1,25 +0,0 @@
[
{
"type": "response.output_item.done",
"item": {
"type": "custom_tool_call",
"name": "apply_patch",
"input": "*** Begin Patch\n*** Add File: test.md\n+Hello world\n*** End Patch",
"call_id": "__ID__"
}
},
{
"type": "response.completed",
"response": {
"id": "__ID__",
"usage": {
"input_tokens": 0,
"input_tokens_details": null,
"output_tokens": 0,
"output_tokens_details": null,
"total_tokens": 0
},
"output": []
}
}
]
@@ -1,25 +0,0 @@
[
{
"type": "response.output_item.done",
"item": {
"type": "custom_tool_call",
"name": "apply_patch",
"input": "*** Begin Patch\n*** Update File: app.py\n@@ class BaseClass:\n@@ def method():\n- return False\n+ return True\n*** End Patch",
"call_id": "__ID__"
}
},
{
"type": "response.completed",
"response": {
"id": "__ID__",
"usage": {
"input_tokens": 0,
"input_tokens_details": null,
"output_tokens": 0,
"output_tokens_details": null,
"total_tokens": 0
},
"output": []
}
}
]
@@ -1,25 +0,0 @@
[
{
"type": "response.output_item.done",
"item": {
"type": "custom_tool_call",
"name": "apply_patch",
"input": "*** Begin Patch\n*** Add File: app.py\n+class BaseClass:\n+ def method():\n+ return False\n*** End Patch",
"call_id": "__ID__"
}
},
{
"type": "response.completed",
"response": {
"id": "__ID__",
"usage": {
"input_tokens": 0,
"input_tokens_details": null,
"output_tokens": 0,
"output_tokens_details": null,
"total_tokens": 0
},
"output": []
}
}
]
@@ -1,25 +0,0 @@
[
{
"type": "response.output_item.done",
"item": {
"type": "custom_tool_call",
"name": "apply_patch",
"input": "*** Begin Patch\n*** Update File: app.py\n@@ def method():\n- return False\n+\n+ return True\n*** End Patch",
"call_id": "__ID__"
}
},
{
"type": "response.completed",
"response": {
"id": "__ID__",
"usage": {
"input_tokens": 0,
"input_tokens_details": null,
"output_tokens": 0,
"output_tokens_details": null,
"total_tokens": 0
},
"output": []
}
}
]
@@ -1,25 +0,0 @@
[
{
"type": "response.output_item.done",
"item": {
"type": "function_call",
"name": "apply_patch",
"arguments": "{\n \"input\": \"*** Begin Patch\\n*** Update File: test.md\\n@@\\n-Hello world\\n+Final text\\n*** End Patch\"\n}",
"call_id": "__ID__"
}
},
{
"type": "response.completed",
"response": {
"id": "__ID__",
"usage": {
"input_tokens": 0,
"input_tokens_details": null,
"output_tokens": 0,
"output_tokens_details": null,
"total_tokens": 0
},
"output": []
}
}
]
@@ -1,16 +0,0 @@
[
{
"type": "response.completed",
"response": {
"id": "__ID__",
"usage": {
"input_tokens": 0,
"input_tokens_details": null,
"output_tokens": 0,
"output_tokens_details": null,
"total_tokens": 0
},
"output": []
}
}
]
@@ -1,146 +0,0 @@
#![allow(clippy::expect_used, clippy::unwrap_used)]

use anyhow::Context;
use assert_cmd::prelude::*;
use codex_core::CODEX_APPLY_PATCH_ARG1;
use std::fs;
use std::process::Command;
use tempfile::tempdir;

/// While we may add an `apply-patch` subcommand to the `codex` CLI multitool
/// at some point, we must ensure that the smaller `codex-exec` CLI can still
/// emulate the `apply_patch` CLI.
#[test]
fn test_standalone_exec_cli_can_use_apply_patch() -> anyhow::Result<()> {
let tmp = tempdir()?;
let relative_path = "source.txt";
let absolute_path = tmp.path().join(relative_path);
fs::write(&absolute_path, "original content\n")?;

Command::cargo_bin("codex-exec")
.context("should find binary for codex-exec")?
.arg(CODEX_APPLY_PATCH_ARG1)
.arg(
r#"*** Begin Patch
*** Update File: source.txt
@@
-original content
+modified by apply_patch
*** End Patch"#,
)
.current_dir(tmp.path())
.assert()
.success()
.stdout("Success. Updated the following files:\nM source.txt\n")
.stderr(predicates::str::is_empty());
assert_eq!(
fs::read_to_string(absolute_path)?,
"modified by apply_patch\n"
);
Ok(())
}

#[cfg(not(target_os = "windows"))]
#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
async fn test_apply_patch_tool() -> anyhow::Result<()> {
use crate::suite::common::run_e2e_exec_test;
use codex_core::spawn::CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR;

if std::env::var(CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR).is_ok() {
println!(
"Skipping test because it cannot execute when network is disabled in a Codex sandbox."
);
return Ok(());
}

let tmp_cwd = tempdir().expect("failed to create temp dir");
let tmp_path = tmp_cwd.path().to_path_buf();
run_e2e_exec_test(
tmp_cwd.path(),
vec![
include_str!("../fixtures/sse_apply_patch_add.json").to_string(),
include_str!("../fixtures/sse_apply_patch_update.json").to_string(),
include_str!("../fixtures/sse_response_completed.json").to_string(),
],
)
.await;

let final_path = tmp_path.join("test.md");
let contents = std::fs::read_to_string(&final_path)
.unwrap_or_else(|e| panic!("failed reading {}: {e}", final_path.display()));
assert_eq!(contents, "Final text\n");
Ok(())
}

#[cfg(not(target_os = "windows"))]
#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
async fn test_apply_patch_freeform_tool() -> anyhow::Result<()> {
use crate::suite::common::run_e2e_exec_test;
use codex_core::spawn::CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR;

if std::env::var(CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR).is_ok() {
println!(
"Skipping test because it cannot execute when network is disabled in a Codex sandbox."
);
return Ok(());
}

let tmp_cwd = tempdir().expect("failed to create temp dir");
run_e2e_exec_test(
tmp_cwd.path(),
vec![
include_str!("../fixtures/sse_apply_patch_freeform_add.json").to_string(),
include_str!("../fixtures/sse_apply_patch_freeform_update.json").to_string(),
include_str!("../fixtures/sse_response_completed.json").to_string(),
],
)
.await;

// Verify final file contents
let final_path = tmp_cwd.path().join("app.py");
let contents = std::fs::read_to_string(&final_path)
.unwrap_or_else(|e| panic!("failed reading {}: {e}", final_path.display()));
assert_eq!(
contents,
include_str!("../fixtures/apply_patch_freeform_final.txt")
);
Ok(())
}

#[cfg(not(target_os = "windows"))]
#[tokio::test]
async fn test_apply_patch_context() -> anyhow::Result<()> {
use crate::suite::common::run_e2e_exec_test;
use codex_core::spawn::CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR;

if std::env::var(CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR).is_ok() {
println!(
"Skipping test because it cannot execute when network is disabled in a Codex sandbox."
);
return Ok(());
}

let tmp_cwd = tempdir().expect("failed to create temp dir");
run_e2e_exec_test(
tmp_cwd.path(),
vec![
include_str!("../fixtures/sse_apply_patch_freeform_add.json").to_string(),
include_str!("../fixtures/sse_apply_patch_context_update.json").to_string(),
include_str!("../fixtures/sse_response_completed.json").to_string(),
],
)
.await;

// Verify final file contents
let final_path = tmp_cwd.path().join("app.py");
let contents = std::fs::read_to_string(&final_path)
.unwrap_or_else(|e| panic!("failed reading {}: {e}", final_path.display()));
assert_eq!(
contents,
r#"class BaseClass:
def method():
return True
"#
);
Ok(())
}
@@ -1,73 +0,0 @@
// this file is only used for e2e tests which are currently disabled on windows
#![cfg(not(target_os = "windows"))]
#![allow(clippy::expect_used)]

use anyhow::Context;
use assert_cmd::prelude::*;
use core_test_support::load_sse_fixture_with_id_from_str;
use std::path::Path;
use std::process::Command;
use std::sync::atomic::AtomicUsize;
use std::sync::atomic::Ordering;
use wiremock::Mock;
use wiremock::MockServer;
use wiremock::matchers::method;
use wiremock::matchers::path;

use wiremock::Respond;

struct SeqResponder {
num_calls: AtomicUsize,
responses: Vec<String>,
}

impl Respond for SeqResponder {
fn respond(&self, _: &wiremock::Request) -> wiremock::ResponseTemplate {
let call_num = self.num_calls.fetch_add(1, Ordering::SeqCst);
match self.responses.get(call_num) {
Some(body) => wiremock::ResponseTemplate::new(200)
.insert_header("content-type", "text/event-stream")
.set_body_raw(
load_sse_fixture_with_id_from_str(body, &format!("request_{}", call_num)),
"text/event-stream",
),
None => panic!("no response for {call_num}"),
}
}
}

/// Helper function to run an E2E test of a codex-exec call. Starts a wiremock
/// server, and returns the response_streams in order for each api call. Runs
/// the codex-exec command with the wiremock server as the model server.
pub(crate) async fn run_e2e_exec_test(cwd: &Path, response_streams: Vec<String>) {
let server = MockServer::start().await;

let num_calls = response_streams.len();
let seq_responder = SeqResponder {
num_calls: AtomicUsize::new(0),
responses: response_streams,
};

Mock::given(method("POST"))
.and(path("/v1/responses"))
.respond_with(seq_responder)
.expect(num_calls as u64)
.mount(&server)
.await;

let cwd = cwd.to_path_buf();
let uri = server.uri();
Command::cargo_bin("codex-exec")
.context("should find binary for codex-exec")
.expect("should find binary for codex-exec")
.current_dir(cwd.clone())
.env("CODEX_HOME", cwd.clone())
.env("OPENAI_API_KEY", "dummy")
.env("OPENAI_BASE_URL", format!("{}/v1", uri))
.arg("--skip-git-repo-check")
.arg("-s")
.arg("danger-full-access")
.arg("foo")
.assert()
.success();
}
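A hypothetical call site for this helper, mirroring the rewritten tests above (fixture names illustrative):

// Feed three SSE fixtures in order, then assert on the files the agent wrote:
// run_e2e_exec_test(tmp.path(), vec![add_fixture, update_fixture, completed_fixture]).await;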
@@ -1,4 +0,0 @@
// Aggregates all former standalone integration tests as modules.
mod apply_patch;
mod common;
mod sandbox;
@@ -1,3 +0,0 @@
// Single integration test binary that aggregates all test modules.
// The submodules live in `tests/suite/`.
mod suite;
@@ -1,10 +0,0 @@
// Aggregates all former standalone integration tests as modules.
mod bad;
mod cp;
mod good;
mod head;
mod literal;
mod ls;
mod parse_sed_command;
mod pwd;
mod sed;
@@ -1,3 +0,0 @@
// Single integration test binary that aggregates all test modules.
// The submodules live in `tests/suite/`.
mod suite;
@@ -1,2 +0,0 @@
// Aggregates all former standalone integration tests as modules.
mod landlock;
@@ -1,3 +0,0 @@
// Single integration test binary that aggregates all test modules.
// The submodules live in `tests/suite/`.
mod suite;
@@ -1,2 +0,0 @@
// Aggregates all former standalone integration tests as modules.
mod login_server_e2e;
@@ -17,6 +17,7 @@ use codex_core::protocol::ReviewDecision;
use codex_login::AuthManager;
use codex_protocol::mcp_protocol::AuthMode;
use codex_protocol::mcp_protocol::GitDiffToRemoteResponse;
use codex_protocol::mcp_protocol::RunSubagentParams;
use mcp_types::JSONRPCErrorError;
use mcp_types::RequestId;
use tokio::sync::Mutex;
@@ -27,6 +28,7 @@ use uuid::Uuid;
use crate::error_code::INTERNAL_ERROR_CODE;
use crate::error_code::INVALID_REQUEST_ERROR_CODE;
use crate::json_to_toml::json_to_toml;
use crate::mock_data::subagent_mock_response_from_diff;
use crate::outgoing_message::OutgoingMessageSender;
use crate::outgoing_message::OutgoingNotification;
use codex_core::protocol::InputItem as CoreInputItem;
@@ -146,6 +148,9 @@ impl CodexMessageProcessor {
ClientRequest::GetAuthStatus { request_id, params } => {
self.get_auth_status(request_id, params).await;
}
ClientRequest::RunSubagent { request_id, params } => {
self.run_subagent(request_id, params).await;
}
}
}

@@ -354,6 +359,33 @@ impl CodexMessageProcessor {
self.outgoing.send_response(request_id, response).await;
}

async fn run_subagent(&self, request_id: RequestId, params: RunSubagentParams) {
let RunSubagentParams {
conversation_id,
subagant,
input: _input,
} = params;
let Ok(_conversation) = self
.conversation_manager
.get_conversation(conversation_id.0)
.await
else {
let error = JSONRPCErrorError {
code: INVALID_REQUEST_ERROR_CODE,
message: format!("conversation not found: {conversation_id}"),
data: None,
};
self.outgoing.send_error(request_id, error).await;
return;
};
// Build findings exclusively from the actual git diff used by the TUI.
let response = match git_diff_to_remote(&self.config.cwd).await {
Some(diff) => subagent_mock_response_from_diff(subagant, &self.config.cwd, &diff.diff),
None => subagent_mock_response_from_diff(subagant, &self.config.cwd, ""),
};
self.outgoing.send_response(request_id, response).await;
}

async fn process_new_conversation(&self, request_id: RequestId, params: NewConversationParams) {
let config = match derive_config_from_params(params, self.codex_linux_sandbox_exe.clone()) {
Ok(config) => config,
@@ -738,7 +770,6 @@ fn derive_config_from_params(
include_apply_patch_tool,
disable_response_storage: None,
show_raw_agent_reasoning: None,
tools_web_search_request: None,
};

let cli_overrides = cli_overrides

@@ -163,7 +163,6 @@ impl CodexToolCallParam {
include_apply_patch_tool: None,
disable_response_storage: None,
show_raw_agent_reasoning: None,
tools_web_search_request: None,
};

let cli_overrides = cli_overrides

@@ -272,11 +272,9 @@ async fn run_codex_tool_session_inner(
| EventMsg::PatchApplyBegin(_)
| EventMsg::PatchApplyEnd(_)
| EventMsg::TurnDiff(_)
| EventMsg::WebSearchBegin(_)
| EventMsg::GetHistoryEntryResponse(_)
| EventMsg::PlanUpdate(_)
| EventMsg::TurnAborted(_)
| EventMsg::ConversationHistory(_)
| EventMsg::ShutdownComplete => {
// For now, we do not do anything extra for these
// events. Note that

@@ -27,6 +27,7 @@ mod error_code;
mod exec_approval;
mod json_to_toml;
pub(crate) mod message_processor;
mod mock_data;
mod outgoing_message;
mod patch_approval;
codex-rs/mcp-server/src/mock_data.rs (new file, 214 lines)
@@ -0,0 +1,214 @@
use std::path::Path;
use std::path::PathBuf;

use codex_protocol::mcp_protocol::CodeLocation;
use codex_protocol::mcp_protocol::Finding;
use codex_protocol::mcp_protocol::LineRange;
use codex_protocol::mcp_protocol::ReviewOutput;
use codex_protocol::mcp_protocol::RunSubagentResponse;
use codex_protocol::mcp_protocol::Subagent;
use codex_protocol::mcp_protocol::SubagentOutput;

/// Build a mock response for Review subagent using actual unified diff text.
pub(crate) fn subagent_mock_response_from_diff(
subagent: Subagent,
cwd: &Path,
diff: &str,
) -> RunSubagentResponse {
match subagent {
Subagent::Review => {
let findings = review_findings_from_unified_diff(cwd, diff);
RunSubagentResponse {
output: SubagentOutput::Review(ReviewOutput { findings }),
}
}
}
}

/// Parse a unified diff and generate representative findings mapped to changed hunks.
fn review_findings_from_unified_diff(cwd: &Path, diff: &str) -> Vec<Finding> {
const TITLES: &[&str] = &[
"Add a clarifying comment",
"Consider extracting a helper function",
"Prefer descriptive variable names",
"Validate inputs and handle errors early",
"Document the intent of this change",
"Consider reducing nesting with early returns",
"Add unit tests for this branch",
"Ensure consistent logging and levels",
];
const BODIES: &[&str] = &[
"Add a comment to this line to explain the rationale.",
"This logic could be extracted for readability and reuse.",
"Use a more descriptive identifier to clarify the purpose.",
"Add a guard clause to handle invalid or edge inputs.",
"Add a doc comment describing the behavior for maintainers.",
"Flatten control flow using early returns where safe.",
"Add a focused test that covers this behavior.",
"Use the shared logger and appropriate log level.",
];

let mut findings: Vec<Finding> = Vec::new();
let mut current_file: Option<PathBuf> = None;
let mut in_hunk: bool = false;
let mut new_line: u32 = 1;
let mut template_index: usize = 0;

for line in diff.lines() {
if line.starts_with("diff --git ") {
current_file = None;
in_hunk = false;
continue;
}

if let Some(rest) = line.strip_prefix("+++ b/") {
current_file = Some(cwd.join(rest.trim()));
continue;
}
if line.starts_with("+++ ") || line.starts_with("--- ") {
continue;
}

if let Some(hunk_header) = line.strip_prefix("@@") {
if let Some((_, after_plus)) = hunk_header.split_once('+') {
let mut range_text = after_plus.trim();
if let Some((seg, _)) = range_text.split_once(' ') {
range_text = seg;
}
let (start, _count) = parse_start_count(range_text);
new_line = start;
in_hunk = true;
}
continue;
}

if in_hunk {
if line.starts_with(' ') {
new_line = new_line.saturating_add(1);
} else if line.starts_with('-') {
// deletion: no advance of new_line
} else if line.starts_with('+') && !line.starts_with("+++") {
if let Some(path) = &current_file {
let title = TITLES[template_index % TITLES.len()].to_string();
let mut body = BODIES[template_index % BODIES.len()].to_string();
let snippet = line.trim_start_matches('+').trim();
if !snippet.is_empty() {
body.push_str("\nSnippet: ");
let truncated = if snippet.len() > 140 {
let mut s = snippet[..140].to_string();
s.push('…');
s
} else {
snippet.to_string()
};
body.push_str(&truncated);
}

findings.push(Finding {
title,
body,
confidence_score: confidence_for_index(template_index),
code_location: CodeLocation {
absolute_file_path: to_forward_slashes(path),
line_range: LineRange {
start: new_line,
end: new_line,
},
},
});
template_index += 1;
}
new_line = new_line.saturating_add(1);
}
}
}

if findings.len() > 50 {
findings.truncate(50);
}
findings
}

fn confidence_for_index(i: usize) -> f32 {
let base = 0.72f32;
let step = (i as f32 % 7.0) * 0.03;
(base + step).min(0.95)
}

fn parse_start_count(text: &str) -> (u32, u32) {
// Formats: "123,45" or just "123"
if let Some((start_str, count_str)) = text.split_once(',') {
let start = start_str
.trim()
.trim_start_matches('+')
.parse()
.unwrap_or(1);
let count = count_str.trim().parse().unwrap_or(1);
(start as u32, count as u32)
} else {
let start = text.trim().trim_start_matches('+').parse().unwrap_or(1);
(start as u32, 1)
}
}

fn to_forward_slashes(path: &Path) -> String {
path.to_string_lossy().replace('\\', "/")
}

#[cfg(test)]
mod tests {
use super::*;
use std::path::Path;

#[test]
fn returns_empty_findings_for_empty_diff() {
let cwd = Path::new("/tmp");
let resp = subagent_mock_response_from_diff(Subagent::Review, cwd, "");
match resp.output {
SubagentOutput::Review(ReviewOutput { findings }) => {
assert!(findings.is_empty(), "Expected no findings for empty diff");
}
}
}

#[test]
fn generates_findings_for_added_lines_with_correct_locations() {
let cwd = Path::new("/repo");
let diff = r#"diff --git a/src/lib.rs b/src/lib.rs
index 1111111..2222222 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -10,2 +10,4 @@
context
+let x = 1;
+// TODO: add docs
context
@@ -50,3 +52,5 @@
-context
+context changed
+fn new_fn() {}
+// comment
"#;

let resp = subagent_mock_response_from_diff(Subagent::Review, cwd, diff);
match resp.output {
SubagentOutput::Review(ReviewOutput { findings }) => {
// Added lines: 2 in first hunk, 3 in second hunk => 5 findings
assert_eq!(findings.len(), 5, "Expected one finding per added line");

// Validate file path and line numbers for the first two additions
let file_path = "/repo/src/lib.rs".to_string();
assert_eq!(findings[0].code_location.absolute_file_path, file_path);
assert_eq!(findings[0].code_location.line_range.start, 11);
assert_eq!(findings[0].code_location.line_range.end, 11);
assert_eq!(findings[1].code_location.absolute_file_path, file_path);
assert_eq!(findings[1].code_location.line_range.start, 12);
assert_eq!(findings[1].code_location.line_range.end, 12);

// Validate second hunk first two additions start at 52, then 53
assert_eq!(findings[2].code_location.line_range.start, 52);
assert_eq!(findings[3].code_location.line_range.start, 53);
}
}
}
}
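A worked example of the hunk arithmetic exercised above: for a header `@@ -10,2 +10,4 @@` the text after `+` is `10,4`, so new_line starts at 10; one context line advances it to 11, which is where the first added line is reported. The same holds for the bare form, as these assertions (which would live alongside the tests above) show:

assert_eq!(parse_start_count("10,4"), (10, 4));
assert_eq!(parse_start_count("52"), (52, 1)); // a bare start defaults count to 1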
@@ -1,3 +0,0 @@
// Single integration test binary that aggregates all test modules.
// The submodules live in `tests/suite/`.
mod suite;
@@ -1,8 +0,0 @@
// Aggregates all former standalone integration tests as modules.
mod auth;
mod codex_message_processor_flow;
mod codex_tool;
mod create_conversation;
mod interrupt;
mod login;
mod send_message;
@@ -1,3 +0,0 @@
// Single integration test binary that aggregates all test modules.
// The submodules live in `tests/suite/`.
mod suite;
@@ -1,3 +0,0 @@
// Aggregates all former standalone integration tests as modules.
mod initialize;
mod progress_notification;
@@ -48,8 +48,6 @@ pub fn generate_ts(out_dir: &Path, prettier: Option<&Path>) -> Result<()> {
codex_protocol::mcp_protocol::ExecCommandApprovalResponse::export_all_to(out_dir)?;
codex_protocol::mcp_protocol::ServerNotification::export_all_to(out_dir)?;

generate_index_ts(out_dir)?;

// Prepend header to each generated .ts file
let ts_files = ts_files_in(out_dir)?;
for file in &ts_files {
@@ -111,39 +109,5 @@ fn ts_files_in(dir: &Path) -> Result<Vec<PathBuf>> {
files.push(path);
}
}
files.sort();
Ok(files)
}

/// Generate an index.ts file that re-exports all generated types.
/// This allows consumers to import all types from a single file.
fn generate_index_ts(out_dir: &Path) -> Result<PathBuf> {
let mut entries: Vec<String> = Vec::new();
let mut stems: Vec<String> = ts_files_in(out_dir)?
.into_iter()
.filter_map(|p| {
let stem = p.file_stem()?.to_string_lossy().into_owned();
if stem == "index" { None } else { Some(stem) }
})
.collect();
stems.sort();
stems.dedup();

for name in stems {
entries.push(format!("export type {{ {name} }} from \"./{name}\";\n"));
}

let mut content =
String::with_capacity(HEADER.len() + entries.iter().map(|s| s.len()).sum::<usize>());
content.push_str(HEADER);
for line in &entries {
content.push_str(line);
}

let index_path = out_dir.join("index.ts");
let mut f = fs::File::create(&index_path)
.with_context(|| format!("Failed to create {}", index_path.display()))?;
f.write_all(content.as_bytes())
.with_context(|| format!("Failed to write {}", index_path.display()))?;
Ok(index_path)
}

@@ -101,8 +101,67 @@ pub enum ClientRequest {
request_id: RequestId,
params: GetAuthStatusParams,
},
RunSubagent {
#[serde(rename = "id")]
request_id: RequestId,
params: RunSubagentParams,
},
}

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, TS)]
#[serde(rename_all = "camelCase")]
pub struct RunSubagentParams {
pub conversation_id: ConversationId,
pub subagant: Subagent,
pub input: Option<Vec<InputItem>>,
}

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, TS)]
#[serde(rename_all = "camelCase")]
pub enum Subagent {
Review,
}

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, TS)]
#[serde(tag = "type", content = "data", rename_all = "camelCase")]
pub enum SubagentOutput {
Review(ReviewOutput),
}

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, TS)]
#[serde(rename_all = "camelCase")]
pub struct RunSubagentResponse {
pub output: SubagentOutput,
}

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, TS)]
#[serde(rename_all = "snake_case")]
pub struct ReviewOutput {
pub findings: Vec<Finding>,
}

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, TS)]
#[serde(rename_all = "snake_case")]
pub struct Finding {
pub title: String,
pub body: String,
pub confidence_score: f32,
pub code_location: CodeLocation,
}

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, TS)]
#[serde(rename_all = "snake_case")]
pub struct CodeLocation {
pub absolute_file_path: String,
pub line_range: LineRange,
}

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, TS)]
#[serde(rename_all = "snake_case")]
pub struct LineRange {
pub start: u32,
pub end: u32,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default, TS)]
#[serde(rename_all = "camelCase")]
pub struct NewConversationParams {
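Given the serde attributes above, a RunSubagent request body serializes with camelCase keys; note that the field is literally named `subagant` in the struct, so that is what appears on the wire. A hypothetical payload (the outer request envelope shape is assumed; values illustrative):

// {
//   "id": 7,
//   "params": {
//     "conversationId": "…",
//     "subagant": "review",
//     "input": null
//   }
// }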
@@ -22,7 +22,6 @@ use uuid::Uuid;
|
||||
use crate::config_types::ReasoningEffort as ReasoningEffortConfig;
|
||||
use crate::config_types::ReasoningSummary as ReasoningSummaryConfig;
|
||||
use crate::message_history::HistoryEntry;
|
||||
use crate::models::ResponseItem;
|
||||
use crate::parse_command::ParsedCommand;
|
||||
use crate::plan_tool::UpdatePlanArgs;
|
||||
|
||||
@@ -138,10 +137,6 @@ pub enum Op {
|
||||
/// Request a single history entry identified by `log_id` + `offset`.
|
||||
GetHistoryEntryRequest { offset: usize, log_id: u64 },
|
||||
|
||||
/// Request the full in-memory conversation transcript for the current session.
|
||||
/// Reply is delivered via `EventMsg::ConversationHistory`.
|
||||
GetHistory,
|
||||
|
||||
/// Request the list of MCP tools available across all configured servers.
|
||||
/// Reply is delivered via `EventMsg::McpListToolsResponse`.
|
||||
ListMcpTools,
|
||||
@@ -437,8 +432,6 @@ pub enum EventMsg {
|
||||
|
||||
McpToolCallEnd(McpToolCallEndEvent),
|
||||
|
||||
WebSearchBegin(WebSearchBeginEvent),
|
||||
|
||||
/// Notification that the server is about to execute a command.
|
||||
ExecCommandBegin(ExecCommandBeginEvent),
|
||||
|
||||
@@ -478,8 +471,6 @@ pub enum EventMsg {
|
||||
|
||||
/// Notification that the agent is shutting down.
|
||||
ShutdownComplete,
|
||||
|
||||
ConversationHistory(ConversationHistoryResponseEvent),
|
||||
}
|
||||
|
||||
// Individual event payload types matching each `EventMsg` variant.
|
||||
@@ -660,20 +651,6 @@ impl McpToolCallEndEvent {
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Deserialize, Serialize)]
|
||||
pub struct WebSearchBeginEvent {
|
||||
pub call_id: String,
|
||||
pub query: String,
|
||||
}
|
||||
|
||||
/// Response payload for `Op::GetHistory` containing the current session's
|
||||
/// in-memory transcript.
|
||||
#[derive(Debug, Clone, Deserialize, Serialize)]
|
||||
pub struct ConversationHistoryResponseEvent {
|
||||
pub conversation_id: Uuid,
|
||||
pub entries: Vec<ResponseItem>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Deserialize, Serialize)]
|
||||
pub struct ExecCommandBeginEvent {
|
||||
/// Identifier so this can be paired with the ExecCommandEnd event.
|
||||
@@ -693,15 +670,10 @@ pub struct ExecCommandEndEvent {
|
||||
pub stdout: String,
|
||||
/// Captured stderr
|
||||
pub stderr: String,
|
||||
/// Captured aggregated output
|
||||
#[serde(default)]
|
||||
pub aggregated_output: String,
|
||||
/// The command's exit code.
|
||||
pub exit_code: i32,
|
||||
/// The duration of the command execution.
|
||||
pub duration: Duration,
|
||||
/// Formatted output from the command, as seen by the model.
|
||||
pub formatted_output: String,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Deserialize, Serialize)]
|
||||
|
||||
@@ -1,4 +1,3 @@
|
||||
use crate::app_backtrack::BacktrackState;
|
||||
use crate::app_event::AppEvent;
|
||||
use crate::app_event_sender::AppEventSender;
|
||||
use crate::chatwidget::ChatWidget;
|
||||
@@ -26,31 +25,27 @@ use std::thread;
 use std::time::Duration;
 use tokio::select;
 use tokio::sync::mpsc::unbounded_channel;
-// use uuid::Uuid;

 pub(crate) struct App {
-    pub(crate) server: Arc<ConversationManager>,
-    pub(crate) app_event_tx: AppEventSender,
-    pub(crate) chat_widget: ChatWidget,
+    server: Arc<ConversationManager>,
+    app_event_tx: AppEventSender,
+    chat_widget: ChatWidget,

     /// Config is stored here so we can recreate ChatWidgets as needed.
-    pub(crate) config: Config,
+    config: Config,

-    pub(crate) file_search: FileSearchManager,
+    file_search: FileSearchManager,

-    pub(crate) transcript_lines: Vec<Line<'static>>,
+    transcript_lines: Vec<Line<'static>>,

     // Transcript overlay state
-    pub(crate) transcript_overlay: Option<TranscriptApp>,
-    pub(crate) deferred_history_lines: Vec<Line<'static>>,
+    transcript_overlay: Option<TranscriptApp>,
+    deferred_history_lines: Vec<Line<'static>>,

-    pub(crate) enhanced_keys_supported: bool,
+    enhanced_keys_supported: bool,

     /// Controls the animation thread that sends CommitTick events.
-    pub(crate) commit_anim_running: Arc<AtomicBool>,
-
-    // Esc-backtracking state grouped
-    pub(crate) backtrack: crate::app_backtrack::BacktrackState,
+    commit_anim_running: Arc<AtomicBool>,
 }
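The visibility churn above is the substance of this hunk: the backtracking helpers (the `app_backtrack` module referenced elsewhere in this diff) evidently needed crate-wide access to these fields, hence `pub(crate)`; with the feature gone, the fields can return to being private. A small sketch of the distinction (stand-in modules, not the real crate layout):

mod app {
    pub(crate) struct App {
        // Crate-visible: sibling modules (e.g. a backtracking helper) may read it.
        pub(crate) transcript_lines: Vec<String>,
        // Private: only code inside `app` can touch it.
        #[allow(dead_code)]
        commit_anim_running: bool,
    }

    pub(crate) fn new_app() -> App {
        App { transcript_lines: vec!["line".into()], commit_anim_running: false }
    }
}

mod app_backtrack {
    pub(crate) fn transcript_len(app: &crate::app::App) -> usize {
        // Compiles because the field is pub(crate).
        // `app.commit_anim_running` would not: it is private to `app`.
        app.transcript_lines.len()
    }
}

fn main() {
    let app = app::new_app();
    println!("{}", app_backtrack::transcript_len(&app));
}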
 impl App {
@@ -92,7 +87,6 @@ impl App {
             transcript_overlay: None,
             deferred_history_lines: Vec::new(),
             commit_anim_running: Arc::new(AtomicBool::new(false)),
-            backtrack: BacktrackState::default(),
         };

         let tui_events = tui.event_stream();
@@ -102,7 +96,7 @@ impl App {

         while select! {
             Some(event) = app_event_rx.recv() => {
-                app.handle_event(tui, event).await?
+                app.handle_event(tui, event)?
             }
             Some(event) = tui_events.next() => {
                 app.handle_tui_event(tui, event).await?
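The loop above uses `select!` itself as the loop condition: each arm evaluates to a `bool`, and iteration continues while the chosen arm yields `true`. A runnable stand-in written with an explicit `loop` for clarity (assumes `tokio` with the `macros`, `rt-multi-thread`, and `sync` features):

use tokio::sync::mpsc::unbounded_channel;

#[tokio::main]
async fn main() {
    let (app_tx, mut app_rx) = unbounded_channel::<&str>();
    let (tui_tx, mut tui_rx) = unbounded_channel::<&str>();
    app_tx.send("app event").unwrap();
    tui_tx.send("tui event").unwrap();
    drop((app_tx, tui_tx)); // close both channels so the loop can finish

    loop {
        // Whichever branch is ready first runs; its value decides whether we
        // keep looping, mirroring the `while select! { ... }` shape above.
        let keep_going = tokio::select! {
            Some(ev) = app_rx.recv() => { println!("app: {ev}"); true }
            Some(ev) = tui_rx.recv() => { println!("tui: {ev}"); true }
            else => false, // every branch disabled: channels closed and drained
        };
        if !keep_going {
            break;
        }
    }
}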
@@ -117,8 +111,18 @@ impl App {
         tui: &mut tui::Tui,
         event: TuiEvent,
     ) -> Result<bool> {
-        if self.transcript_overlay.is_some() {
-            let _ = self.handle_backtrack_overlay_event(tui, event).await?;
+        if let Some(overlay) = &mut self.transcript_overlay {
+            overlay.handle_event(tui, event)?;
+            if overlay.is_done {
+                // Exit alternate screen and restore viewport.
+                let _ = tui.leave_alt_screen();
+                if !self.deferred_history_lines.is_empty() {
+                    let lines = std::mem::take(&mut self.deferred_history_lines);
+                    tui.insert_history_lines(lines);
+                }
+                self.transcript_overlay = None;
+                tui.frame_requester().schedule_frame();
+            }
         } else {
             match event {
                 TuiEvent::Key(key_event) => {
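The replacement branch inlines a common overlay pattern: while an overlay exists it consumes every event, and once it reports `is_done` the app flushes output that was deferred while the overlay was up, then drops it. A compact stand-in sketch of that flow:

// Stand-in types; the real code routes TuiEvent values into TranscriptApp.
struct Overlay {
    is_done: bool,
}

impl Overlay {
    fn handle_event(&mut self, ev: &str) {
        if ev == "q" {
            self.is_done = true;
        }
    }
}

struct App {
    overlay: Option<Overlay>,
    deferred: Vec<String>,
}

impl App {
    fn handle(&mut self, ev: &str) {
        if let Some(overlay) = &mut self.overlay {
            overlay.handle_event(ev);
            if overlay.is_done {
                // Flush lines deferred while the overlay was visible.
                for line in std::mem::take(&mut self.deferred) {
                    println!("{line}");
                }
                self.overlay = None;
            }
        } else {
            println!("main view got: {ev}");
        }
    }
}

fn main() {
    let mut app = App {
        overlay: Some(Overlay { is_done: false }),
        deferred: vec!["deferred line".into()],
    };
    app.handle("q"); // closes the overlay and flushes the deferred line
    app.handle("x"); // now handled by the main view
}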
@@ -157,7 +161,7 @@ impl App {
         Ok(true)
     }

-    async fn handle_event(&mut self, tui: &mut tui::Tui, event: AppEvent) -> Result<bool> {
+    fn handle_event(&mut self, tui: &mut tui::Tui, event: AppEvent) -> Result<bool> {
         match event {
             AppEvent::NewSession => {
                 self.chat_widget = ChatWidget::new(
@@ -223,9 +227,6 @@ impl App {
             AppEvent::CodexEvent(event) => {
                 self.chat_widget.handle_codex_event(event);
             }
-            AppEvent::ConversationHistory(ev) => {
-                self.on_conversation_history_for_backtrack(tui, ev).await?;
-            }
             AppEvent::ExitRequest => {
                 return Ok(false);
             }
@@ -303,36 +304,10 @@ impl App {
                 self.transcript_overlay = Some(TranscriptApp::new(self.transcript_lines.clone()));
                 tui.frame_requester().schedule_frame();
             }
-            // Esc primes/advances backtracking when composer is empty.
-            KeyEvent {
-                code: KeyCode::Esc,
-                kind: KeyEventKind::Press | KeyEventKind::Repeat,
-                ..
-            } => {
-                self.handle_backtrack_esc_key(tui);
-            }
-            // Enter confirms backtrack when primed + count > 0. Otherwise pass to widget.
-            KeyEvent {
-                code: KeyCode::Enter,
-                kind: KeyEventKind::Press,
-                ..
-            } if self.backtrack.primed
-                && self.backtrack.count > 0
-                && self.chat_widget.composer_is_empty() =>
-            {
-                // Delegate to helper for clarity; preserves behavior.
-                self.confirm_backtrack_from_main();
-            }
             KeyEvent {
                 kind: KeyEventKind::Press | KeyEventKind::Repeat,
                 ..
             } => {
-                // Any non-Esc key press should cancel a primed backtrack.
-                // This avoids stale "Esc-primed" state after the user starts typing
-                // (even if they later backspace to empty).
-                if key_event.code != KeyCode::Esc && self.backtrack.primed {
-                    self.reset_backtrack_state();
-                }
                 self.chat_widget.handle_key_event(key_event);
             }
             _ => {
Some files were not shown because too many files have changed in this diff.
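The key-routing hunk above relies on struct patterns with match guards over crossterm's `KeyEvent`. A standalone sketch of the same style (assumes the `crossterm` crate; the routing labels are illustrative):

use crossterm::event::{KeyCode, KeyEvent, KeyEventKind, KeyEventState, KeyModifiers};

fn route(primed: bool, composer_empty: bool, key_event: KeyEvent) -> &'static str {
    match key_event {
        // Guard: only fires when the surrounding state allows it.
        KeyEvent { code: KeyCode::Enter, kind: KeyEventKind::Press, .. }
            if primed && composer_empty => "confirm backtrack",
        // Presses and repeats go to the widget; everything else is dropped.
        KeyEvent { kind: KeyEventKind::Press | KeyEventKind::Repeat, .. } => "forward to widget",
        _ => "ignore (e.g. key release)",
    }
}

fn main() {
    let enter = KeyEvent {
        code: KeyCode::Enter,
        modifiers: KeyModifiers::NONE,
        kind: KeyEventKind::Press,
        state: KeyEventState::NONE,
    };
    assert_eq!(route(true, true, enter), "confirm backtrack");
}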