Mirror of https://github.com/openai/codex.git (synced 2026-02-03 15:33:41 +00:00)

Compare commits: migrate_co ... fix-cmd-ex
38 Commits
| Author | SHA1 | Date |
|---|---|---|
| | b81cb7ceb3 | |
| | c7a3428986 | |
| | d609dfa2fc | |
| | c8ebb2a0dc | |
| | 88e083a9d0 | |
| | 1c8507b32a | |
| | 23f31c6bff | |
| | ff48ae192b | |
| | a2fe2f9fb1 | |
| | 01ca2b5df6 | |
| | 368f7adfc6 | |
| | 68731ac74d | |
| | 0508823075 | |
| | 2ac14d1145 | |
| | 2371d771cc | |
| | 9a638dbf4e | |
| | dc2aeac21f | |
| | f842849bec | |
| | dcf73970d2 | |
| | e761924dc2 | |
| | cdc3df3790 | |
| | a3d3719481 | |
| | 11e5327770 | |
| | 87cce88f48 | |
| | ff6d4cec6b | |
| | 6ef658a9f9 | |
| | 8b8be343a7 | |
| | 89c00611c2 | |
| | 9572cfc782 | |
| | 4a55646a02 | |
| | 209af68611 | |
| | f4f9695978 | |
| | 5fcc380bd9 | |
| | aa76003e28 | |
| | fac548e430 | |
| | 9bd3453592 | |
| | b34efde2f3 | |
| | 7aa46ab5fc | |
.github/workflows/issue-deduplicator.yml (vendored, 4 changes)
@@ -16,7 +16,7 @@ jobs:
    outputs:
      codex_output: ${{ steps.codex.outputs.final-message }}
    steps:
-     - uses: actions/checkout@v4
+     - uses: actions/checkout@v5

      - name: Prepare Codex inputs
        env:
@@ -87,7 +87,7 @@ jobs:
      issues: write
    steps:
      - name: Comment on issue
-       uses: actions/github-script@v7
+       uses: actions/github-script@v8
        env:
          CODEX_OUTPUT: ${{ needs.gather-duplicates.outputs.codex_output }}
        with:
.github/workflows/issue-labeler.yml (vendored, 2 changes)
@@ -16,7 +16,7 @@ jobs:
    outputs:
      codex_output: ${{ steps.codex.outputs.final-message }}
    steps:
-     - uses: actions/checkout@v4
+     - uses: actions/checkout@v5

      - id: codex
        uses: openai/codex-action@main
@@ -1 +1 @@
-The changelog can be found on the [releases page](https://github.com/openai/codex/releases)
+The changelog can be found on the [releases page](https://github.com/openai/codex/releases).

@@ -33,7 +33,7 @@ Then simply run `codex` to get started:
codex
```

-If you're running into upgrade issues with Homebrew, see the [FAQ entry on brew upgrade codex](./docs/faq.md#brew-update-codex-isnt-upgrading-me).
+If you're running into upgrade issues with Homebrew, see the [FAQ entry on brew upgrade codex](./docs/faq.md#brew-upgrade-codex-isnt-upgrading-me).

<details>
<summary>You can also go to the <a href="https://github.com/openai/codex/releases/latest">latest GitHub Release</a> and download the appropriate binary for your platform.</summary>
@@ -79,7 +79,7 @@ Codex CLI supports a rich set of configuration options, with preferences stored
- [Example prompts](./docs/getting-started.md#example-prompts)
- [Custom prompts](./docs/prompts.md)
- [Memory with AGENTS.md](./docs/getting-started.md#memory-with-agentsmd)
-- [Configuration](./docs/config.md)
+- [**Configuration**](./docs/config.md)
- [**Sandbox & approvals**](./docs/sandbox.md)
- [**Authentication**](./docs/authentication.md)
- [Auth methods](./docs/authentication.md#forcing-a-specific-auth-method-advanced)
codex-rs/.cargo/config.toml (new file, 5 lines)
@@ -0,0 +1,5 @@
+[target.'cfg(all(windows, target_env = "msvc"))']
+rustflags = ["-C", "link-arg=/STACK:8388608"]
+
+[target.'cfg(all(windows, target_env = "gnu"))']
+rustflags = ["-C", "link-arg=-Wl,--stack,8388608"]
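Both targets raise the linker's default stack size to 8 MiB (8388608 bytes) for Windows builds. For comparison only, and not part of this change, the same budget can be given to an individual thread at runtime; a minimal sketch using the standard library:

```rust
use std::thread;

fn main() {
    // Hypothetical illustration: spawn one worker with an 8 MiB stack,
    // the same 8388608-byte figure used in the linker flags above.
    let worker = thread::Builder::new()
        .stack_size(8 * 1024 * 1024)
        .spawn(|| {
            // Stack-heavy or deeply recursive work would run here.
            println!("running with an enlarged stack");
        })
        .expect("failed to spawn worker thread");

    worker.join().expect("worker thread panicked");
}
```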
codex-rs/Cargo.lock (generated, 95 changes)
@@ -172,9 +172,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "anyhow"
|
||||
version = "1.0.99"
|
||||
version = "1.0.100"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "b0674a1ddeecb70197781e945de4b3b8ffb61fa939a5597bcf48503737663100"
|
||||
checksum = "a23eb6b1614318a8071c9b2521f36b424b2c83db5eb3a0fead4a6c0809af6e61"
|
||||
|
||||
[[package]]
|
||||
name = "app_test_support"
|
||||
@@ -891,7 +891,7 @@ dependencies = [
|
||||
"pretty_assertions",
|
||||
"similar",
|
||||
"tempfile",
|
||||
"thiserror 2.0.16",
|
||||
"thiserror 2.0.17",
|
||||
"tree-sitter",
|
||||
"tree-sitter-bash",
|
||||
]
|
||||
@@ -983,6 +983,7 @@ dependencies = [
|
||||
"codex-rmcp-client",
|
||||
"codex-stdio-to-uds",
|
||||
"codex-tui",
|
||||
"codex-windows-sandbox",
|
||||
"ctor 0.5.0",
|
||||
"owo-colors",
|
||||
"predicates",
|
||||
@@ -1031,7 +1032,7 @@ dependencies = [
|
||||
"diffy",
|
||||
"serde",
|
||||
"serde_json",
|
||||
"thiserror 2.0.16",
|
||||
"thiserror 2.0.17",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -1072,6 +1073,7 @@ dependencies = [
|
||||
"codex-utils-readiness",
|
||||
"codex-utils-string",
|
||||
"codex-utils-tokenizer",
|
||||
"codex-windows-sandbox",
|
||||
"core-foundation 0.9.4",
|
||||
"core_test_support",
|
||||
"dirs",
|
||||
@@ -1082,7 +1084,7 @@ dependencies = [
|
||||
"futures",
|
||||
"http",
|
||||
"image",
|
||||
"indexmap 2.10.0",
|
||||
"indexmap 2.12.0",
|
||||
"keyring",
|
||||
"landlock",
|
||||
"libc",
|
||||
@@ -1106,7 +1108,7 @@ dependencies = [
|
||||
"strum_macros 0.27.2",
|
||||
"tempfile",
|
||||
"test-log",
|
||||
"thiserror 2.0.16",
|
||||
"thiserror 2.0.17",
|
||||
"time",
|
||||
"tokio",
|
||||
"tokio-test",
|
||||
@@ -1212,7 +1214,7 @@ dependencies = [
|
||||
"schemars 0.8.22",
|
||||
"serde",
|
||||
"tempfile",
|
||||
"thiserror 2.0.16",
|
||||
"thiserror 2.0.17",
|
||||
"ts-rs",
|
||||
"walkdir",
|
||||
]
|
||||
@@ -1504,7 +1506,7 @@ dependencies = [
|
||||
"codex-utils-cache",
|
||||
"image",
|
||||
"tempfile",
|
||||
"thiserror 2.0.16",
|
||||
"thiserror 2.0.17",
|
||||
"tokio",
|
||||
]
|
||||
|
||||
@@ -1532,7 +1534,7 @@ version = "0.0.0"
|
||||
dependencies = [
|
||||
"assert_matches",
|
||||
"async-trait",
|
||||
"thiserror 2.0.16",
|
||||
"thiserror 2.0.17",
|
||||
"time",
|
||||
"tokio",
|
||||
]
|
||||
@@ -1547,10 +1549,22 @@ version = "0.0.0"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
"pretty_assertions",
|
||||
"thiserror 2.0.16",
|
||||
"thiserror 2.0.17",
|
||||
"tiktoken-rs",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "codex-windows-sandbox"
|
||||
version = "0.1.0"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
"dirs-next",
|
||||
"rand 0.8.5",
|
||||
"serde",
|
||||
"serde_json",
|
||||
"windows-sys 0.52.0",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "color-eyre"
|
||||
version = "0.6.5"
|
||||
@@ -2708,7 +2722,7 @@ dependencies = [
|
||||
"futures-core",
|
||||
"futures-sink",
|
||||
"http",
|
||||
"indexmap 2.10.0",
|
||||
"indexmap 2.12.0",
|
||||
"slab",
|
||||
"tokio",
|
||||
"tokio-util",
|
||||
@@ -2752,6 +2766,12 @@ dependencies = [
|
||||
"foldhash",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "hashbrown"
|
||||
version = "0.16.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "5419bdc4f6a9207fbeba6d11b604d481addf78ecd10c11ad51e76c2f6482748d"
|
||||
|
||||
[[package]]
|
||||
name = "heck"
|
||||
version = "0.5.0"
|
||||
@@ -3187,13 +3207,14 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "indexmap"
|
||||
version = "2.10.0"
|
||||
version = "2.12.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "fe4cd85333e22411419a0bcae1297d25e58c9443848b11dc6a86fefe8c78a661"
|
||||
checksum = "6717a8d2a5a929a1a2eb43a12812498ed141a0bcfb7e8f7844fbdbe4303bba9f"
|
||||
dependencies = [
|
||||
"equivalent",
|
||||
"hashbrown 0.15.4",
|
||||
"hashbrown 0.16.0",
|
||||
"serde",
|
||||
"serde_core",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -3481,7 +3502,7 @@ checksum = "b3d2ef408b88e913bfc6594f5e693d57676f6463ded7d8bf994175364320c706"
|
||||
dependencies = [
|
||||
"enumflags2",
|
||||
"libc",
|
||||
"thiserror 2.0.16",
|
||||
"thiserror 2.0.17",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -4161,7 +4182,7 @@ dependencies = [
|
||||
"futures-sink",
|
||||
"js-sys",
|
||||
"pin-project-lite",
|
||||
"thiserror 2.0.16",
|
||||
"thiserror 2.0.17",
|
||||
"tracing",
|
||||
]
|
||||
|
||||
@@ -4204,7 +4225,7 @@ dependencies = [
|
||||
"prost",
|
||||
"reqwest",
|
||||
"serde_json",
|
||||
"thiserror 2.0.16",
|
||||
"thiserror 2.0.17",
|
||||
"tokio",
|
||||
"tonic",
|
||||
"tracing",
|
||||
@@ -4244,7 +4265,7 @@ dependencies = [
|
||||
"percent-encoding",
|
||||
"rand 0.9.2",
|
||||
"serde_json",
|
||||
"thiserror 2.0.16",
|
||||
"thiserror 2.0.17",
|
||||
"tokio",
|
||||
"tokio-stream",
|
||||
]
|
||||
@@ -4355,7 +4376,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "b4c5cc86750666a3ed20bdaf5ca2a0344f9c67674cae0515bec2da16fbaa47db"
|
||||
dependencies = [
|
||||
"fixedbitset",
|
||||
"indexmap 2.10.0",
|
||||
"indexmap 2.12.0",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -4423,7 +4444,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "3af6b589e163c5a788fab00ce0c0366f6efbb9959c2f9874b224936af7fce7e1"
|
||||
dependencies = [
|
||||
"base64",
|
||||
"indexmap 2.10.0",
|
||||
"indexmap 2.12.0",
|
||||
"quick-xml",
|
||||
"serde",
|
||||
"time",
|
||||
@@ -4589,7 +4610,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "a3ef4f2f0422f23a82ec9f628ea2acd12871c81a9362b02c43c1aa86acfc3ba1"
|
||||
dependencies = [
|
||||
"futures",
|
||||
"indexmap 2.10.0",
|
||||
"indexmap 2.12.0",
|
||||
"nix 0.30.1",
|
||||
"tokio",
|
||||
"tracing",
|
||||
@@ -4676,7 +4697,7 @@ dependencies = [
|
||||
"rustc-hash 2.1.1",
|
||||
"rustls",
|
||||
"socket2 0.6.0",
|
||||
"thiserror 2.0.16",
|
||||
"thiserror 2.0.17",
|
||||
"tokio",
|
||||
"tracing",
|
||||
"web-time",
|
||||
@@ -4697,7 +4718,7 @@ dependencies = [
|
||||
"rustls",
|
||||
"rustls-pki-types",
|
||||
"slab",
|
||||
"thiserror 2.0.16",
|
||||
"thiserror 2.0.17",
|
||||
"tinyvec",
|
||||
"tracing",
|
||||
"web-time",
|
||||
@@ -4858,7 +4879,7 @@ checksum = "dd6f9d3d47bdd2ad6945c5015a226ec6155d0bcdfd8f7cd29f86b71f8de99d2b"
|
||||
dependencies = [
|
||||
"getrandom 0.2.16",
|
||||
"libredox",
|
||||
"thiserror 2.0.16",
|
||||
"thiserror 2.0.17",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -5009,7 +5030,7 @@ dependencies = [
|
||||
"serde",
|
||||
"serde_json",
|
||||
"sse-stream",
|
||||
"thiserror 2.0.16",
|
||||
"thiserror 2.0.17",
|
||||
"tokio",
|
||||
"tokio-stream",
|
||||
"tokio-util",
|
||||
@@ -5534,7 +5555,7 @@ version = "1.0.145"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "402a6f66d8c709116cf22f558eab210f5a50187f702eb4d7e5ef38d9a7f1c79c"
|
||||
dependencies = [
|
||||
"indexmap 2.10.0",
|
||||
"indexmap 2.12.0",
|
||||
"itoa",
|
||||
"memchr",
|
||||
"ryu",
|
||||
@@ -5595,7 +5616,7 @@ dependencies = [
|
||||
"chrono",
|
||||
"hex",
|
||||
"indexmap 1.9.3",
|
||||
"indexmap 2.10.0",
|
||||
"indexmap 2.12.0",
|
||||
"schemars 0.9.0",
|
||||
"schemars 1.0.4",
|
||||
"serde",
|
||||
@@ -6172,11 +6193,11 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "thiserror"
|
||||
version = "2.0.16"
|
||||
version = "2.0.17"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "3467d614147380f2e4e374161426ff399c91084acd2363eaf549172b3d5e60c0"
|
||||
checksum = "f63587ca0f12b72a0600bcba1d40081f830876000bb46dd2337a3051618f4fc8"
|
||||
dependencies = [
|
||||
"thiserror-impl 2.0.16",
|
||||
"thiserror-impl 2.0.17",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -6192,9 +6213,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "thiserror-impl"
|
||||
version = "2.0.16"
|
||||
version = "2.0.17"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "6c5e1be1c48b9172ee610da68fd9cd2770e7a4056cb3fc98710ee6906f0c7960"
|
||||
checksum = "3ff15c8ecd7de3849db632e14d18d2571fa09dfc5ed93479bc4485c7a517c913"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
@@ -6413,7 +6434,7 @@ version = "0.9.5"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "75129e1dc5000bfbaa9fee9d1b21f974f9fbad9daec557a521ee6e080825f6e8"
|
||||
dependencies = [
|
||||
"indexmap 2.10.0",
|
||||
"indexmap 2.12.0",
|
||||
"serde",
|
||||
"serde_spanned",
|
||||
"toml_datetime",
|
||||
@@ -6437,7 +6458,7 @@ version = "0.23.4"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "7211ff1b8f0d3adae1663b7da9ffe396eabe1ca25f0b0bee42b0da29a9ddce93"
|
||||
dependencies = [
|
||||
"indexmap 2.10.0",
|
||||
"indexmap 2.12.0",
|
||||
"toml_datetime",
|
||||
"toml_parser",
|
||||
"toml_writer",
|
||||
@@ -6496,7 +6517,7 @@ checksum = "d039ad9159c98b70ecfd540b2573b97f7f52c3e8d9f8ad57a24b916a536975f9"
|
||||
dependencies = [
|
||||
"futures-core",
|
||||
"futures-util",
|
||||
"indexmap 2.10.0",
|
||||
"indexmap 2.12.0",
|
||||
"pin-project-lite",
|
||||
"slab",
|
||||
"sync_wrapper",
|
||||
@@ -6674,7 +6695,7 @@ checksum = "adc5f880ad8d8f94e88cb81c3557024cf1a8b75e3b504c50481ed4f5a6006ff3"
|
||||
dependencies = [
|
||||
"regex",
|
||||
"streaming-iterator",
|
||||
"thiserror 2.0.16",
|
||||
"thiserror 2.0.17",
|
||||
"tree-sitter",
|
||||
]
|
||||
|
||||
@@ -6697,7 +6718,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "6ef1b7a6d914a34127ed8e1fa927eb7088903787bcded4fa3eef8f85ee1568be"
|
||||
dependencies = [
|
||||
"serde_json",
|
||||
"thiserror 2.0.16",
|
||||
"thiserror 2.0.17",
|
||||
"ts-rs-macros",
|
||||
"uuid",
|
||||
]
|
||||
|
||||
@@ -87,6 +87,7 @@ codex-utils-pty = { path = "utils/pty" }
codex-utils-readiness = { path = "utils/readiness" }
codex-utils-string = { path = "utils/string" }
codex-utils-tokenizer = { path = "utils/tokenizer" }
+codex-windows-sandbox = { path = "windows-sandbox" }
core_test_support = { path = "core/tests/common" }
mcp-types = { path = "mcp-types" }
mcp_test_support = { path = "mcp-server/tests/common" }
@@ -127,7 +128,7 @@ icu_provider = { version = "2.1", features = ["sync"] }
icu_locale_core = "2.1"
ignore = "0.4.23"
image = { version = "^0.25.8", default-features = false }
-indexmap = "2.6.0"
+indexmap = "2.12.0"
insta = "1.43.2"
itertools = "0.14.0"
keyring = "3.6"
@@ -181,7 +182,7 @@ sys-locale = "0.3.2"
tempfile = "3.23.0"
test-log = "0.2.18"
textwrap = "0.16.2"
-thiserror = "2.0.16"
+thiserror = "2.0.17"
time = "0.3"
tiny_http = "0.12"
tokio = "1"
@@ -210,6 +211,7 @@ walkdir = "2.5.0"
webbrowser = "1.0"
which = "6"
wildmatch = "2.5.0"

wiremock = "0.6"
zeroize = "1.8.1"
@@ -63,6 +63,9 @@ codex sandbox macos [--full-auto] [COMMAND]...
# Linux
codex sandbox linux [--full-auto] [COMMAND]...

+# Windows
+codex sandbox windows [--full-auto] [COMMAND]...
+
# Legacy aliases
codex debug seatbelt [--full-auto] [COMMAND]...
codex debug landlock [--full-auto] [COMMAND]...
@@ -545,7 +545,7 @@ mod tests {
use uuid::Uuid;

#[test]
fn generated_ts_omits_undefined_unions_for_optionals() -> Result<()> {
fn generated_ts_has_no_optional_nullable_fields() -> Result<()> {
let output_dir = std::env::temp_dir().join(format!("codex_ts_types_{}", Uuid::now_v7()));
fs::create_dir(&output_dir)?;

@@ -562,7 +562,7 @@ mod tests {
generate_ts(&output_dir, None)?;

let mut undefined_offenders = Vec::new();
let mut missing_optional_marker = BTreeSet::new();
let mut optional_nullable_offenders = BTreeSet::new();
let mut stack = vec![output_dir];
while let Some(dir) = stack.pop() {
for entry in fs::read_dir(&dir)? {
@@ -591,27 +591,80 @@ mod tests {
let mut search_start = 0;
while let Some(idx) = contents[search_start..].find("| null") {
let abs_idx = search_start + idx;
let Some(colon_idx) = contents[..abs_idx].rfind(':') else {
// Find the property-colon for this field by scanning forward
// from the start of the segment and ignoring nested braces,
// brackets, and parens. This avoids colons inside nested
// type literals like `{ [k in string]?: string }`.

let line_start_idx =
contents[..abs_idx].rfind('\n').map(|i| i + 1).unwrap_or(0);

let mut segment_start_idx = line_start_idx;
if let Some(rel_idx) = contents[line_start_idx..abs_idx].rfind(',') {
segment_start_idx = segment_start_idx.max(line_start_idx + rel_idx + 1);
}
if let Some(rel_idx) = contents[line_start_idx..abs_idx].rfind('{') {
segment_start_idx = segment_start_idx.max(line_start_idx + rel_idx + 1);
}
if let Some(rel_idx) = contents[line_start_idx..abs_idx].rfind('}') {
segment_start_idx = segment_start_idx.max(line_start_idx + rel_idx + 1);
}

// Scan forward for the colon that separates the field name from its type.
let mut level_brace = 0_i32;
let mut level_brack = 0_i32;
let mut level_paren = 0_i32;
let mut in_single = false;
let mut in_double = false;
let mut escape = false;
let mut prop_colon_idx = None;
for (i, ch) in contents[segment_start_idx..abs_idx].char_indices() {
let idx_abs = segment_start_idx + i;
if escape {
escape = false;
continue;
}
match ch {
'\\' => {
// Only treat as escape when inside a string.
if in_single || in_double {
escape = true;
}
}
'\'' => {
if !in_double {
in_single = !in_single;
}
}
'"' => {
if !in_single {
in_double = !in_double;
}
}
'{' if !in_single && !in_double => level_brace += 1,
'}' if !in_single && !in_double => level_brace -= 1,
'[' if !in_single && !in_double => level_brack += 1,
']' if !in_single && !in_double => level_brack -= 1,
'(' if !in_single && !in_double => level_paren += 1,
')' if !in_single && !in_double => level_paren -= 1,
':' if !in_single
&& !in_double
&& level_brace == 0
&& level_brack == 0
&& level_paren == 0 =>
{
prop_colon_idx = Some(idx_abs);
break;
}
_ => {}
}
}

let Some(colon_idx) = prop_colon_idx else {
search_start = abs_idx + 5;
continue;
};

let line_start_idx = contents[..colon_idx]
.rfind('\n')
.map(|i| i + 1)
.unwrap_or(0);

let mut segment_start_idx = line_start_idx;
if let Some(rel_idx) = contents[line_start_idx..colon_idx].rfind(',') {
segment_start_idx = segment_start_idx.max(line_start_idx + rel_idx + 1);
}
if let Some(rel_idx) = contents[line_start_idx..colon_idx].rfind('{') {
segment_start_idx = segment_start_idx.max(line_start_idx + rel_idx + 1);
}
if let Some(rel_idx) = contents[line_start_idx..colon_idx].rfind('}') {
segment_start_idx = segment_start_idx.max(line_start_idx + rel_idx + 1);
}

let mut field_prefix = contents[segment_start_idx..colon_idx].trim();
if field_prefix.is_empty() {
search_start = abs_idx + 5;
@@ -640,25 +693,26 @@ mod tests {
continue;
}

// If the last non-whitespace before ':' is '?', then this is an
// optional field with a nullable type (i.e., "?: T | null"),
// which we explicitly disallow.
if field_prefix.chars().rev().find(|c| !c.is_whitespace()) == Some('?') {
search_start = abs_idx + 5;
continue;
let line_number =
contents[..abs_idx].chars().filter(|c| *c == '\n').count() + 1;
let offending_line_end = contents[line_start_idx..]
.find('\n')
.map(|i| line_start_idx + i)
.unwrap_or(contents.len());
let offending_snippet =
contents[line_start_idx..offending_line_end].trim();

optional_nullable_offenders.insert(format!(
"{}:{}: {offending_snippet}",
path.display(),
line_number
));
}

let line_number =
contents[..abs_idx].chars().filter(|c| *c == '\n').count() + 1;
let offending_line_end = contents[line_start_idx..]
.find('\n')
.map(|i| line_start_idx + i)
.unwrap_or(contents.len());
let offending_snippet = contents[line_start_idx..offending_line_end].trim();

missing_optional_marker.insert(format!(
"{}:{}: {offending_snippet}",
path.display(),
line_number
));

search_start = abs_idx + 5;
}
}
@@ -670,12 +724,12 @@ mod tests {
"Generated TypeScript still includes unions with `undefined` in {undefined_offenders:?}"
);

// If this test fails, it means that a struct field that is `Option<T>` in Rust
// is being generated as `T | null` in TypeScript, without the optional marker
// (`?`). To fix this, add #[ts(optional_fields = nullable)] to the struct definition.
// If this assertion fails, it means a field was generated as
// "?: T | null" — i.e., both optional (undefined) and nullable (null).
// We only want either "?: T" or ": T | null".
assert!(
missing_optional_marker.is_empty(),
"Generated TypeScript has nullable fields without an optional marker: {missing_optional_marker:?}"
optional_nullable_offenders.is_empty(),
"Generated TypeScript has optional fields with nullable types (disallowed '?: T | null'), add #[ts(optional)] to fix:\n{optional_nullable_offenders:?}"
);

Ok(())
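For reference, a minimal sketch of the two field shapes the test distinguishes, assuming ts-rs's documented `#[ts(optional)]` behavior; the `ExampleParams` type below is hypothetical and not taken from the diff:

```rust
use serde::Serialize;
use ts_rs::TS;

// Hypothetical example type, not part of the repository.
#[derive(Serialize, TS)]
pub struct ExampleParams {
    // Plain Option<T>: ts-rs emits `limit: number | null` (key required, value nullable).
    pub limit: Option<u32>,

    // With #[ts(optional)]: ts-rs emits `cursor?: string` (key may be omitted).
    #[ts(optional)]
    pub cursor: Option<String>,
    // The assertion above rejects the combined form `cursor?: string | null`.
}
```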
@@ -30,20 +30,20 @@ pub enum JSONRPCMessage {

/// A request that expects a response.
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize, JsonSchema, TS)]
-#[ts(optional_fields = nullable)]
pub struct JSONRPCRequest {
    pub id: RequestId,
    pub method: String,
    #[serde(default, skip_serializing_if = "Option::is_none")]
+    #[ts(optional)]
    pub params: Option<serde_json::Value>,
}

/// A notification which does not expect a response.
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize, JsonSchema, TS)]
-#[ts(optional_fields = nullable)]
pub struct JSONRPCNotification {
    pub method: String,
    #[serde(default, skip_serializing_if = "Option::is_none")]
+    #[ts(optional)]
    pub params: Option<serde_json::Value>,
}

@@ -62,10 +62,10 @@ pub struct JSONRPCError {
}

#[derive(Debug, Clone, PartialEq, Deserialize, Serialize, JsonSchema, TS)]
-#[ts(optional_fields = nullable)]
pub struct JSONRPCErrorError {
    pub code: i64,
    #[serde(default, skip_serializing_if = "Option::is_none")]
+    #[ts(optional)]
    pub data: Option<serde_json::Value>,
    pub message: String,
}
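As a quick illustration of what `skip_serializing_if = "Option::is_none"` does for these params fields, a self-contained sketch; the `PingRequest` type is hypothetical, only the serde attribute mirrors the structs above:

```rust
use serde::Serialize;
use serde_json::json;

// Hypothetical struct mirroring the `params` pattern used above.
#[derive(Serialize)]
struct PingRequest {
    method: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    params: Option<serde_json::Value>,
}

fn main() {
    let without = PingRequest { method: "ping".into(), params: None };
    let with = PingRequest { method: "ping".into(), params: Some(json!({ "id": 1 })) };

    // With `skip_serializing_if`, a `None` params field is omitted entirely
    // instead of being serialized as `"params": null`.
    assert_eq!(serde_json::to_value(&without).unwrap(), json!({ "method": "ping" }));
    assert_eq!(
        serde_json::to_value(&with).unwrap(),
        json!({ "method": "ping", "params": { "id": 1 } })
    );
}
```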
@@ -6,4 +6,6 @@ pub use export::generate_json;
pub use export::generate_ts;
pub use export::generate_types;
pub use jsonrpc_lite::*;
-pub use protocol::*;
+pub use protocol::common::*;
+pub use protocol::v1::*;
+pub use protocol::v2::*;
File diff suppressed because it is too large
codex-rs/app-server-protocol/src/protocol/common.rs (new file, 685 lines)
@@ -0,0 +1,685 @@
|
||||
use std::collections::HashMap;
|
||||
use std::path::PathBuf;
|
||||
|
||||
use crate::JSONRPCNotification;
|
||||
use crate::JSONRPCRequest;
|
||||
use crate::RequestId;
|
||||
use crate::protocol::v1;
|
||||
use crate::protocol::v2;
|
||||
use codex_protocol::ConversationId;
|
||||
use codex_protocol::parse_command::ParsedCommand;
|
||||
use codex_protocol::protocol::FileChange;
|
||||
use codex_protocol::protocol::RateLimitSnapshot;
|
||||
use codex_protocol::protocol::ReviewDecision;
|
||||
use codex_protocol::protocol::SandboxCommandAssessment;
|
||||
use paste::paste;
|
||||
use schemars::JsonSchema;
|
||||
use serde::Deserialize;
|
||||
use serde::Serialize;
|
||||
use strum_macros::Display;
|
||||
use ts_rs::TS;
|
||||
|
||||
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, JsonSchema, TS)]
|
||||
#[ts(type = "string")]
|
||||
pub struct GitSha(pub String);
|
||||
|
||||
impl GitSha {
|
||||
pub fn new(sha: &str) -> Self {
|
||||
Self(sha.to_string())
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq, Eq, Display, JsonSchema, TS)]
|
||||
#[serde(rename_all = "lowercase")]
|
||||
pub enum AuthMode {
|
||||
ApiKey,
|
||||
ChatGPT,
|
||||
}
|
||||
|
||||
/// Generates an `enum ClientRequest` where each variant is a request that the
|
||||
/// client can send to the server. Each variant has associated `params` and
|
||||
/// `response` types. Also generates a `export_client_responses()` function to
|
||||
/// export all response types to TypeScript.
|
||||
macro_rules! client_request_definitions {
|
||||
(
|
||||
$(
|
||||
$(#[$variant_meta:meta])*
|
||||
$variant:ident {
|
||||
params: $(#[$params_meta:meta])* $params:ty,
|
||||
response: $response:ty,
|
||||
}
|
||||
),* $(,)?
|
||||
) => {
|
||||
/// Request from the client to the server.
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
|
||||
#[serde(tag = "method", rename_all = "camelCase")]
|
||||
pub enum ClientRequest {
|
||||
$(
|
||||
$(#[$variant_meta])*
|
||||
$variant {
|
||||
#[serde(rename = "id")]
|
||||
request_id: RequestId,
|
||||
$(#[$params_meta])*
|
||||
params: $params,
|
||||
},
|
||||
)*
|
||||
}
|
||||
|
||||
pub fn export_client_responses(
|
||||
out_dir: &::std::path::Path,
|
||||
) -> ::std::result::Result<(), ::ts_rs::ExportError> {
|
||||
$(
|
||||
<$response as ::ts_rs::TS>::export_all_to(out_dir)?;
|
||||
)*
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn export_client_response_schemas(
|
||||
out_dir: &::std::path::Path,
|
||||
) -> ::anyhow::Result<()> {
|
||||
$(
|
||||
crate::export::write_json_schema::<$response>(out_dir, stringify!($response))?;
|
||||
)*
|
||||
Ok(())
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
client_request_definitions! {
|
||||
/// NEW APIs
|
||||
#[serde(rename = "model/list")]
|
||||
#[ts(rename = "model/list")]
|
||||
ListModels {
|
||||
params: v2::ListModelsParams,
|
||||
response: v2::ListModelsResponse,
|
||||
},
|
||||
|
||||
#[serde(rename = "account/login")]
|
||||
#[ts(rename = "account/login")]
|
||||
LoginAccount {
|
||||
params: v2::LoginAccountParams,
|
||||
response: v2::LoginAccountResponse,
|
||||
},
|
||||
|
||||
#[serde(rename = "account/logout")]
|
||||
#[ts(rename = "account/logout")]
|
||||
LogoutAccount {
|
||||
params: #[ts(type = "undefined")] #[serde(skip_serializing_if = "Option::is_none")] Option<()>,
|
||||
response: v2::LogoutAccountResponse,
|
||||
},
|
||||
|
||||
#[serde(rename = "account/rateLimits/read")]
|
||||
#[ts(rename = "account/rateLimits/read")]
|
||||
GetAccountRateLimits {
|
||||
params: #[ts(type = "undefined")] #[serde(skip_serializing_if = "Option::is_none")] Option<()>,
|
||||
response: v2::GetAccountRateLimitsResponse,
|
||||
},
|
||||
|
||||
#[serde(rename = "feedback/upload")]
|
||||
#[ts(rename = "feedback/upload")]
|
||||
UploadFeedback {
|
||||
params: v2::UploadFeedbackParams,
|
||||
response: v2::UploadFeedbackResponse,
|
||||
},
|
||||
|
||||
#[serde(rename = "account/read")]
|
||||
#[ts(rename = "account/read")]
|
||||
GetAccount {
|
||||
params: #[ts(type = "undefined")] #[serde(skip_serializing_if = "Option::is_none")] Option<()>,
|
||||
response: v2::GetAccountResponse,
|
||||
},
|
||||
|
||||
/// DEPRECATED APIs below
|
||||
Initialize {
|
||||
params: v1::InitializeParams,
|
||||
response: v1::InitializeResponse,
|
||||
},
|
||||
NewConversation {
|
||||
params: v1::NewConversationParams,
|
||||
response: v1::NewConversationResponse,
|
||||
},
|
||||
GetConversationSummary {
|
||||
params: v1::GetConversationSummaryParams,
|
||||
response: v1::GetConversationSummaryResponse,
|
||||
},
|
||||
/// List recorded Codex conversations (rollouts) with optional pagination and search.
|
||||
ListConversations {
|
||||
params: v1::ListConversationsParams,
|
||||
response: v1::ListConversationsResponse,
|
||||
},
|
||||
/// Resume a recorded Codex conversation from a rollout file.
|
||||
ResumeConversation {
|
||||
params: v1::ResumeConversationParams,
|
||||
response: v1::ResumeConversationResponse,
|
||||
},
|
||||
ArchiveConversation {
|
||||
params: v1::ArchiveConversationParams,
|
||||
response: v1::ArchiveConversationResponse,
|
||||
},
|
||||
SendUserMessage {
|
||||
params: v1::SendUserMessageParams,
|
||||
response: v1::SendUserMessageResponse,
|
||||
},
|
||||
SendUserTurn {
|
||||
params: v1::SendUserTurnParams,
|
||||
response: v1::SendUserTurnResponse,
|
||||
},
|
||||
InterruptConversation {
|
||||
params: v1::InterruptConversationParams,
|
||||
response: v1::InterruptConversationResponse,
|
||||
},
|
||||
AddConversationListener {
|
||||
params: v1::AddConversationListenerParams,
|
||||
response: v1::AddConversationSubscriptionResponse,
|
||||
},
|
||||
RemoveConversationListener {
|
||||
params: v1::RemoveConversationListenerParams,
|
||||
response: v1::RemoveConversationSubscriptionResponse,
|
||||
},
|
||||
GitDiffToRemote {
|
||||
params: v1::GitDiffToRemoteParams,
|
||||
response: v1::GitDiffToRemoteResponse,
|
||||
},
|
||||
LoginApiKey {
|
||||
params: v1::LoginApiKeyParams,
|
||||
response: v1::LoginApiKeyResponse,
|
||||
},
|
||||
LoginChatGpt {
|
||||
params: #[ts(type = "undefined")] #[serde(skip_serializing_if = "Option::is_none")] Option<()>,
|
||||
response: v1::LoginChatGptResponse,
|
||||
},
|
||||
CancelLoginChatGpt {
|
||||
params: v1::CancelLoginChatGptParams,
|
||||
response: v1::CancelLoginChatGptResponse,
|
||||
},
|
||||
LogoutChatGpt {
|
||||
params: #[ts(type = "undefined")] #[serde(skip_serializing_if = "Option::is_none")] Option<()>,
|
||||
response: v1::LogoutChatGptResponse,
|
||||
},
|
||||
GetAuthStatus {
|
||||
params: v1::GetAuthStatusParams,
|
||||
response: v1::GetAuthStatusResponse,
|
||||
},
|
||||
GetUserSavedConfig {
|
||||
params: #[ts(type = "undefined")] #[serde(skip_serializing_if = "Option::is_none")] Option<()>,
|
||||
response: v1::GetUserSavedConfigResponse,
|
||||
},
|
||||
SetDefaultModel {
|
||||
params: v1::SetDefaultModelParams,
|
||||
response: v1::SetDefaultModelResponse,
|
||||
},
|
||||
GetUserAgent {
|
||||
params: #[ts(type = "undefined")] #[serde(skip_serializing_if = "Option::is_none")] Option<()>,
|
||||
response: v1::GetUserAgentResponse,
|
||||
},
|
||||
UserInfo {
|
||||
params: #[ts(type = "undefined")] #[serde(skip_serializing_if = "Option::is_none")] Option<()>,
|
||||
response: v1::UserInfoResponse,
|
||||
},
|
||||
FuzzyFileSearch {
|
||||
params: FuzzyFileSearchParams,
|
||||
response: FuzzyFileSearchResponse,
|
||||
},
|
||||
/// Execute a command (argv vector) under the server's sandbox.
|
||||
ExecOneOffCommand {
|
||||
params: v1::ExecOneOffCommandParams,
|
||||
response: v1::ExecOneOffCommandResponse,
|
||||
},
|
||||
}
|
||||
|
||||
/// Generates an `enum ServerRequest` where each variant is a request that the
|
||||
/// server can send to the client along with the corresponding params and
|
||||
/// response types. It also generates helper types used by the app/server
|
||||
/// infrastructure (payload enum, request constructor, and export helpers).
|
||||
macro_rules! server_request_definitions {
|
||||
(
|
||||
$(
|
||||
$(#[$variant_meta:meta])*
|
||||
$variant:ident
|
||||
),* $(,)?
|
||||
) => {
|
||||
paste! {
|
||||
/// Request initiated from the server and sent to the client.
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
|
||||
#[serde(tag = "method", rename_all = "camelCase")]
|
||||
pub enum ServerRequest {
|
||||
$(
|
||||
$(#[$variant_meta])*
|
||||
$variant {
|
||||
#[serde(rename = "id")]
|
||||
request_id: RequestId,
|
||||
params: [<$variant Params>],
|
||||
},
|
||||
)*
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, JsonSchema)]
|
||||
pub enum ServerRequestPayload {
|
||||
$( $variant([<$variant Params>]), )*
|
||||
}
|
||||
|
||||
impl ServerRequestPayload {
|
||||
pub fn request_with_id(self, request_id: RequestId) -> ServerRequest {
|
||||
match self {
|
||||
$(Self::$variant(params) => ServerRequest::$variant { request_id, params },)*
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn export_server_responses(
|
||||
out_dir: &::std::path::Path,
|
||||
) -> ::std::result::Result<(), ::ts_rs::ExportError> {
|
||||
paste! {
|
||||
$(<[<$variant Response>] as ::ts_rs::TS>::export_all_to(out_dir)?;)*
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn export_server_response_schemas(
|
||||
out_dir: &::std::path::Path,
|
||||
) -> ::anyhow::Result<()> {
|
||||
paste! {
|
||||
$(crate::export::write_json_schema::<[<$variant Response>]>(out_dir, stringify!([<$variant Response>]))?;)*
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
impl TryFrom<JSONRPCRequest> for ServerRequest {
|
||||
type Error = serde_json::Error;
|
||||
|
||||
fn try_from(value: JSONRPCRequest) -> Result<Self, Self::Error> {
|
||||
serde_json::from_value(serde_json::to_value(value)?)
|
||||
}
|
||||
}
|
||||
|
||||
server_request_definitions! {
|
||||
/// Request to approve a patch.
|
||||
ApplyPatchApproval,
|
||||
/// Request to exec a command.
|
||||
ExecCommandApproval,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct ApplyPatchApprovalParams {
|
||||
pub conversation_id: ConversationId,
|
||||
/// Use to correlate this with [codex_core::protocol::PatchApplyBeginEvent]
|
||||
/// and [codex_core::protocol::PatchApplyEndEvent].
|
||||
pub call_id: String,
|
||||
pub file_changes: HashMap<PathBuf, FileChange>,
|
||||
/// Optional explanatory reason (e.g. request for extra write access).
|
||||
pub reason: Option<String>,
|
||||
/// When set, the agent is asking the user to allow writes under this root
|
||||
/// for the remainder of the session (unclear if this is honored today).
|
||||
pub grant_root: Option<PathBuf>,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct ExecCommandApprovalParams {
|
||||
pub conversation_id: ConversationId,
|
||||
/// Use to correlate this with [codex_core::protocol::ExecCommandBeginEvent]
|
||||
/// and [codex_core::protocol::ExecCommandEndEvent].
|
||||
pub call_id: String,
|
||||
pub command: Vec<String>,
|
||||
pub cwd: PathBuf,
|
||||
pub reason: Option<String>,
|
||||
pub risk: Option<SandboxCommandAssessment>,
|
||||
pub parsed_cmd: Vec<ParsedCommand>,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
|
||||
pub struct ExecCommandApprovalResponse {
|
||||
pub decision: ReviewDecision,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
|
||||
pub struct ApplyPatchApprovalResponse {
|
||||
pub decision: ReviewDecision,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
#[ts(rename_all = "camelCase")]
|
||||
pub struct FuzzyFileSearchParams {
|
||||
pub query: String,
|
||||
pub roots: Vec<String>,
|
||||
// if provided, will cancel any previous request that used the same value
|
||||
pub cancellation_token: Option<String>,
|
||||
}
|
||||
|
||||
/// Superset of [`codex_file_search::FileMatch`]
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
|
||||
pub struct FuzzyFileSearchResult {
|
||||
pub root: String,
|
||||
pub path: String,
|
||||
pub file_name: String,
|
||||
pub score: u32,
|
||||
pub indices: Option<Vec<u32>>,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
|
||||
pub struct FuzzyFileSearchResponse {
|
||||
pub files: Vec<FuzzyFileSearchResult>,
|
||||
}
|
||||
|
||||
/// Notification sent from the server to the client.
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema, TS, Display)]
|
||||
#[serde(tag = "method", content = "params", rename_all = "camelCase")]
|
||||
#[strum(serialize_all = "camelCase")]
|
||||
pub enum ServerNotification {
|
||||
/// NEW NOTIFICATIONS
|
||||
#[serde(rename = "account/rateLimits/updated")]
|
||||
#[ts(rename = "account/rateLimits/updated")]
|
||||
#[strum(serialize = "account/rateLimits/updated")]
|
||||
AccountRateLimitsUpdated(RateLimitSnapshot),
|
||||
|
||||
/// DEPRECATED NOTIFICATIONS below
|
||||
/// Authentication status changed
|
||||
AuthStatusChange(v1::AuthStatusChangeNotification),
|
||||
|
||||
/// ChatGPT login flow completed
|
||||
LoginChatGptComplete(v1::LoginChatGptCompleteNotification),
|
||||
|
||||
/// The special session configured event for a new or resumed conversation.
|
||||
SessionConfigured(v1::SessionConfiguredNotification),
|
||||
}
|
||||
|
||||
impl ServerNotification {
|
||||
pub fn to_params(self) -> Result<serde_json::Value, serde_json::Error> {
|
||||
match self {
|
||||
ServerNotification::AccountRateLimitsUpdated(params) => serde_json::to_value(params),
|
||||
ServerNotification::AuthStatusChange(params) => serde_json::to_value(params),
|
||||
ServerNotification::LoginChatGptComplete(params) => serde_json::to_value(params),
|
||||
ServerNotification::SessionConfigured(params) => serde_json::to_value(params),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl TryFrom<JSONRPCNotification> for ServerNotification {
|
||||
type Error = serde_json::Error;
|
||||
|
||||
fn try_from(value: JSONRPCNotification) -> Result<Self, Self::Error> {
|
||||
serde_json::from_value(serde_json::to_value(value)?)
|
||||
}
|
||||
}
|
||||
|
||||
/// Notification sent from the client to the server.
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema, TS, Display)]
|
||||
#[serde(tag = "method", content = "params", rename_all = "camelCase")]
|
||||
#[strum(serialize_all = "camelCase")]
|
||||
pub enum ClientNotification {
|
||||
Initialized,
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use anyhow::Result;
|
||||
use codex_protocol::account::PlanType;
|
||||
use codex_protocol::protocol::AskForApproval;
|
||||
use pretty_assertions::assert_eq;
|
||||
use serde_json::json;
|
||||
|
||||
#[test]
|
||||
fn serialize_new_conversation() -> Result<()> {
|
||||
let request = ClientRequest::NewConversation {
|
||||
request_id: RequestId::Integer(42),
|
||||
params: v1::NewConversationParams {
|
||||
model: Some("gpt-5-codex".to_string()),
|
||||
model_provider: None,
|
||||
profile: None,
|
||||
cwd: None,
|
||||
approval_policy: Some(AskForApproval::OnRequest),
|
||||
sandbox: None,
|
||||
config: None,
|
||||
base_instructions: None,
|
||||
developer_instructions: None,
|
||||
compact_prompt: None,
|
||||
include_apply_patch_tool: None,
|
||||
},
|
||||
};
|
||||
assert_eq!(
|
||||
json!({
|
||||
"method": "newConversation",
|
||||
"id": 42,
|
||||
"params": {
|
||||
"model": "gpt-5-codex",
|
||||
"modelProvider": null,
|
||||
"profile": null,
|
||||
"cwd": null,
|
||||
"approvalPolicy": "on-request",
|
||||
"sandbox": null,
|
||||
"config": null,
|
||||
"baseInstructions": null,
|
||||
"includeApplyPatchTool": null
|
||||
}
|
||||
}),
|
||||
serde_json::to_value(&request)?,
|
||||
);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn conversation_id_serializes_as_plain_string() -> Result<()> {
|
||||
let id = ConversationId::from_string("67e55044-10b1-426f-9247-bb680e5fe0c8")?;
|
||||
|
||||
assert_eq!(
|
||||
json!("67e55044-10b1-426f-9247-bb680e5fe0c8"),
|
||||
serde_json::to_value(id)?
|
||||
);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn conversation_id_deserializes_from_plain_string() -> Result<()> {
|
||||
let id: ConversationId =
|
||||
serde_json::from_value(json!("67e55044-10b1-426f-9247-bb680e5fe0c8"))?;
|
||||
|
||||
assert_eq!(
|
||||
ConversationId::from_string("67e55044-10b1-426f-9247-bb680e5fe0c8")?,
|
||||
id,
|
||||
);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn serialize_client_notification() -> Result<()> {
|
||||
let notification = ClientNotification::Initialized;
|
||||
// Note there is no "params" field for this notification.
|
||||
assert_eq!(
|
||||
json!({
|
||||
"method": "initialized",
|
||||
}),
|
||||
serde_json::to_value(¬ification)?,
|
||||
);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn serialize_server_request() -> Result<()> {
|
||||
let conversation_id = ConversationId::from_string("67e55044-10b1-426f-9247-bb680e5fe0c8")?;
|
||||
let params = ExecCommandApprovalParams {
|
||||
conversation_id,
|
||||
call_id: "call-42".to_string(),
|
||||
command: vec!["echo".to_string(), "hello".to_string()],
|
||||
cwd: PathBuf::from("/tmp"),
|
||||
reason: Some("because tests".to_string()),
|
||||
risk: None,
|
||||
parsed_cmd: vec![ParsedCommand::Unknown {
|
||||
cmd: "echo hello".to_string(),
|
||||
}],
|
||||
};
|
||||
let request = ServerRequest::ExecCommandApproval {
|
||||
request_id: RequestId::Integer(7),
|
||||
params: params.clone(),
|
||||
};
|
||||
|
||||
assert_eq!(
|
||||
json!({
|
||||
"method": "execCommandApproval",
|
||||
"id": 7,
|
||||
"params": {
|
||||
"conversationId": "67e55044-10b1-426f-9247-bb680e5fe0c8",
|
||||
"callId": "call-42",
|
||||
"command": ["echo", "hello"],
|
||||
"cwd": "/tmp",
|
||||
"reason": "because tests",
|
||||
"risk": null,
|
||||
"parsedCmd": [
|
||||
{
|
||||
"type": "unknown",
|
||||
"cmd": "echo hello"
|
||||
}
|
||||
]
|
||||
}
|
||||
}),
|
||||
serde_json::to_value(&request)?,
|
||||
);
|
||||
|
||||
let payload = ServerRequestPayload::ExecCommandApproval(params);
|
||||
assert_eq!(payload.request_with_id(RequestId::Integer(7)), request);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn serialize_get_account_rate_limits() -> Result<()> {
|
||||
let request = ClientRequest::GetAccountRateLimits {
|
||||
request_id: RequestId::Integer(1),
|
||||
params: None,
|
||||
};
|
||||
assert_eq!(
|
||||
json!({
|
||||
"method": "account/rateLimits/read",
|
||||
"id": 1,
|
||||
}),
|
||||
serde_json::to_value(&request)?,
|
||||
);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn serialize_account_login_api_key() -> Result<()> {
|
||||
let request = ClientRequest::LoginAccount {
|
||||
request_id: RequestId::Integer(2),
|
||||
params: v2::LoginAccountParams::ApiKey {
|
||||
api_key: "secret".to_string(),
|
||||
},
|
||||
};
|
||||
assert_eq!(
|
||||
json!({
|
||||
"method": "account/login",
|
||||
"id": 2,
|
||||
"params": {
|
||||
"type": "apiKey",
|
||||
"apiKey": "secret"
|
||||
}
|
||||
}),
|
||||
serde_json::to_value(&request)?,
|
||||
);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn serialize_account_login_chatgpt() -> Result<()> {
|
||||
let request = ClientRequest::LoginAccount {
|
||||
request_id: RequestId::Integer(3),
|
||||
params: v2::LoginAccountParams::ChatGpt,
|
||||
};
|
||||
assert_eq!(
|
||||
json!({
|
||||
"method": "account/login",
|
||||
"id": 3,
|
||||
"params": {
|
||||
"type": "chatgpt"
|
||||
}
|
||||
}),
|
||||
serde_json::to_value(&request)?,
|
||||
);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn serialize_account_logout() -> Result<()> {
|
||||
let request = ClientRequest::LogoutAccount {
|
||||
request_id: RequestId::Integer(4),
|
||||
params: None,
|
||||
};
|
||||
assert_eq!(
|
||||
json!({
|
||||
"method": "account/logout",
|
||||
"id": 4,
|
||||
}),
|
||||
serde_json::to_value(&request)?,
|
||||
);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn serialize_get_account() -> Result<()> {
|
||||
let request = ClientRequest::GetAccount {
|
||||
request_id: RequestId::Integer(5),
|
||||
params: None,
|
||||
};
|
||||
assert_eq!(
|
||||
json!({
|
||||
"method": "account/read",
|
||||
"id": 5,
|
||||
}),
|
||||
serde_json::to_value(&request)?,
|
||||
);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn account_serializes_fields_in_camel_case() -> Result<()> {
|
||||
let api_key = v2::Account::ApiKey {
|
||||
api_key: "secret".to_string(),
|
||||
};
|
||||
assert_eq!(
|
||||
json!({
|
||||
"type": "apiKey",
|
||||
"apiKey": "secret",
|
||||
}),
|
||||
serde_json::to_value(&api_key)?,
|
||||
);
|
||||
|
||||
let chatgpt = v2::Account::ChatGpt {
|
||||
email: Some("user@example.com".to_string()),
|
||||
plan_type: PlanType::Plus,
|
||||
};
|
||||
assert_eq!(
|
||||
json!({
|
||||
"type": "chatgpt",
|
||||
"email": "user@example.com",
|
||||
"planType": "plus",
|
||||
}),
|
||||
serde_json::to_value(&chatgpt)?,
|
||||
);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn serialize_list_models() -> Result<()> {
|
||||
let request = ClientRequest::ListModels {
|
||||
request_id: RequestId::Integer(6),
|
||||
params: v2::ListModelsParams::default(),
|
||||
};
|
||||
assert_eq!(
|
||||
json!({
|
||||
"method": "model/list",
|
||||
"id": 6,
|
||||
"params": {
|
||||
"pageSize": null,
|
||||
"cursor": null
|
||||
}
|
||||
}),
|
||||
serde_json::to_value(&request)?,
|
||||
);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
codex-rs/app-server-protocol/src/protocol/mod.rs (new file, 6 lines)
@@ -0,0 +1,6 @@
+// Module declarations for the app-server protocol namespace.
+// Exposes protocol pieces used by `lib.rs` via `pub use protocol::common::*;`.
+
+pub mod common;
+pub mod v1;
+pub mod v2;
codex-rs/app-server-protocol/src/protocol/v1.rs (new file, 405 lines)
@@ -0,0 +1,405 @@
|
||||
use std::collections::HashMap;
|
||||
use std::path::PathBuf;
|
||||
|
||||
use codex_protocol::ConversationId;
|
||||
use codex_protocol::config_types::ForcedLoginMethod;
|
||||
use codex_protocol::config_types::ReasoningEffort;
|
||||
use codex_protocol::config_types::ReasoningSummary;
|
||||
use codex_protocol::config_types::SandboxMode;
|
||||
use codex_protocol::config_types::Verbosity;
|
||||
use codex_protocol::models::ResponseItem;
|
||||
use codex_protocol::protocol::AskForApproval;
|
||||
use codex_protocol::protocol::EventMsg;
|
||||
use codex_protocol::protocol::SandboxPolicy;
|
||||
use codex_protocol::protocol::TurnAbortReason;
|
||||
use schemars::JsonSchema;
|
||||
use serde::Deserialize;
|
||||
use serde::Serialize;
|
||||
use ts_rs::TS;
|
||||
use uuid::Uuid;
|
||||
|
||||
// Reuse shared types defined in `common.rs`.
|
||||
use crate::protocol::common::AuthMode;
|
||||
use crate::protocol::common::GitSha;
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default, JsonSchema, TS)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct InitializeParams {
|
||||
pub client_info: ClientInfo,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default, JsonSchema, TS)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct ClientInfo {
|
||||
pub name: String,
|
||||
pub title: Option<String>,
|
||||
pub version: String,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct InitializeResponse {
|
||||
pub user_agent: String,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default, JsonSchema, TS)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct NewConversationParams {
|
||||
pub model: Option<String>,
|
||||
pub model_provider: Option<String>,
|
||||
pub profile: Option<String>,
|
||||
pub cwd: Option<String>,
|
||||
pub approval_policy: Option<AskForApproval>,
|
||||
pub sandbox: Option<SandboxMode>,
|
||||
pub config: Option<HashMap<String, serde_json::Value>>,
|
||||
pub base_instructions: Option<String>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub developer_instructions: Option<String>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub compact_prompt: Option<String>,
|
||||
pub include_apply_patch_tool: Option<bool>,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct NewConversationResponse {
|
||||
pub conversation_id: ConversationId,
|
||||
pub model: String,
|
||||
pub reasoning_effort: Option<ReasoningEffort>,
|
||||
pub rollout_path: PathBuf,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema, TS)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct ResumeConversationResponse {
|
||||
pub conversation_id: ConversationId,
|
||||
pub model: String,
|
||||
pub initial_messages: Option<Vec<EventMsg>>,
|
||||
pub rollout_path: PathBuf,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
|
||||
#[serde(untagged)]
|
||||
pub enum GetConversationSummaryParams {
|
||||
RolloutPath {
|
||||
#[serde(rename = "rolloutPath")]
|
||||
rollout_path: PathBuf,
|
||||
},
|
||||
ConversationId {
|
||||
#[serde(rename = "conversationId")]
|
||||
conversation_id: ConversationId,
|
||||
},
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema, TS)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct GetConversationSummaryResponse {
|
||||
pub summary: ConversationSummary,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default, JsonSchema, TS)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct ListConversationsParams {
|
||||
pub page_size: Option<usize>,
|
||||
pub cursor: Option<String>,
|
||||
pub model_providers: Option<Vec<String>>,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct ConversationSummary {
|
||||
pub conversation_id: ConversationId,
|
||||
pub path: PathBuf,
|
||||
pub preview: String,
|
||||
pub timestamp: Option<String>,
|
||||
pub model_provider: String,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct ListConversationsResponse {
|
||||
pub items: Vec<ConversationSummary>,
|
||||
pub next_cursor: Option<String>,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct ResumeConversationParams {
|
||||
pub path: Option<PathBuf>,
|
||||
pub conversation_id: Option<ConversationId>,
|
||||
pub history: Option<Vec<ResponseItem>>,
|
||||
pub overrides: Option<NewConversationParams>,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct AddConversationSubscriptionResponse {
|
||||
#[schemars(with = "String")]
|
||||
pub subscription_id: Uuid,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct ArchiveConversationParams {
|
||||
pub conversation_id: ConversationId,
|
||||
pub rollout_path: PathBuf,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct ArchiveConversationResponse {}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct RemoveConversationSubscriptionResponse {}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct LoginApiKeyParams {
|
||||
pub api_key: String,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct LoginApiKeyResponse {}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct LoginChatGptResponse {
|
||||
#[schemars(with = "String")]
|
||||
pub login_id: Uuid,
|
||||
pub auth_url: String,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct GitDiffToRemoteResponse {
|
||||
pub sha: GitSha,
|
||||
pub diff: String,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct CancelLoginChatGptParams {
|
||||
#[schemars(with = "String")]
|
||||
pub login_id: Uuid,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct GitDiffToRemoteParams {
|
||||
pub cwd: PathBuf,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct CancelLoginChatGptResponse {}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct LogoutChatGptParams {}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct LogoutChatGptResponse {}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct GetAuthStatusParams {
|
||||
pub include_token: Option<bool>,
|
||||
pub refresh_token: Option<bool>,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct ExecOneOffCommandParams {
|
||||
pub command: Vec<String>,
|
||||
pub timeout_ms: Option<u64>,
|
||||
pub cwd: Option<PathBuf>,
|
||||
pub sandbox_policy: Option<SandboxPolicy>,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct ExecOneOffCommandResponse {
|
||||
pub exit_code: i32,
|
||||
pub stdout: String,
|
||||
pub stderr: String,
|
||||
}

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct GetAuthStatusResponse {
    pub auth_method: Option<AuthMode>,
    pub auth_token: Option<String>,
    pub requires_openai_auth: Option<bool>,
}

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct GetUserAgentResponse {
    pub user_agent: String,
}

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct UserInfoResponse {
    pub alleged_user_email: Option<String>,
}

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct GetUserSavedConfigResponse {
    pub config: UserSavedConfig,
}

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct SetDefaultModelParams {
    pub model: Option<String>,
    pub reasoning_effort: Option<ReasoningEffort>,
}

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct SetDefaultModelResponse {}

#[derive(Deserialize, Debug, Clone, PartialEq, Serialize, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct UserSavedConfig {
    pub approval_policy: Option<AskForApproval>,
    pub sandbox_mode: Option<SandboxMode>,
    pub sandbox_settings: Option<SandboxSettings>,
    pub forced_chatgpt_workspace_id: Option<String>,
    pub forced_login_method: Option<ForcedLoginMethod>,
    pub model: Option<String>,
    pub model_reasoning_effort: Option<ReasoningEffort>,
    pub model_reasoning_summary: Option<ReasoningSummary>,
    pub model_verbosity: Option<Verbosity>,
    pub tools: Option<Tools>,
    pub profile: Option<String>,
    pub profiles: HashMap<String, Profile>,
}

#[derive(Deserialize, Debug, Clone, PartialEq, Serialize, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct Profile {
    pub model: Option<String>,
    pub model_provider: Option<String>,
    pub approval_policy: Option<AskForApproval>,
    pub model_reasoning_effort: Option<ReasoningEffort>,
    pub model_reasoning_summary: Option<ReasoningSummary>,
    pub model_verbosity: Option<Verbosity>,
    pub chatgpt_base_url: Option<String>,
}

#[derive(Deserialize, Debug, Clone, PartialEq, Serialize, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct Tools {
    pub web_search: Option<bool>,
    pub view_image: Option<bool>,
}

#[derive(Deserialize, Debug, Clone, PartialEq, Serialize, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct SandboxSettings {
    #[serde(default)]
    pub writable_roots: Vec<PathBuf>,
    pub network_access: Option<bool>,
    pub exclude_tmpdir_env_var: Option<bool>,
    pub exclude_slash_tmp: Option<bool>,
}
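A minimal sketch of the camelCase wire form these saved-config types imply (the JSON values are invented for illustration):

    // Illustrative only: SandboxSettings fields arrive in camelCase; a missing
    // "writableRoots" falls back to an empty Vec via #[serde(default)], and the
    // remaining Option fields default to None when omitted.
    let settings: SandboxSettings = serde_json::from_str(r#"{"networkAccess": true}"#).unwrap();
    assert!(settings.writable_roots.is_empty());
    assert_eq!(settings.network_access, Some(true));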

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct SendUserMessageParams {
    pub conversation_id: ConversationId,
    pub items: Vec<InputItem>,
}

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct SendUserTurnParams {
    pub conversation_id: ConversationId,
    pub items: Vec<InputItem>,
    pub cwd: PathBuf,
    pub approval_policy: AskForApproval,
    pub sandbox_policy: SandboxPolicy,
    pub model: String,
    pub effort: Option<ReasoningEffort>,
    pub summary: ReasoningSummary,
}

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct SendUserTurnResponse {}

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct InterruptConversationParams {
    pub conversation_id: ConversationId,
}

#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct InterruptConversationResponse {
    pub abort_reason: TurnAbortReason,
}

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct SendUserMessageResponse {}

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct AddConversationListenerParams {
    pub conversation_id: ConversationId,
    #[serde(default)]
    pub experimental_raw_events: bool,
}

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct RemoveConversationListenerParams {
    #[schemars(with = "String")]
    pub subscription_id: Uuid,
}

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[serde(tag = "type", content = "data")]
pub enum InputItem {
    Text { text: String },
    Image { image_url: String },
    LocalImage { path: PathBuf },
}
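A minimal sketch of how this adjacently tagged enum serializes, assuming the crate's serde setup shown above (the file path is invented):

    // Illustrative only: InputItem serializes as {"type": ..., "data": {...}},
    // with variant names camelCased by the enum-level rename_all.
    let item = InputItem::LocalImage { path: PathBuf::from("/tmp/screenshot.png") };
    assert_eq!(
        serde_json::to_string(&item).unwrap(),
        r#"{"type":"localImage","data":{"path":"/tmp/screenshot.png"}}"#
    );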

// Deprecated notifications (v1)

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct LoginChatGptCompleteNotification {
    #[schemars(with = "String")]
    pub login_id: Uuid,
    pub success: bool,
    pub error: Option<String>,
}

#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct SessionConfiguredNotification {
    pub session_id: ConversationId,
    pub model: String,
    pub reasoning_effort: Option<ReasoningEffort>,
    pub history_log_id: u64,
    #[ts(type = "number")]
    pub history_entry_count: usize,
    pub initial_messages: Option<Vec<EventMsg>>,
    pub rollout_path: PathBuf,
}

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct AuthStatusChangeNotification {
    pub auth_method: Option<AuthMode>,
}

codex-rs/app-server-protocol/src/protocol/v2.rs (new file, 122 lines)
@@ -0,0 +1,122 @@
use codex_protocol::ConversationId;
use codex_protocol::account::PlanType;
use codex_protocol::config_types::ReasoningEffort;
use codex_protocol::protocol::RateLimitSnapshot;
use schemars::JsonSchema;
use serde::Deserialize;
use serde::Serialize;
use ts_rs::TS;
use uuid::Uuid;

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(tag = "type", rename_all = "camelCase")]
#[ts(tag = "type")]
pub enum Account {
    #[serde(rename = "apiKey", rename_all = "camelCase")]
    #[ts(rename = "apiKey", rename_all = "camelCase")]
    ApiKey { api_key: String },

    #[serde(rename = "chatgpt", rename_all = "camelCase")]
    #[ts(rename = "chatgpt", rename_all = "camelCase")]
    ChatGpt {
        email: Option<String>,
        plan_type: PlanType,
    },
}
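A minimal sketch of the internally tagged wire form, assuming the serde attributes above (the key value is a placeholder):

    // Illustrative only: Account is tagged on "type"; the apiKey variant keeps
    // its payload inline and camelCases the field name.
    let account: Account =
        serde_json::from_str(r#"{"type":"apiKey","apiKey":"sk-example"}"#).unwrap();
    assert!(matches!(account, Account::ApiKey { .. }));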

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(tag = "type")]
#[ts(tag = "type")]
pub enum LoginAccountParams {
    #[serde(rename = "apiKey")]
    #[ts(rename = "apiKey")]
    ApiKey {
        #[serde(rename = "apiKey")]
        #[ts(rename = "apiKey")]
        api_key: String,
    },
    #[serde(rename = "chatgpt")]
    #[ts(rename = "chatgpt")]
    ChatGpt,
}

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct LoginAccountResponse {
    /// Only set if the login method is ChatGPT.
    #[schemars(with = "String")]
    pub login_id: Option<Uuid>,

    /// URL the client should open in a browser to initiate the OAuth flow.
    /// Only set if the login method is ChatGPT.
    pub auth_url: Option<String>,
}

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct LogoutAccountResponse {}

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct GetAccountRateLimitsResponse {
    pub rate_limits: RateLimitSnapshot,
}

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct GetAccountResponse {
    pub account: Account,
}

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct ListModelsParams {
    /// Optional page size; defaults to a reasonable server-side value.
    pub page_size: Option<usize>,
    /// Opaque pagination cursor returned by a previous call.
    pub cursor: Option<String>,
}

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct Model {
    pub id: String,
    pub model: String,
    pub display_name: String,
    pub description: String,
    pub supported_reasoning_efforts: Vec<ReasoningEffortOption>,
    pub default_reasoning_effort: ReasoningEffort,
    // Only one model should be marked as default.
    pub is_default: bool,
}

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct ReasoningEffortOption {
    pub reasoning_effort: ReasoningEffort,
    pub description: String,
}

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct ListModelsResponse {
    pub items: Vec<Model>,
    /// Opaque cursor to pass to the next call to continue after the last item.
    /// If None, there are no more items to return.
    pub next_cursor: Option<String>,
}
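A hedged sketch of how a client might drain the cursor-based pagination; `list_models` below stands in for whatever call issues the request and is not defined in this diff:

    // Illustrative only: keep requesting pages until next_cursor is None.
    let mut cursor: Option<String> = None;
    let mut models = Vec::new();
    loop {
        let resp: ListModelsResponse = list_models(ListModelsParams {
            page_size: Some(50),
            cursor: cursor.clone(),
        })
        .await?;
        models.extend(resp.items);
        match resp.next_cursor {
            Some(next) => cursor = Some(next),
            None => break,
        }
    }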

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct UploadFeedbackParams {
    pub classification: String,
    pub reason: Option<String>,
    pub conversation_id: Option<ConversationId>,
    pub include_logs: bool,
}

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct UploadFeedbackResponse {
    pub thread_id: String,
}
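A minimal sketch of a feedback payload; the classification and reason strings are invented for illustration:

    // Illustrative only: conversation_id/include_logs become
    // "conversationId"/"includeLogs" on the wire.
    let params = UploadFeedbackParams {
        classification: "bug".to_string(),
        reason: Some("tool call hung".to_string()),
        conversation_id: None,
        include_logs: true,
    };
    println!("{}", serde_json::to_string(&params).unwrap());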

@@ -73,8 +73,8 @@ use codex_core::auth::login_with_api_key;
use codex_core::config::Config;
use codex_core::config::ConfigOverrides;
use codex_core::config::ConfigToml;
use codex_core::config::load_config_as_toml;
use codex_core::config_edit::ConfigEditsBuilder;
use codex_core::config::edit::ConfigEditsBuilder;
use codex_core::config_loader::load_config_as_toml;
use codex_core::default_client::get_codex_user_agent;
use codex_core::exec::ExecParams;
use codex_core::exec_env::create_env;

@@ -1760,6 +1760,8 @@ async fn derive_config_from_params(
sandbox: sandbox_mode,
config: cli_overrides,
base_instructions,
developer_instructions,
compact_prompt,
include_apply_patch_tool,
} = params;
let overrides = ConfigOverrides {

@@ -1772,8 +1774,9 @@ async fn derive_config_from_params(
model_provider,
codex_linux_sandbox_exe,
base_instructions,
developer_instructions,
compact_prompt,
include_apply_patch_tool,
include_view_image_tool: None,
show_raw_agent_reasoning: None,
tools_web_search_request: None,
experimental_sandbox_command_assessment: None,

@@ -166,6 +166,7 @@ mod tests {
"params": {
"loginId": Uuid::nil(),
"success": true,
"error": null,
},
}),
serde_json::to_value(jsonrpc_notification)

@@ -44,7 +44,9 @@ async fn test_send_message_success() -> Result<()> {

// Start a conversation using the new wire API.
let new_conv_id = mcp
.send_new_conversation_request(NewConversationParams::default())
.send_new_conversation_request(NewConversationParams {
..Default::default()
})
.await?;
let new_conv_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,

@@ -143,7 +145,10 @@ async fn test_send_message_raw_notifications_opt_in() -> Result<()> {
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;

let new_conv_id = mcp
.send_new_conversation_request(NewConversationParams::default())
.send_new_conversation_request(NewConversationParams {
developer_instructions: Some("Use the test harness tools.".to_string()),
..Default::default()
})
.await?;
let new_conv_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,

@@ -177,6 +182,9 @@ async fn test_send_message_raw_notifications_opt_in() -> Result<()> {
})
.await?;

let developer = read_raw_response_item(&mut mcp, conversation_id).await;
assert_developer_message(&developer, "Use the test harness tools.");

let instructions = read_raw_response_item(&mut mcp, conversation_id).await;
assert_instructions_message(&instructions);

@@ -305,10 +313,11 @@ fn assert_instructions_message(item: &ResponseItem) {
ResponseItem::Message { role, content, .. } => {
assert_eq!(role, "user");
let texts = content_texts(content);
let is_instructions = texts
.iter()
.any(|text| text.starts_with("# AGENTS.md instructions for "));
assert!(
texts
.iter()
.any(|text| text.contains("<user_instructions>")),
is_instructions,
"expected instructions message, got {texts:?}"
);
}

@@ -316,6 +325,21 @@ fn assert_instructions_message(item: &ResponseItem) {
}
}

fn assert_developer_message(item: &ResponseItem, expected_text: &str) {
match item {
ResponseItem::Message { role, content, .. } => {
assert_eq!(role, "developer");
let texts = content_texts(content);
assert_eq!(
texts,
vec![expected_text],
"expected developer instructions message, got {texts:?}"
);
}
other => panic!("expected developer instructions message, got {other:?}"),
}
}

fn assert_environment_message(item: &ResponseItem) {
match item {
ResponseItem::Message { role, content, .. } => {
@@ -47,6 +47,9 @@ tokio = { workspace = true, features = [
"signal",
] }

[target.'cfg(target_os = "windows")'.dependencies]
codex_windows_sandbox = { package = "codex-windows-sandbox", path = "../windows-sandbox-rs" }

[dev-dependencies]
assert_cmd = { workspace = true }
assert_matches = { workspace = true }

@@ -11,6 +11,7 @@ use codex_protocol::config_types::SandboxMode;

use crate::LandlockCommand;
use crate::SeatbeltCommand;
use crate::WindowsCommand;
use crate::exit_status::handle_exit_status;

pub async fn run_command_under_seatbelt(

@@ -51,9 +52,29 @@ pub async fn run_command_under_landlock(
.await
}

pub async fn run_command_under_windows(
command: WindowsCommand,
codex_linux_sandbox_exe: Option<PathBuf>,
) -> anyhow::Result<()> {
let WindowsCommand {
full_auto,
config_overrides,
command,
} = command;
run_command_under_sandbox(
full_auto,
command,
config_overrides,
codex_linux_sandbox_exe,
SandboxType::Windows,
)
.await
}

enum SandboxType {
Seatbelt,
Landlock,
Windows,
}

async fn run_command_under_sandbox(

@@ -87,6 +108,63 @@ async fn run_command_under_sandbox(
let stdio_policy = StdioPolicy::Inherit;
let env = create_env(&config.shell_environment_policy);

// Special-case Windows sandbox: execute and exit the process to emulate inherited stdio.
if let SandboxType::Windows = sandbox_type {
#[cfg(target_os = "windows")]
{
use codex_windows_sandbox::run_windows_sandbox_capture;

let policy_str = match &config.sandbox_policy {
codex_core::protocol::SandboxPolicy::DangerFullAccess => "workspace-write",
codex_core::protocol::SandboxPolicy::ReadOnly => "read-only",
codex_core::protocol::SandboxPolicy::WorkspaceWrite { .. } => "workspace-write",
};

let sandbox_cwd = sandbox_policy_cwd.clone();
let cwd_clone = cwd.clone();
let env_map = env.clone();
let command_vec = command.clone();
let res = tokio::task::spawn_blocking(move || {
run_windows_sandbox_capture(
policy_str,
&sandbox_cwd,
command_vec,
&cwd_clone,
env_map,
None,
)
})
.await;

let capture = match res {
Ok(Ok(v)) => v,
Ok(Err(err)) => {
eprintln!("windows sandbox failed: {err}");
std::process::exit(1);
}
Err(join_err) => {
eprintln!("windows sandbox join error: {join_err}");
std::process::exit(1);
}
};

if !capture.stdout.is_empty() {
use std::io::Write;
let _ = std::io::stdout().write_all(&capture.stdout);
}
if !capture.stderr.is_empty() {
use std::io::Write;
let _ = std::io::stderr().write_all(&capture.stderr);
}

std::process::exit(capture.exit_code);
}
#[cfg(not(target_os = "windows"))]
{
anyhow::bail!("Windows sandbox is only available on Windows");
}
}

let mut child = match sandbox_type {
SandboxType::Seatbelt => {
spawn_command_under_seatbelt(

@@ -115,6 +193,9 @@ async fn run_command_under_sandbox(
)
.await?
}
SandboxType::Windows => {
unreachable!("Windows sandbox should have been handled above");
}
};
let status = child.wait().await?;

@@ -32,3 +32,17 @@ pub struct LandlockCommand {
#[arg(trailing_var_arg = true)]
pub command: Vec<String>,
}

#[derive(Debug, Parser)]
pub struct WindowsCommand {
/// Convenience alias for low-friction sandboxed automatic execution (network-disabled sandbox that can write to cwd and TMPDIR)
#[arg(long = "full-auto", default_value_t = false)]
pub full_auto: bool,

#[clap(skip)]
pub config_overrides: CliConfigOverrides,

/// Full command args to run under Windows restricted token sandbox.
#[arg(trailing_var_arg = true)]
pub command: Vec<String>,
}
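A hedged sketch of how this clap struct collects its trailing arguments (it assumes clap's Parser is in scope and that CliConfigOverrides implements Default, since that field is skipped):

    // Illustrative only: everything after the flags is captured by the
    // trailing_var_arg `command` field.
    let cmd = WindowsCommand::parse_from(["windows", "--full-auto", "cmd.exe", "/c", "echo", "hello"]);
    assert!(cmd.full_auto);
    assert_eq!(cmd.command, vec!["cmd.exe", "/c", "echo", "hello"]);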

@@ -7,6 +7,7 @@ use codex_chatgpt::apply_command::ApplyCommand;
use codex_chatgpt::apply_command::run_apply_command;
use codex_cli::LandlockCommand;
use codex_cli::SeatbeltCommand;
use codex_cli::WindowsCommand;
use codex_cli::login::read_api_key_from_stdin;
use codex_cli::login::run_login_status;
use codex_cli::login::run_login_with_api_key;

@@ -151,6 +152,9 @@ enum SandboxCommand {
/// Run a command under Landlock+seccomp (Linux only).
#[clap(visible_alias = "landlock")]
Linux(LandlockCommand),

/// Run a command under Windows restricted token (Windows only).
Windows(WindowsCommand),
}

#[derive(Debug, Parser)]

@@ -472,6 +476,17 @@ async fn cli_main(codex_linux_sandbox_exe: Option<PathBuf>) -> anyhow::Result<()
)
.await?;
}
SandboxCommand::Windows(mut windows_cli) => {
prepend_config_flags(
&mut windows_cli.config_overrides,
root_config_overrides.clone(),
);
codex_cli::debug_sandbox::run_command_under_windows(
windows_cli,
codex_linux_sandbox_exe,
)
.await?;
}
},
Some(Subcommand::Apply(mut apply_cli)) => {
prepend_config_flags(

@@ -497,7 +512,7 @@ async fn cli_main(codex_linux_sandbox_exe: Option<PathBuf>) -> anyhow::Result<()
// Respect root-level `-c` overrides plus top-level flags like `--profile`.
let cli_kv_overrides = root_config_overrides
.parse_overrides()
.map_err(|e| anyhow::anyhow!(e))?;
.map_err(anyhow::Error::msg)?;

// Thread through relevant top-level flags (at minimum, `--profile`).
// Also honor `--search` since it maps to a feature toggle.

@@ -9,11 +9,11 @@ use codex_common::CliConfigOverrides;
use codex_common::format_env_display::format_env_display;
use codex_core::config::Config;
use codex_core::config::ConfigOverrides;
use codex_core::config::edit::ConfigEditsBuilder;
use codex_core::config::find_codex_home;
use codex_core::config::load_global_mcp_servers;
use codex_core::config_edit::ConfigEditsBuilder;
use codex_core::config_types::McpServerConfig;
use codex_core::config_types::McpServerTransportConfig;
use codex_core::config::types::McpServerConfig;
use codex_core::config::types::McpServerTransportConfig;
use codex_core::features::Feature;
use codex_core::mcp::auth::compute_auth_statuses;
use codex_core::protocol::McpAuthStatus;

@@ -196,7 +196,9 @@ impl McpCli {

async fn run_add(config_overrides: &CliConfigOverrides, add_args: AddArgs) -> Result<()> {
// Validate any provided overrides even though they are not currently applied.
let overrides = config_overrides.parse_overrides().map_err(|e| anyhow!(e))?;
let overrides = config_overrides
.parse_overrides()
.map_err(anyhow::Error::msg)?;
let config = Config::load_with_cli_overrides(overrides, ConfigOverrides::default())
.await
.context("failed to load configuration")?;

@@ -310,7 +312,9 @@ async fn run_add(config_overrides: &CliConfigOverrides, add_args: AddArgs) -> Re
}

async fn run_remove(config_overrides: &CliConfigOverrides, remove_args: RemoveArgs) -> Result<()> {
config_overrides.parse_overrides().map_err(|e| anyhow!(e))?;
config_overrides
.parse_overrides()
.map_err(anyhow::Error::msg)?;

let RemoveArgs { name } = remove_args;

@@ -341,15 +345,15 @@ async fn run_remove(config_overrides: &CliConfigOverrides, remove_args: RemoveAr
}

async fn run_login(config_overrides: &CliConfigOverrides, login_args: LoginArgs) -> Result<()> {
let overrides = config_overrides.parse_overrides().map_err(|e| anyhow!(e))?;
let overrides = config_overrides
.parse_overrides()
.map_err(anyhow::Error::msg)?;
let config = Config::load_with_cli_overrides(overrides, ConfigOverrides::default())
.await
.context("failed to load configuration")?;

if !config.features.enabled(Feature::RmcpClient) {
bail!(
"OAuth login is only supported when experimental_use_rmcp_client is true in config.toml."
);
bail!("OAuth login is only supported when [feature].rmcp_client is true in config.toml.");
}

let LoginArgs { name, scopes } = login_args;
@@ -382,7 +386,9 @@ async fn run_login(config_overrides: &CliConfigOverrides, login_args: LoginArgs)
}

async fn run_logout(config_overrides: &CliConfigOverrides, logout_args: LogoutArgs) -> Result<()> {
let overrides = config_overrides.parse_overrides().map_err(|e| anyhow!(e))?;
let overrides = config_overrides
.parse_overrides()
.map_err(anyhow::Error::msg)?;
let config = Config::load_with_cli_overrides(overrides, ConfigOverrides::default())
.await
.context("failed to load configuration")?;

@@ -409,7 +415,9 @@ async fn run_logout(config_overrides: &CliConfigOverrides, logout_args: LogoutAr
}

async fn run_list(config_overrides: &CliConfigOverrides, list_args: ListArgs) -> Result<()> {
let overrides = config_overrides.parse_overrides().map_err(|e| anyhow!(e))?;
let overrides = config_overrides
.parse_overrides()
.map_err(anyhow::Error::msg)?;
let config = Config::load_with_cli_overrides(overrides, ConfigOverrides::default())
.await
.context("failed to load configuration")?;

@@ -664,7 +672,9 @@ async fn run_list(config_overrides: &CliConfigOverrides, list_args: ListArgs) ->
}

async fn run_get(config_overrides: &CliConfigOverrides, get_args: GetArgs) -> Result<()> {
let overrides = config_overrides.parse_overrides().map_err(|e| anyhow!(e))?;
let overrides = config_overrides
.parse_overrides()
.map_err(anyhow::Error::msg)?;
let config = Config::load_with_cli_overrides(overrides, ConfigOverrides::default())
.await
.context("failed to load configuration")?;

@@ -2,7 +2,7 @@ use std::path::Path;

use anyhow::Result;
use codex_core::config::load_global_mcp_servers;
use codex_core::config_types::McpServerTransportConfig;
use codex_core::config::types::McpServerTransportConfig;
use predicates::str::contains;
use pretty_assertions::assert_eq;
use tempfile::TempDir;

@@ -1,9 +1,9 @@
use std::path::Path;

use anyhow::Result;
use codex_core::config::edit::ConfigEditsBuilder;
use codex_core::config::load_global_mcp_servers;
use codex_core::config_edit::ConfigEditsBuilder;
use codex_core::config_types::McpServerTransportConfig;
use codex_core::config::types::McpServerTransportConfig;
use predicates::prelude::PredicateBooleanExt;
use predicates::str::contains;
use pretty_assertions::assert_eq;

@@ -22,6 +22,6 @@ chrono = { version = "0.4", features = ["serde"] }
diffy = "0.4.2"
serde = { version = "1", features = ["derive"] }
serde_json = "1"
thiserror = "2.0.12"
thiserror = "2.0.17"
codex-backend-client = { path = "../backend-client", optional = true }
codex-git = { workspace = true }

@@ -83,6 +83,7 @@ tree-sitter-bash = { workspace = true }
uuid = { workspace = true, features = ["serde", "v4"] }
which = { workspace = true }
wildmatch = { workspace = true }
codex_windows_sandbox = { package = "codex-windows-sandbox", path = "../windows-sandbox-rs" }

[target.'cfg(target_os = "linux")'.dependencies]

@@ -82,6 +82,6 @@ OUTPUT FORMAT:

* **Do not** wrap the JSON in markdown fences or extra prose.
* The code_location field is required and must include absolute_file_path and line_range.
*Line ranges must be as short as possible for interpreting the issue (avoid ranges over 5–10 lines; pick the most suitable subrange).
* Line ranges must be as short as possible for interpreting the issue (avoid ranges over 5–10 lines; pick the most suitable subrange).
* The code_location should overlap with the diff.
* Do not generate a PR fix.
* Do not generate a PR fix.

@@ -21,6 +21,7 @@ use codex_protocol::models::FunctionCallOutputContentItem;
use codex_protocol::models::ReasoningItemContent;
use codex_protocol::models::ResponseItem;
use codex_protocol::protocol::SessionSource;
use codex_protocol::protocol::SubAgentSource;
use eventsource_stream::Eventsource;
use futures::Stream;
use futures::StreamExt;

@@ -347,13 +348,18 @@ pub(crate) async fn stream_chat_completions(

let mut req_builder = provider.create_request_builder(client, &None).await?;

// Include session source for backend telemetry and routing.
let task_type = match serde_json::to_value(session_source) {
Ok(serde_json::Value::String(s)) => s,
Ok(other) => other.to_string(),
Err(_) => "unknown".to_string(),
};
req_builder = req_builder.header("Codex-Task-Type", task_type);
// Include subagent header only for subagent sessions.
if let SessionSource::SubAgent(sub) = session_source.clone() {
let subagent = if let SubAgentSource::Other(label) = sub {
label
} else {
serde_json::to_value(&sub)
.ok()
.and_then(|v| v.as_str().map(std::string::ToString::to_string))
.unwrap_or_else(|| "other".to_string())
};
req_builder = req_builder.header("x-openai-subagent", subagent);
}

let res = otel_event_manager
.log_request(attempt, || {
@@ -216,10 +216,12 @@ impl ModelClient {
let verbosity = if self.config.model_family.support_verbosity {
self.config.model_verbosity
} else {
warn!(
"model_verbosity is set but ignored as the model does not support verbosity: {}",
self.config.model_family.family
);
if self.config.model_verbosity.is_some() {
warn!(
"model_verbosity is set but ignored as the model does not support verbosity: {}",
self.config.model_family.family
);
}
None
};

@@ -303,13 +305,18 @@ impl ModelClient {
.await
.map_err(StreamAttemptError::Fatal)?;

// Include session source for backend telemetry and routing.
let task_type = match serde_json::to_value(&self.session_source) {
Ok(serde_json::Value::String(s)) => s,
Ok(other) => other.to_string(),
Err(_) => "unknown".to_string(),
};
req_builder = req_builder.header("Codex-Task-Type", task_type);
// Include subagent header only for subagent sessions.
if let SessionSource::SubAgent(sub) = &self.session_source {
let subagent = if let crate::protocol::SubAgentSource::Other(label) = sub {
label.clone()
} else {
serde_json::to_value(sub)
.ok()
.and_then(|v| v.as_str().map(std::string::ToString::to_string))
.unwrap_or_else(|| "other".to_string())
};
req_builder = req_builder.header("x-openai-subagent", subagent);
}

req_builder = req_builder
// Send session_id for compatibility.
@@ -56,8 +56,8 @@ use crate::client::ModelClient;
use crate::client_common::Prompt;
use crate::client_common::ResponseEvent;
use crate::config::Config;
use crate::config_types::McpServerTransportConfig;
use crate::config_types::ShellEnvironmentPolicy;
use crate::config::types::McpServerTransportConfig;
use crate::config::types::ShellEnvironmentPolicy;
use crate::conversation_history::ConversationHistory;
use crate::environment_context::EnvironmentContext;
use crate::error::CodexErr;

@@ -112,6 +112,7 @@ use crate::tools::spec::ToolsConfig;
use crate::tools::spec::ToolsConfigParams;
use crate::turn_diff_tracker::TurnDiffTracker;
use crate::unified_exec::UnifiedExecSessionManager;
use crate::user_instructions::DeveloperInstructions;
use crate::user_instructions::UserInstructions;
use crate::user_notification::UserNotification;
use crate::util::backoff;

@@ -171,8 +172,10 @@ impl Codex {
model: config.model.clone(),
model_reasoning_effort: config.model_reasoning_effort,
model_reasoning_summary: config.model_reasoning_summary,
developer_instructions: config.developer_instructions.clone(),
user_instructions,
base_instructions: config.base_instructions.clone(),
compact_prompt: config.compact_prompt.clone(),
approval_policy: config.approval_policy,
sandbox_policy: config.sandbox_policy.clone(),
cwd: config.cwd.clone(),

@@ -264,7 +267,9 @@ pub(crate) struct TurnContext {
/// the model as well as sandbox policies are resolved against this path
/// instead of `std::env::current_dir()`.
pub(crate) cwd: PathBuf,
pub(crate) developer_instructions: Option<String>,
pub(crate) base_instructions: Option<String>,
pub(crate) compact_prompt: Option<String>,
pub(crate) user_instructions: Option<String>,
pub(crate) approval_policy: AskForApproval,
pub(crate) sandbox_policy: SandboxPolicy,

@@ -281,6 +286,12 @@ impl TurnContext {
.map(PathBuf::from)
.map_or_else(|| self.cwd.clone(), |p| self.cwd.join(p))
}

pub(crate) fn compact_prompt(&self) -> &str {
self.compact_prompt
.as_deref()
.unwrap_or(compact::SUMMARIZATION_PROMPT)
}
}

#[allow(dead_code)]

@@ -295,12 +306,18 @@ pub(crate) struct SessionConfiguration {
model_reasoning_effort: Option<ReasoningEffortConfig>,
model_reasoning_summary: ReasoningSummaryConfig,

/// Developer instructions that supplement the base instructions.
developer_instructions: Option<String>,

/// Model instructions that are appended to the base instructions.
user_instructions: Option<String>,

/// Base instructions override.
base_instructions: Option<String>,

/// Compact prompt override.
compact_prompt: Option<String>,

/// When to escalate for approval for execution
approval_policy: AskForApproval,
/// How to sandbox commands executed in the system

@@ -406,7 +423,9 @@ impl Session {
sub_id,
client,
cwd: session_configuration.cwd.clone(),
developer_instructions: session_configuration.developer_instructions.clone(),
base_instructions: session_configuration.base_instructions.clone(),
compact_prompt: session_configuration.compact_prompt.clone(),
user_instructions: session_configuration.user_instructions.clone(),
approval_policy: session_configuration.approval_policy,
sandbox_policy: session_configuration.sandbox_policy.clone(),

@@ -979,9 +998,18 @@ impl Session {
}

pub(crate) fn build_initial_context(&self, turn_context: &TurnContext) -> Vec<ResponseItem> {
let mut items = Vec::<ResponseItem>::with_capacity(2);
let mut items = Vec::<ResponseItem>::with_capacity(3);
if let Some(developer_instructions) = turn_context.developer_instructions.as_deref() {
items.push(DeveloperInstructions::new(developer_instructions.to_string()).into());
}
if let Some(user_instructions) = turn_context.user_instructions.as_deref() {
items.push(UserInstructions::new(user_instructions.to_string()).into());
items.push(
UserInstructions {
text: user_instructions.to_string(),
directory: turn_context.cwd.to_string_lossy().into_owned(),
}
.into(),
);
}
items.push(ResponseItem::from(EnvironmentContext::new(
Some(turn_context.cwd.clone()),
||||
use crate::codex::Session;
|
||||
use crate::codex::SessionSettingsUpdate;
|
||||
use crate::codex::TurnContext;
|
||||
use crate::codex::compact;
|
||||
|
||||
use crate::codex::spawn_review_thread;
|
||||
use crate::config::Config;
|
||||
use crate::mcp::auth::compute_auth_statuses;
|
||||
@@ -1540,7 +1568,7 @@ mod handlers {
|
||||
// Attempt to inject input into current task
|
||||
if let Err(items) = sess
|
||||
.inject_input(vec![UserInput::Text {
|
||||
text: compact::SUMMARIZATION_PROMPT.to_string(),
|
||||
text: turn_context.compact_prompt().to_string(),
|
||||
}])
|
||||
.await
|
||||
{
|
||||
@@ -1662,8 +1690,10 @@ async fn spawn_review_thread(
|
||||
sub_id: sub_id.to_string(),
|
||||
client,
|
||||
tools_config,
|
||||
developer_instructions: None,
|
||||
user_instructions: None,
|
||||
base_instructions: Some(base_instructions.clone()),
|
||||
compact_prompt: parent_turn_context.compact_prompt.clone(),
|
||||
approval_policy: parent_turn_context.approval_policy,
|
||||
sandbox_policy: parent_turn_context.sandbox_policy.clone(),
|
||||
shell_environment_policy: parent_turn_context.shell_environment_policy.clone(),
|
||||
@@ -2277,8 +2307,8 @@ mod tests {
|
||||
use super::*;
|
||||
use crate::config::ConfigOverrides;
|
||||
use crate::config::ConfigToml;
|
||||
use crate::config_types::McpServerConfig;
|
||||
use crate::config_types::McpServerTransportConfig;
|
||||
use crate::config::types::McpServerConfig;
|
||||
use crate::config::types::McpServerTransportConfig;
|
||||
use crate::exec::ExecToolCallOutput;
|
||||
use crate::mcp::auth::McpAuthStatusEntry;
|
||||
use crate::tools::format_exec_output_str;
|
||||
@@ -2498,8 +2528,10 @@ mod tests {
|
||||
model: config.model.clone(),
|
||||
model_reasoning_effort: config.model_reasoning_effort,
|
||||
model_reasoning_summary: config.model_reasoning_summary,
|
||||
developer_instructions: config.developer_instructions.clone(),
|
||||
user_instructions: config.user_instructions.clone(),
|
||||
base_instructions: config.base_instructions.clone(),
|
||||
compact_prompt: config.compact_prompt.clone(),
|
||||
approval_policy: config.approval_policy,
|
||||
sandbox_policy: config.sandbox_policy.clone(),
|
||||
cwd: config.cwd.clone(),
|
||||
@@ -2572,8 +2604,10 @@ mod tests {
|
||||
model: config.model.clone(),
|
||||
model_reasoning_effort: config.model_reasoning_effort,
|
||||
model_reasoning_summary: config.model_reasoning_summary,
|
||||
developer_instructions: config.developer_instructions.clone(),
|
||||
user_instructions: config.user_instructions.clone(),
|
||||
base_instructions: config.base_instructions.clone(),
|
||||
compact_prompt: config.compact_prompt.clone(),
|
||||
approval_policy: config.approval_policy,
|
||||
sandbox_policy: config.sandbox_policy.clone(),
|
||||
cwd: config.cwd.clone(),
|
||||
|
||||
@@ -13,6 +13,7 @@ use crate::protocol::ErrorEvent;
|
||||
use crate::protocol::EventMsg;
|
||||
use crate::protocol::TaskStartedEvent;
|
||||
use crate::protocol::TurnContextItem;
|
||||
use crate::protocol::WarningEvent;
|
||||
use crate::truncate::truncate_middle;
|
||||
use crate::util::backoff;
|
||||
use askama::Template;
|
||||
@@ -39,9 +40,8 @@ pub(crate) async fn run_inline_auto_compact_task(
|
||||
sess: Arc<Session>,
|
||||
turn_context: Arc<TurnContext>,
|
||||
) {
|
||||
let input = vec![UserInput::Text {
|
||||
text: SUMMARIZATION_PROMPT.to_string(),
|
||||
}];
|
||||
let prompt = turn_context.compact_prompt().to_string();
|
||||
let input = vec![UserInput::Text { text: prompt }];
|
||||
run_compact_task_inner(sess, turn_context, input).await;
|
||||
}
|
||||
|
||||
@@ -169,6 +169,11 @@ async fn run_compact_task_inner(
|
||||
message: "Compact task completed".to_string(),
|
||||
});
|
||||
sess.send_event(&turn_context, event).await;
|
||||
|
||||
let warning = EventMsg::Warning(WarningEvent {
|
||||
message: "Heads up: Long conversations and multiple compactions can cause the model to be less accurate. Start new a new conversation when possible to keep conversations small and targeted.".to_string(),
|
||||
});
|
||||
sess.send_event(&turn_context, warning).await;
|
||||
}
|
||||
|
||||
pub fn content_items_to_text(content: &[ContentItem]) -> Option<String> {
|
||||
@@ -348,7 +353,8 @@ mod tests {
|
||||
id: None,
|
||||
role: "user".to_string(),
|
||||
content: vec![ContentItem::InputText {
|
||||
text: "<user_instructions>do things</user_instructions>".to_string(),
|
||||
text: "# AGENTS.md instructions for project\n\n<INSTRUCTIONS>\ndo things\n</INSTRUCTIONS>"
|
||||
.to_string(),
|
||||
}],
|
||||
},
|
||||
ResponseItem::Message {
|
||||
|
||||
@@ -36,6 +36,7 @@ pub(crate) async fn run_codex_conversation_interactive(
parent_session: Arc<Session>,
parent_ctx: Arc<TurnContext>,
cancel_token: CancellationToken,
initial_history: Option<InitialHistory>,
) -> Result<Codex, CodexErr> {
let (tx_sub, rx_sub) = async_channel::bounded(SUBMISSION_CHANNEL_CAPACITY);
let (tx_ops, rx_ops) = async_channel::bounded(SUBMISSION_CHANNEL_CAPACITY);

@@ -43,7 +44,7 @@ pub(crate) async fn run_codex_conversation_interactive(
let CodexSpawnOk { codex, .. } = Codex::spawn(
config,
auth_manager,
InitialHistory::New,
initial_history.unwrap_or(InitialHistory::New),
SessionSource::SubAgent(SubAgentSource::Review),
)
.await?;

@@ -93,6 +94,7 @@ pub(crate) async fn run_codex_conversation_one_shot(
parent_session: Arc<Session>,
parent_ctx: Arc<TurnContext>,
cancel_token: CancellationToken,
initial_history: Option<InitialHistory>,
) -> Result<Codex, CodexErr> {
// Use a child token so we can stop the delegate after completion without
// requiring the caller to cancel the parent token.

@@ -103,6 +105,7 @@ pub(crate) async fn run_codex_conversation_one_shot(
parent_session,
parent_ctx,
child_cancel.clone(),
initial_history,
)
.await?;
@@ -1,6 +1,6 @@
use crate::config::CONFIG_TOML_FILE;
use crate::config_types::McpServerConfig;
use crate::config_types::Notice;
use crate::config::types::McpServerConfig;
use crate::config::types::Notice;
use anyhow::Context;
use codex_protocol::config_types::ReasoningEffort;
use std::collections::BTreeMap;

@@ -41,8 +41,8 @@ pub enum ConfigEdit {

// TODO(jif) move to a dedicated file
mod document_helpers {
use crate::config_types::McpServerConfig;
use crate::config_types::McpServerTransportConfig;
use crate::config::types::McpServerConfig;
use crate::config::types::McpServerTransportConfig;
use toml_edit::Array as TomlArray;
use toml_edit::InlineTable;
use toml_edit::Item as TomlItem;

@@ -509,7 +509,7 @@ impl ConfigEditsBuilder {
#[cfg(test)]
mod tests {
use super::*;
use crate::config_types::McpServerTransportConfig;
use crate::config::types::McpServerTransportConfig;
use codex_protocol::config_types::ReasoningEffort;
use pretty_assertions::assert_eq;
use tempfile::tempdir;
@@ -1,23 +1,22 @@
use crate::auth::AuthCredentialsStoreMode;
use crate::config::types::DEFAULT_OTEL_ENVIRONMENT;
use crate::config::types::History;
use crate::config::types::McpServerConfig;
use crate::config::types::Notice;
use crate::config::types::Notifications;
use crate::config::types::OtelConfig;
use crate::config::types::OtelConfigToml;
use crate::config::types::OtelExporterKind;
use crate::config::types::ReasoningSummaryFormat;
use crate::config::types::SandboxWorkspaceWrite;
use crate::config::types::ShellEnvironmentPolicy;
use crate::config::types::ShellEnvironmentPolicyToml;
use crate::config::types::Tui;
use crate::config::types::UriBasedFileOpener;
use crate::config_loader::LoadedConfigLayers;
pub use crate::config_loader::load_config_as_toml;
use crate::config_loader::load_config_as_toml;
use crate::config_loader::load_config_layers_with_overrides;
use crate::config_loader::merge_toml_values;
use crate::config_profile::ConfigProfile;
use crate::config_types::DEFAULT_OTEL_ENVIRONMENT;
use crate::config_types::History;
use crate::config_types::McpServerConfig;
use crate::config_types::Notice;
use crate::config_types::Notifications;
use crate::config_types::OtelConfig;
use crate::config_types::OtelConfigToml;
use crate::config_types::OtelExporterKind;
use crate::config_types::ReasoningSummaryFormat;
use crate::config_types::SandboxWorkspaceWrite;
use crate::config_types::ShellEnvironmentPolicy;
use crate::config_types::ShellEnvironmentPolicyToml;
use crate::config_types::Tui;
use crate::config_types::UriBasedFileOpener;
use crate::features::Feature;
use crate::features::FeatureOverrides;
use crate::features::Features;

@@ -51,9 +50,14 @@ use std::io::ErrorKind;
use std::path::Path;
use std::path::PathBuf;

use crate::config::profile::ConfigProfile;
use toml::Value as TomlValue;
use toml_edit::DocumentMut;

pub mod edit;
pub mod profile;
pub mod types;

#[cfg(target_os = "windows")]
pub const OPENAI_DEFAULT_MODEL: &str = "gpt-5";
#[cfg(not(target_os = "windows"))]
@@ -124,6 +128,12 @@ pub struct Config {
/// Base instructions override.
pub base_instructions: Option<String>,

/// Developer instructions override injected as a separate message.
pub developer_instructions: Option<String>,

/// Compact prompt override.
pub compact_prompt: Option<String>,

/// Optional external notifier command. When set, Codex will spawn this
/// program after each completed *turn* (i.e. when the agent finishes
/// processing a user submission). The value must be the full command

@@ -240,9 +250,6 @@ pub struct Config {
/// https://github.com/modelcontextprotocol/rust-sdk
pub use_experimental_use_rmcp_client: bool,

/// Include the `view_image` tool that lets the agent attach a local image path to context.
pub include_view_image_tool: bool,

/// Centralized feature flags; source of truth for feature gating.
pub features: Features,

@@ -265,7 +272,7 @@ pub struct Config {
pub disable_paste_burst: bool,

/// OTEL configuration (exporter type, endpoint, headers, etc.).
pub otel: crate::config_types::OtelConfig,
pub otel: crate::config::types::OtelConfig,
}

impl Config {

@@ -448,7 +455,7 @@ pub(crate) fn set_project_trusted_inner(
/// Patch `CODEX_HOME/config.toml` project state.
/// Use with caution.
pub fn set_project_trusted(codex_home: &Path, project_path: &Path) -> anyhow::Result<()> {
use crate::config_edit::ConfigEditsBuilder;
use crate::config::edit::ConfigEditsBuilder;

ConfigEditsBuilder::new(codex_home)
.set_project_trusted(project_path)

@@ -537,6 +544,13 @@ pub struct ConfigToml {
/// System instructions.
pub instructions: Option<String>,

/// Developer instructions inserted as a `developer` role message.
#[serde(default)]
pub developer_instructions: Option<String>,

/// Compact prompt used for history compaction.
pub compact_prompt: Option<String>,

/// When set, restricts ChatGPT login to a specific workspace identifier.
#[serde(default)]
pub forced_chatgpt_workspace_id: Option<String>,

@@ -629,17 +643,18 @@ pub struct ConfigToml {
pub disable_paste_burst: Option<bool>,

/// OTEL configuration.
pub otel: Option<crate::config_types::OtelConfigToml>,
pub otel: Option<crate::config::types::OtelConfigToml>,

/// Tracks whether the Windows onboarding screen has been acknowledged.
pub windows_wsl_setup_acknowledged: Option<bool>,

/// Collection of in-product notices (different from notifications)
/// See [`crate::config_types::Notices`] for more details
/// See [`crate::config::types::Notices`] for more details
pub notice: Option<Notice>,

/// Legacy, now use features
pub experimental_instructions_file: Option<PathBuf>,
pub experimental_compact_prompt_file: Option<PathBuf>,
pub experimental_use_exec_command_tool: Option<bool>,
pub experimental_use_unified_exec_tool: Option<bool>,
pub experimental_use_rmcp_client: Option<bool>,

@@ -754,6 +769,8 @@ impl ConfigToml {
let mut forced_auto_mode_downgraded_on_windows = false;
if cfg!(target_os = "windows")
&& matches!(resolved_sandbox_mode, SandboxMode::WorkspaceWrite)
// If the experimental Windows sandbox is enabled, do not force a downgrade.
&& crate::safety::get_platform_sandbox().is_none()
{
sandbox_policy = SandboxPolicy::new_read_only_policy();
forced_auto_mode_downgraded_on_windows = true;

@@ -820,8 +837,9 @@ pub struct ConfigOverrides {
pub config_profile: Option<String>,
pub codex_linux_sandbox_exe: Option<PathBuf>,
pub base_instructions: Option<String>,
pub developer_instructions: Option<String>,
pub compact_prompt: Option<String>,
pub include_apply_patch_tool: Option<bool>,
pub include_view_image_tool: Option<bool>,
pub show_raw_agent_reasoning: Option<bool>,
pub tools_web_search_request: Option<bool>,
pub experimental_sandbox_command_assessment: Option<bool>,
@@ -850,8 +868,9 @@ impl Config {
config_profile: config_profile_key,
codex_linux_sandbox_exe,
base_instructions,
developer_instructions,
compact_prompt,
include_apply_patch_tool: include_apply_patch_tool_override,
include_view_image_tool: include_view_image_tool_override,
show_raw_agent_reasoning,
tools_web_search_request: override_tools_web_search_request,
experimental_sandbox_command_assessment: sandbox_command_assessment_override,

@@ -878,12 +897,15 @@ impl Config {

let feature_overrides = FeatureOverrides {
include_apply_patch_tool: include_apply_patch_tool_override,
include_view_image_tool: include_view_image_tool_override,
web_search_request: override_tools_web_search_request,
experimental_sandbox_command_assessment: sandbox_command_assessment_override,
};

let features = Features::from_config(&cfg, &config_profile, feature_overrides);
#[cfg(target_os = "windows")]
{
crate::safety::set_windows_sandbox_enabled(features.enabled(Feature::WindowsSandbox));
}

let resolved_cwd = {
use std::env;

@@ -976,7 +998,6 @@ impl Config {
let history = cfg.history.unwrap_or_default();

let include_apply_patch_tool_flag = features.enabled(Feature::ApplyPatchFreeform);
let include_view_image_tool_flag = features.enabled(Feature::ViewImageTool);
let tools_web_search_request = features.enabled(Feature::WebSearchRequest);
let use_experimental_streamable_shell_tool = features.enabled(Feature::StreamableShell);
let use_experimental_unified_exec_tool = features.enabled(Feature::UnifiedExec);

@@ -1026,6 +1047,15 @@ impl Config {
.and_then(|info| info.auto_compact_token_limit)
});

let compact_prompt = compact_prompt.or(cfg.compact_prompt).and_then(|value| {
let trimmed = value.trim();
if trimmed.is_empty() {
None
} else {
Some(trimmed.to_string())
}
});

// Load base instructions override from a file if specified. If the
// path is relative, resolve it against the effective cwd so the
// behaviour matches other path-like config values.

@@ -1033,9 +1063,24 @@ impl Config {
.experimental_instructions_file
.as_ref()
.or(cfg.experimental_instructions_file.as_ref());
let file_base_instructions =
Self::get_base_instructions(experimental_instructions_path, &resolved_cwd)?;
let file_base_instructions = Self::load_override_from_file(
experimental_instructions_path,
&resolved_cwd,
"experimental instructions file",
)?;
let base_instructions = base_instructions.or(file_base_instructions);
let developer_instructions = developer_instructions.or(cfg.developer_instructions);

let experimental_compact_prompt_path = config_profile
.experimental_compact_prompt_file
.as_ref()
.or(cfg.experimental_compact_prompt_file.as_ref());
let file_compact_prompt = Self::load_override_from_file(
experimental_compact_prompt_path,
&resolved_cwd,
"experimental compact prompt file",
)?;
let compact_prompt = compact_prompt.or(file_compact_prompt);

// Default review model when not set in config; allow CLI override to take precedence.
let review_model = override_review_model

@@ -1060,6 +1105,8 @@ impl Config {
notify: cfg.notify,
user_instructions,
base_instructions,
developer_instructions,
compact_prompt,
// The config.toml omits "_mode" because it's a config file. However, "_mode"
// is important in code to differentiate the mode from the store implementation.
cli_auth_credentials_store_mode: cfg.cli_auth_credentials_store.unwrap_or_default(),

@@ -1112,7 +1159,6 @@ impl Config {
use_experimental_streamable_shell_tool,
use_experimental_unified_exec_tool,
use_experimental_use_rmcp_client,
include_view_image_tool: include_view_image_tool_flag,
features,
active_profile: active_profile_name,
active_project,
@@ -1156,18 +1202,15 @@ impl Config {
None
}

fn get_base_instructions(
fn load_override_from_file(
path: Option<&PathBuf>,
cwd: &Path,
description: &str,
) -> std::io::Result<Option<String>> {
let p = match path.as_ref() {
None => return Ok(None),
Some(p) => p,
let Some(p) = path else {
return Ok(None);
};

// Resolve relative paths against the provided cwd to make CLI
// overrides consistent regardless of where the process was launched
// from.
let full_path = if p.is_relative() {
cwd.join(p)
} else {

@@ -1177,10 +1220,7 @@ impl Config {
let contents = std::fs::read_to_string(&full_path).map_err(|e| {
std::io::Error::new(
e.kind(),
format!(
"failed to read experimental instructions file {}: {e}",
full_path.display()
),
format!("failed to read {description} {}: {e}", full_path.display()),
)
})?;

@@ -1188,10 +1228,7 @@ impl Config {
if s.is_empty() {
Err(std::io::Error::new(
std::io::ErrorKind::InvalidData,
format!(
"experimental instructions file is empty: {}",
full_path.display()
),
format!("{description} is empty: {}", full_path.display()),
))
} else {
Ok(Some(s))

@@ -1244,12 +1281,12 @@ pub fn log_dir(cfg: &Config) -> std::io::Result<PathBuf> {

#[cfg(test)]
mod tests {
use crate::config_edit::ConfigEdit;
use crate::config_edit::ConfigEditsBuilder;
use crate::config_edit::apply_blocking;
use crate::config_types::HistoryPersistence;
use crate::config_types::McpServerTransportConfig;
use crate::config_types::Notifications;
use crate::config::edit::ConfigEdit;
use crate::config::edit::ConfigEditsBuilder;
use crate::config::edit::apply_blocking;
use crate::config::types::HistoryPersistence;
use crate::config::types::McpServerTransportConfig;
use crate::config::types::Notifications;
use crate::features::Feature;

use super::*;

@@ -1556,7 +1593,7 @@ trust_level = "trusted"
profiles.insert(
"work".to_string(),
ConfigProfile {
include_view_image_tool: Some(false),
tools_view_image: Some(false),
..Default::default()
},
);

@@ -1573,7 +1610,6 @@ trust_level = "trusted"
)?;

assert!(!config.features.enabled(Feature::ViewImageTool));
assert!(!config.include_view_image_tool);

Ok(())
}
@@ -2649,6 +2685,61 @@ model = "gpt-5-codex"
}
}

#[test]
fn cli_override_sets_compact_prompt() -> std::io::Result<()> {
let codex_home = TempDir::new()?;
let overrides = ConfigOverrides {
compact_prompt: Some("Use the compact override".to_string()),
..Default::default()
};

let config = Config::load_from_base_config_with_overrides(
ConfigToml::default(),
overrides,
codex_home.path().to_path_buf(),
)?;

assert_eq!(
config.compact_prompt.as_deref(),
Some("Use the compact override")
);

Ok(())
}

#[test]
fn loads_compact_prompt_from_file() -> std::io::Result<()> {
let codex_home = TempDir::new()?;
let workspace = codex_home.path().join("workspace");
std::fs::create_dir_all(&workspace)?;

let prompt_path = workspace.join("compact_prompt.txt");
std::fs::write(&prompt_path, " summarize differently ")?;

let cfg = ConfigToml {
experimental_compact_prompt_file: Some(PathBuf::from("compact_prompt.txt")),
..Default::default()
};

let overrides = ConfigOverrides {
cwd: Some(workspace),
..Default::default()
};

let config = Config::load_from_base_config_with_overrides(
cfg,
overrides,
codex_home.path().to_path_buf(),
)?;

assert_eq!(
config.compact_prompt.as_deref(),
Some("summarize differently")
);

Ok(())
}

fn create_test_fixture() -> std::io::Result<PrecedenceTestFixture> {
let toml = r#"
model = "o3"
@@ -2804,6 +2895,8 @@ model_verbosity = "high"
|
||||
model_verbosity: None,
|
||||
chatgpt_base_url: "https://chatgpt.com/backend-api/".to_string(),
|
||||
base_instructions: None,
|
||||
developer_instructions: None,
|
||||
compact_prompt: None,
|
||||
forced_chatgpt_workspace_id: None,
|
||||
forced_login_method: None,
|
||||
include_apply_patch_tool: false,
|
||||
@@ -2812,7 +2905,6 @@ model_verbosity = "high"
|
||||
use_experimental_streamable_shell_tool: false,
|
||||
use_experimental_unified_exec_tool: false,
|
||||
use_experimental_use_rmcp_client: false,
|
||||
include_view_image_tool: true,
|
||||
features: Features::with_defaults(),
|
||||
active_profile: Some("o3".to_string()),
|
||||
active_project: ProjectConfig { trust_level: None },
|
||||
@@ -2875,6 +2967,8 @@ model_verbosity = "high"
|
||||
model_verbosity: None,
|
||||
chatgpt_base_url: "https://chatgpt.com/backend-api/".to_string(),
|
||||
base_instructions: None,
|
||||
developer_instructions: None,
|
||||
compact_prompt: None,
|
||||
forced_chatgpt_workspace_id: None,
|
||||
forced_login_method: None,
|
||||
include_apply_patch_tool: false,
|
||||
@@ -2883,7 +2977,6 @@ model_verbosity = "high"
|
||||
use_experimental_streamable_shell_tool: false,
|
||||
use_experimental_unified_exec_tool: false,
|
||||
use_experimental_use_rmcp_client: false,
|
||||
include_view_image_tool: true,
|
||||
features: Features::with_defaults(),
|
||||
active_profile: Some("gpt3".to_string()),
|
||||
active_project: ProjectConfig { trust_level: None },
|
||||
@@ -2961,6 +3054,8 @@ model_verbosity = "high"
|
||||
model_verbosity: None,
|
||||
chatgpt_base_url: "https://chatgpt.com/backend-api/".to_string(),
|
||||
base_instructions: None,
|
||||
developer_instructions: None,
|
||||
compact_prompt: None,
|
||||
forced_chatgpt_workspace_id: None,
|
||||
forced_login_method: None,
|
||||
include_apply_patch_tool: false,
|
||||
@@ -2969,7 +3064,6 @@ model_verbosity = "high"
|
||||
use_experimental_streamable_shell_tool: false,
|
||||
use_experimental_unified_exec_tool: false,
|
||||
use_experimental_use_rmcp_client: false,
|
||||
include_view_image_tool: true,
|
||||
features: Features::with_defaults(),
|
||||
active_profile: Some("zdr".to_string()),
|
||||
active_project: ProjectConfig { trust_level: None },
|
||||
@@ -3033,6 +3127,8 @@ model_verbosity = "high"
|
||||
model_verbosity: Some(Verbosity::High),
|
||||
chatgpt_base_url: "https://chatgpt.com/backend-api/".to_string(),
|
||||
base_instructions: None,
|
||||
developer_instructions: None,
|
||||
compact_prompt: None,
|
||||
forced_chatgpt_workspace_id: None,
|
||||
forced_login_method: None,
|
||||
include_apply_patch_tool: false,
|
||||
@@ -3041,7 +3137,6 @@ model_verbosity = "high"
|
||||
use_experimental_streamable_shell_tool: false,
|
||||
use_experimental_unified_exec_tool: false,
|
||||
use_experimental_use_rmcp_client: false,
|
||||
include_view_image_tool: true,
|
||||
features: Features::with_defaults(),
|
||||
active_profile: Some("gpt5".to_string()),
|
||||
active_project: ProjectConfig { trust_level: None },
|
||||
@@ -3174,7 +3269,7 @@ trust_level = "trusted"
|
||||
|
||||
#[cfg(test)]
|
||||
mod notifications_tests {
|
||||
use crate::config_types::Notifications;
|
||||
use crate::config::types::Notifications;
|
||||
use assert_matches::assert_matches;
|
||||
use serde::Deserialize;
|
||||
|
||||
@@ -22,8 +22,8 @@ pub struct ConfigProfile {
|
||||
pub model_verbosity: Option<Verbosity>,
|
||||
pub chatgpt_base_url: Option<String>,
|
||||
pub experimental_instructions_file: Option<PathBuf>,
|
||||
pub experimental_compact_prompt_file: Option<PathBuf>,
|
||||
pub include_apply_patch_tool: Option<bool>,
|
||||
pub include_view_image_tool: Option<bool>,
|
||||
pub experimental_use_unified_exec_tool: Option<bool>,
|
||||
pub experimental_use_exec_command_tool: Option<bool>,
|
||||
pub experimental_use_rmcp_client: Option<bool>,
|
||||
@@ -73,7 +73,6 @@ impl ConversationHistory {
|
||||
pub(crate) fn get_history_for_prompt(&mut self) -> Vec<ResponseItem> {
|
||||
let mut history = self.get_history();
|
||||
Self::remove_ghost_snapshots(&mut history);
|
||||
Self::remove_reasoning_before_last_turn(&mut history);
|
||||
history
|
||||
}
|
||||
|
||||
@@ -125,25 +124,6 @@ impl ConversationHistory {
|
||||
items.retain(|item| !matches!(item, ResponseItem::GhostSnapshot { .. }));
|
||||
}
|
||||
|
||||
fn remove_reasoning_before_last_turn(items: &mut Vec<ResponseItem>) {
|
||||
// Responses API drops reasoning items before the last user message.
|
||||
// Sending them is harmless but can lead to validation errors when switching between API organizations.
|
||||
// https://cookbook.openai.com/examples/responses_api/reasoning_items#caching
|
||||
let Some(last_user_index) = items
|
||||
.iter()
|
||||
// Use last user message as the turn boundary.
|
||||
.rposition(|item| matches!(item, ResponseItem::Message { role, .. } if role == "user"))
|
||||
else {
|
||||
return;
|
||||
};
|
||||
let mut index = 0usize;
|
||||
items.retain(|item| {
|
||||
let keep = index >= last_user_index || !matches!(item, ResponseItem::Reasoning { .. });
|
||||
index += 1;
|
||||
keep
|
||||
});
|
||||
}
|
||||
|
||||
fn ensure_call_outputs_present(&mut self) {
|
||||
// Collect synthetic outputs to insert immediately after their calls.
|
||||
// Store the insertion position (index of call) alongside the item so
|
||||
@@ -386,23 +366,10 @@ impl ConversationHistory {
|
||||
match item {
|
||||
ResponseItem::FunctionCallOutput { call_id, output } => {
|
||||
let truncated = format_output_for_model_body(output.content.as_str());
|
||||
let truncated_items = output.content_items.as_ref().map(|items| {
|
||||
items
|
||||
.iter()
|
||||
.map(|it| match it {
|
||||
FunctionCallOutputContentItem::InputText { text } => {
|
||||
FunctionCallOutputContentItem::InputText {
|
||||
text: format_output_for_model_body(text),
|
||||
}
|
||||
}
|
||||
FunctionCallOutputContentItem::InputImage { image_url } => {
|
||||
FunctionCallOutputContentItem::InputImage {
|
||||
image_url: image_url.clone(),
|
||||
}
|
||||
}
|
||||
})
|
||||
.collect()
|
||||
});
|
||||
let truncated_items = output
|
||||
.content_items
|
||||
.as_ref()
|
||||
.map(|items| globally_truncate_function_output_items(items));
|
||||
ResponseItem::FunctionCallOutput {
|
||||
call_id: call_id.clone(),
|
||||
output: FunctionCallOutputPayload {
|
||||
@@ -431,6 +398,53 @@ impl ConversationHistory {
|
||||
}
|
||||
}
|
||||
|
||||
fn globally_truncate_function_output_items(
|
||||
items: &[FunctionCallOutputContentItem],
|
||||
) -> Vec<FunctionCallOutputContentItem> {
|
||||
let mut out: Vec<FunctionCallOutputContentItem> = Vec::with_capacity(items.len());
|
||||
let mut remaining = MODEL_FORMAT_MAX_BYTES;
|
||||
let mut omitted_text_items = 0usize;
|
||||
|
||||
for it in items {
|
||||
match it {
|
||||
FunctionCallOutputContentItem::InputText { text } => {
|
||||
if remaining == 0 {
|
||||
omitted_text_items += 1;
|
||||
continue;
|
||||
}
|
||||
|
||||
let len = text.len();
|
||||
if len <= remaining {
|
||||
out.push(FunctionCallOutputContentItem::InputText { text: text.clone() });
|
||||
remaining -= len;
|
||||
} else {
|
||||
let slice = take_bytes_at_char_boundary(text, remaining);
|
||||
if !slice.is_empty() {
|
||||
out.push(FunctionCallOutputContentItem::InputText {
|
||||
text: slice.to_string(),
|
||||
});
|
||||
}
|
||||
remaining = 0;
|
||||
}
|
||||
}
|
||||
// todo(aibrahim): handle input images; resize
|
||||
FunctionCallOutputContentItem::InputImage { image_url } => {
|
||||
out.push(FunctionCallOutputContentItem::InputImage {
|
||||
image_url: image_url.clone(),
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if omitted_text_items > 0 {
|
||||
out.push(FunctionCallOutputContentItem::InputText {
|
||||
text: format!("[omitted {omitted_text_items} text items ...]"),
|
||||
});
|
||||
}
|
||||
|
||||
out
|
||||
}
|
||||
|
||||
pub(crate) fn format_output_for_model_body(content: &str) -> String {
|
||||
// Head+tail truncation for the model: show the beginning and end with an elision.
|
||||
// Clients still receive full streams; only this formatted summary is capped.
|
||||
@@ -540,15 +554,6 @@ mod tests {
|
||||
}
|
||||
}
|
||||
|
||||
fn reasoning(id: &str) -> ResponseItem {
|
||||
ResponseItem::Reasoning {
|
||||
id: id.to_string(),
|
||||
summary: Vec::new(),
|
||||
content: None,
|
||||
encrypted_content: None,
|
||||
}
|
||||
}
|
||||
|
||||
fn create_history_with_items(items: Vec<ResponseItem>) -> ConversationHistory {
|
||||
let mut h = ConversationHistory::new();
|
||||
h.record_items(items.iter());
|
||||
@@ -605,40 +610,6 @@ mod tests {
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn get_history_drops_reasoning_before_last_user_message() {
|
||||
let mut history = ConversationHistory::new();
|
||||
let items = vec![
|
||||
user_msg("initial"),
|
||||
reasoning("first"),
|
||||
assistant_msg("ack"),
|
||||
user_msg("latest"),
|
||||
reasoning("second"),
|
||||
assistant_msg("ack"),
|
||||
reasoning("third"),
|
||||
];
|
||||
history.record_items(items.iter());
|
||||
|
||||
let filtered = history.get_history_for_prompt();
|
||||
assert_eq!(
|
||||
filtered,
|
||||
vec![
|
||||
user_msg("initial"),
|
||||
assistant_msg("ack"),
|
||||
user_msg("latest"),
|
||||
reasoning("second"),
|
||||
assistant_msg("ack"),
|
||||
reasoning("third"),
|
||||
]
|
||||
);
|
||||
let reasoning_count = history
|
||||
.contents()
|
||||
.iter()
|
||||
.filter(|item| matches!(item, ResponseItem::Reasoning { .. }))
|
||||
.count();
|
||||
assert_eq!(reasoning_count, 3);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn get_history_for_prompt_drops_ghost_commits() {
|
||||
let items = vec![ResponseItem::GhostSnapshot {
|
||||
@@ -919,6 +890,81 @@ mod tests {
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn truncates_across_multiple_under_limit_texts_and_reports_omitted() {
|
||||
// Arrange: several text items, none exceeding per-item limit, but total exceeds budget.
|
||||
let budget = MODEL_FORMAT_MAX_BYTES;
|
||||
let t1_len = (budget / 2).saturating_sub(10);
|
||||
let t2_len = (budget / 2).saturating_sub(10);
|
||||
let remaining_after_t1_t2 = budget.saturating_sub(t1_len + t2_len);
|
||||
let t3_len = 50; // gets truncated to remaining_after_t1_t2
|
||||
let t4_len = 5; // omitted
|
||||
let t5_len = 7; // omitted
|
||||
|
||||
let t1 = "a".repeat(t1_len);
|
||||
let t2 = "b".repeat(t2_len);
|
||||
let t3 = "c".repeat(t3_len);
|
||||
let t4 = "d".repeat(t4_len);
|
||||
let t5 = "e".repeat(t5_len);
|
||||
|
||||
let item = ResponseItem::FunctionCallOutput {
|
||||
call_id: "call-omit".to_string(),
|
||||
output: FunctionCallOutputPayload {
|
||||
content: "irrelevant".to_string(),
|
||||
content_items: Some(vec![
|
||||
FunctionCallOutputContentItem::InputText { text: t1 },
|
||||
FunctionCallOutputContentItem::InputText { text: t2 },
|
||||
FunctionCallOutputContentItem::InputImage {
|
||||
image_url: "img:mid".to_string(),
|
||||
},
|
||||
FunctionCallOutputContentItem::InputText { text: t3 },
|
||||
FunctionCallOutputContentItem::InputText { text: t4 },
|
||||
FunctionCallOutputContentItem::InputText { text: t5 },
|
||||
]),
|
||||
success: Some(true),
|
||||
},
|
||||
};
|
||||
|
||||
let mut history = ConversationHistory::new();
|
||||
history.record_items([&item]);
|
||||
assert_eq!(history.items.len(), 1);
|
||||
let json = serde_json::to_value(&history.items[0]).expect("serialize to json");
|
||||
|
||||
let output = json
|
||||
.get("output")
|
||||
.expect("output field")
|
||||
.as_array()
|
||||
.expect("array output");
|
||||
|
||||
// Expect: t1 (full), t2 (full), image, t3 (truncated), summary mentioning 2 omitted.
|
||||
assert_eq!(output.len(), 5);
|
||||
|
||||
let first = output[0].as_object().expect("first obj");
|
||||
assert_eq!(first.get("type").unwrap(), "input_text");
|
||||
let first_text = first.get("text").unwrap().as_str().unwrap();
|
||||
assert_eq!(first_text.len(), t1_len);
|
||||
|
||||
let second = output[1].as_object().expect("second obj");
|
||||
assert_eq!(second.get("type").unwrap(), "input_text");
|
||||
let second_text = second.get("text").unwrap().as_str().unwrap();
|
||||
assert_eq!(second_text.len(), t2_len);
|
||||
|
||||
assert_eq!(
|
||||
output[2],
|
||||
serde_json::json!({"type": "input_image", "image_url": "img:mid"})
|
||||
);
|
||||
|
||||
let fourth = output[3].as_object().expect("fourth obj");
|
||||
assert_eq!(fourth.get("type").unwrap(), "input_text");
|
||||
let fourth_text = fourth.get("text").unwrap().as_str().unwrap();
|
||||
assert_eq!(fourth_text.len(), remaining_after_t1_t2);
|
||||
|
||||
let summary = output[4].as_object().expect("summary obj");
|
||||
assert_eq!(summary.get("type").unwrap(), "input_text");
|
||||
let summary_text = summary.get("text").unwrap().as_str().unwrap();
|
||||
assert!(summary_text.contains("omitted 2 text items"));
|
||||
}
|
||||
|
||||
//TODO(aibrahim): run CI in release mode.
|
||||
#[cfg(not(debug_assertions))]
|
||||
#[test]
|
||||
|
||||
@@ -4,6 +4,8 @@ use crate::token_data::KnownPlan;
use crate::token_data::PlanType;
use crate::truncate::truncate_middle;
use chrono::DateTime;
use chrono::Datelike;
use chrono::Local;
use chrono::Utc;
use codex_async_utils::CancelErr;
use codex_protocol::ConversationId;
@@ -253,7 +255,7 @@ impl std::fmt::Display for UsageLimitReachedError {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let message = match self.plan_type.as_ref() {
Some(PlanType::Known(KnownPlan::Plus)) => format!(
"You've hit your usage limit. Upgrade to Pro (https://openai.com/chatgpt/pricing){}",
"You've hit your usage limit. Upgrade to Pro (https://openai.com/chatgpt/pricing), visit chatgpt.com/codex/settings/usage to purchase more credits{}",
retry_suffix_after_or(self.resets_at.as_ref())
),
Some(PlanType::Known(KnownPlan::Team)) | Some(PlanType::Known(KnownPlan::Business)) => {
@@ -266,8 +268,11 @@ impl std::fmt::Display for UsageLimitReachedError {
"You've hit your usage limit. Upgrade to Plus to continue using Codex (https://openai.com/chatgpt/pricing)."
.to_string()
}
Some(PlanType::Known(KnownPlan::Pro))
| Some(PlanType::Known(KnownPlan::Enterprise))
Some(PlanType::Known(KnownPlan::Pro)) => format!(
"You've hit your usage limit. Visit chatgpt.com/codex/settings/usage to purchase more credits{}",
retry_suffix_after_or(self.resets_at.as_ref())
),
Some(PlanType::Known(KnownPlan::Enterprise))
| Some(PlanType::Known(KnownPlan::Edu)) => format!(
"You've hit your usage limit.{}",
retry_suffix(self.resets_at.as_ref())
@@ -283,28 +288,46 @@ impl std::fmt::Display for UsageLimitReachedError {
}

fn retry_suffix(resets_at: Option<&DateTime<Utc>>) -> String {
if let Some(secs) = remaining_seconds(resets_at) {
let reset_duration = format_reset_duration(secs);
format!(" Try again in {reset_duration}.")
if let Some(resets_at) = resets_at {
let formatted = format_retry_timestamp(resets_at);
format!(" Try again at {formatted}.")
} else {
" Try again later.".to_string()
}
}

fn retry_suffix_after_or(resets_at: Option<&DateTime<Utc>>) -> String {
if let Some(secs) = remaining_seconds(resets_at) {
let reset_duration = format_reset_duration(secs);
format!(" or try again in {reset_duration}.")
if let Some(resets_at) = resets_at {
let formatted = format_retry_timestamp(resets_at);
format!(" or try again at {formatted}.")
} else {
" or try again later.".to_string()
}
}

fn remaining_seconds(resets_at: Option<&DateTime<Utc>>) -> Option<u64> {
let resets_at = resets_at.cloned()?;
let now = now_for_retry();
let secs = resets_at.signed_duration_since(now).num_seconds();
Some(if secs <= 0 { 0 } else { secs as u64 })
fn format_retry_timestamp(resets_at: &DateTime<Utc>) -> String {
let local_reset = resets_at.with_timezone(&Local);
let local_now = now_for_retry().with_timezone(&Local);
if local_reset.date_naive() == local_now.date_naive() {
local_reset.format("%-I:%M %p").to_string()
} else {
let suffix = day_suffix(local_reset.day());
local_reset
.format(&format!("%b %-d{suffix}, %Y %-I:%M %p"))
.to_string()
}
}

fn day_suffix(day: u32) -> &'static str {
match day {
11..=13 => "th",
_ => match day % 10 {
1 => "st",
2 => "nd", // codespell:ignore
3 => "rd",
_ => "th",
},
}
}

#[cfg(test)]
@@ -323,36 +346,6 @@ fn now_for_retry() -> DateTime<Utc> {
Utc::now()
}

fn format_reset_duration(total_secs: u64) -> String {
let days = total_secs / 86_400;
let hours = (total_secs % 86_400) / 3_600;
let minutes = (total_secs % 3_600) / 60;

let mut parts: Vec<String> = Vec::new();
if days > 0 {
let unit = if days == 1 { "day" } else { "days" };
parts.push(format!("{days} {unit}"));
}
if hours > 0 {
let unit = if hours == 1 { "hour" } else { "hours" };
parts.push(format!("{hours} {unit}"));
}
if minutes > 0 {
let unit = if minutes == 1 { "minute" } else { "minutes" };
parts.push(format!("{minutes} {unit}"));
}

if parts.is_empty() {
return "less than a minute".to_string();
}

match parts.len() {
1 => parts[0].clone(),
2 => format!("{} {}", parts[0], parts[1]),
_ => format!("{} {} {}", parts[0], parts[1], parts[2]),
}
}

#[derive(Debug)]
pub struct EnvVarError {
/// Name of the environment variable that is missing.
@@ -467,7 +460,7 @@ mod tests {
};
assert_eq!(
err.to_string(),
"You've hit your usage limit. Upgrade to Pro (https://openai.com/chatgpt/pricing) or try again later."
"You've hit your usage limit. Upgrade to Pro (https://openai.com/chatgpt/pricing), visit chatgpt.com/codex/settings/usage to purchase more credits or try again later."
);
}

@@ -569,15 +562,16 @@ mod tests {
let base = Utc.with_ymd_and_hms(2024, 1, 1, 0, 0, 0).unwrap();
let resets_at = base + ChronoDuration::hours(1);
with_now_override(base, move || {
let expected_time = format_retry_timestamp(&resets_at);
let err = UsageLimitReachedError {
plan_type: Some(PlanType::Known(KnownPlan::Team)),
resets_at: Some(resets_at),
rate_limits: Some(rate_limit_snapshot()),
};
assert_eq!(
err.to_string(),
"You've hit your usage limit. To get more access now, send a request to your admin or try again in 1 hour."
let expected = format!(
"You've hit your usage limit. To get more access now, send a request to your admin or try again at {expected_time}."
);
assert_eq!(err.to_string(), expected);
});
}

@@ -597,7 +591,7 @@ mod tests {
#[test]
fn usage_limit_reached_error_formats_default_for_other_plans() {
let err = UsageLimitReachedError {
plan_type: Some(PlanType::Known(KnownPlan::Pro)),
plan_type: Some(PlanType::Known(KnownPlan::Enterprise)),
resets_at: None,
rate_limits: Some(rate_limit_snapshot()),
};
@@ -607,20 +601,37 @@ mod tests {
);
}

#[test]
fn usage_limit_reached_error_formats_pro_plan_with_reset() {
let base = Utc.with_ymd_and_hms(2024, 1, 1, 0, 0, 0).unwrap();
let resets_at = base + ChronoDuration::hours(1);
with_now_override(base, move || {
let expected_time = format_retry_timestamp(&resets_at);
let err = UsageLimitReachedError {
plan_type: Some(PlanType::Known(KnownPlan::Pro)),
resets_at: Some(resets_at),
rate_limits: Some(rate_limit_snapshot()),
};
let expected = format!(
"You've hit your usage limit. Visit chatgpt.com/codex/settings/usage to purchase more credits or try again at {expected_time}."
);
assert_eq!(err.to_string(), expected);
});
}

#[test]
fn usage_limit_reached_includes_minutes_when_available() {
let base = Utc.with_ymd_and_hms(2024, 1, 1, 0, 0, 0).unwrap();
let resets_at = base + ChronoDuration::minutes(5);
with_now_override(base, move || {
let expected_time = format_retry_timestamp(&resets_at);
let err = UsageLimitReachedError {
plan_type: None,
resets_at: Some(resets_at),
rate_limits: Some(rate_limit_snapshot()),
};
assert_eq!(
err.to_string(),
"You've hit your usage limit. Try again in 5 minutes."
);
let expected = format!("You've hit your usage limit. Try again at {expected_time}.");
assert_eq!(err.to_string(), expected);
});
}

@@ -629,15 +640,16 @@ mod tests {
let base = Utc.with_ymd_and_hms(2024, 1, 1, 0, 0, 0).unwrap();
let resets_at = base + ChronoDuration::hours(3) + ChronoDuration::minutes(32);
with_now_override(base, move || {
let expected_time = format_retry_timestamp(&resets_at);
let err = UsageLimitReachedError {
plan_type: Some(PlanType::Known(KnownPlan::Plus)),
resets_at: Some(resets_at),
rate_limits: Some(rate_limit_snapshot()),
};
assert_eq!(
err.to_string(),
"You've hit your usage limit. Upgrade to Pro (https://openai.com/chatgpt/pricing) or try again in 3 hours 32 minutes."
let expected = format!(
"You've hit your usage limit. Upgrade to Pro (https://openai.com/chatgpt/pricing), visit chatgpt.com/codex/settings/usage to purchase more credits or try again at {expected_time}."
);
assert_eq!(err.to_string(), expected);
});
}

@@ -647,15 +659,14 @@ mod tests {
let resets_at =
base + ChronoDuration::days(2) + ChronoDuration::hours(3) + ChronoDuration::minutes(5);
with_now_override(base, move || {
let expected_time = format_retry_timestamp(&resets_at);
let err = UsageLimitReachedError {
plan_type: None,
resets_at: Some(resets_at),
rate_limits: Some(rate_limit_snapshot()),
};
assert_eq!(
err.to_string(),
"You've hit your usage limit. Try again in 2 days 3 hours 5 minutes."
);
let expected = format!("You've hit your usage limit. Try again at {expected_time}.");
assert_eq!(err.to_string(), expected);
});
}

@@ -664,15 +675,14 @@ mod tests {
let base = Utc.with_ymd_and_hms(2024, 1, 1, 0, 0, 0).unwrap();
let resets_at = base + ChronoDuration::seconds(30);
with_now_override(base, move || {
let expected_time = format_retry_timestamp(&resets_at);
let err = UsageLimitReachedError {
plan_type: None,
resets_at: Some(resets_at),
rate_limits: Some(rate_limit_snapshot()),
};
assert_eq!(
err.to_string(),
"You've hit your usage limit. Try again in less than a minute."
);
let expected = format!("You've hit your usage limit. Try again at {expected_time}.");
assert_eq!(err.to_string(), expected);
});
}
}

@@ -13,13 +13,19 @@ use codex_protocol::user_input::UserInput;
use tracing::warn;
use uuid::Uuid;

use crate::user_instructions::UserInstructions;

fn is_session_prefix(text: &str) -> bool {
let trimmed = text.trim_start();
let lowered = trimmed.to_ascii_lowercase();
lowered.starts_with("<environment_context>") || lowered.starts_with("<user_instructions>")
lowered.starts_with("<environment_context>")
}

fn parse_user_message(message: &[ContentItem]) -> Option<UserMessageItem> {
if UserInstructions::is_user_instructions(message) {
return None;
}

let mut content: Vec<UserInput> = Vec::new();

for content_item in message.iter() {
@@ -167,6 +173,38 @@ mod tests {
}
}

#[test]
fn skips_user_instructions_and_env() {
let items = vec![
ResponseItem::Message {
id: None,
role: "user".to_string(),
content: vec![ContentItem::InputText {
text: "<user_instructions>test_text</user_instructions>".to_string(),
}],
},
ResponseItem::Message {
id: None,
role: "user".to_string(),
content: vec![ContentItem::InputText {
text: "<environment_context>test_text</environment_context>".to_string(),
}],
},
ResponseItem::Message {
id: None,
role: "user".to_string(),
content: vec![ContentItem::InputText {
text: "# AGENTS.md instructions for test_directory\n\n<INSTRUCTIONS>\ntest_text\n</INSTRUCTIONS>".to_string(),
}],
},
];

for item in items {
let turn_item = parse_turn_item(&item);
assert!(turn_item.is_none(), "expected none, got {turn_item:?}");
}
}

#[test]
fn parses_agent_message() {
let item = ResponseItem::Message {

@@ -72,6 +72,9 @@ pub enum SandboxType {

/// Only available on Linux.
LinuxSeccomp,

/// Only available on Windows.
WindowsRestrictedToken,
}

#[derive(Clone)]
@@ -158,11 +161,79 @@ pub(crate) async fn execute_exec_env(
};

let start = Instant::now();
let raw_output_result = exec(params, sandbox_policy, stdout_stream).await;
let raw_output_result = exec(params, sandbox, sandbox_policy, stdout_stream).await;
let duration = start.elapsed();
finalize_exec_result(raw_output_result, sandbox, duration)
}

#[cfg(target_os = "windows")]
async fn exec_windows_sandbox(
params: ExecParams,
sandbox_policy: &SandboxPolicy,
) -> Result<RawExecToolCallOutput> {
use codex_windows_sandbox::run_windows_sandbox_capture;

let ExecParams {
command,
cwd,
env,
timeout_ms,
..
} = params;

let policy_str = match sandbox_policy {
SandboxPolicy::DangerFullAccess => "workspace-write",
SandboxPolicy::ReadOnly => "read-only",
SandboxPolicy::WorkspaceWrite { .. } => "workspace-write",
};

let sandbox_cwd = cwd.clone();
let spawn_res = tokio::task::spawn_blocking(move || {
run_windows_sandbox_capture(policy_str, &sandbox_cwd, command, &cwd, env, timeout_ms)
})
.await;

let capture = match spawn_res {
Ok(Ok(v)) => v,
Ok(Err(err)) => {
return Err(CodexErr::Io(io::Error::other(format!(
"windows sandbox: {err}"
))));
}
Err(join_err) => {
return Err(CodexErr::Io(io::Error::other(format!(
"windows sandbox join error: {join_err}"
))));
}
};

let exit_status = synthetic_exit_status(capture.exit_code);
let stdout = StreamOutput {
text: capture.stdout,
truncated_after_lines: None,
};
let stderr = StreamOutput {
text: capture.stderr,
truncated_after_lines: None,
};
// Best-effort aggregate: stdout then stderr
let mut aggregated = Vec::with_capacity(stdout.text.len() + stderr.text.len());
append_all(&mut aggregated, &stdout.text);
append_all(&mut aggregated, &stderr.text);
let aggregated_output = StreamOutput {
text: aggregated,
truncated_after_lines: None,
};

Ok(RawExecToolCallOutput {
exit_status,
stdout,
stderr,
aggregated_output,
timed_out: capture.timed_out,
})
}

fn finalize_exec_result(
raw_output_result: std::result::Result<RawExecToolCallOutput, CodexErr>,
sandbox_type: SandboxType,
@@ -347,11 +418,17 @@ pub struct ExecToolCallOutput {
pub timed_out: bool,
}

#[cfg_attr(not(target_os = "windows"), allow(unused_variables))]
async fn exec(
params: ExecParams,
sandbox: SandboxType,
sandbox_policy: &SandboxPolicy,
stdout_stream: Option<StdoutStream>,
) -> Result<RawExecToolCallOutput> {
#[cfg(target_os = "windows")]
if sandbox == SandboxType::WindowsRestrictedToken {
return exec_windows_sandbox(params, sandbox_policy).await;
}
let timeout = params.timeout_duration();
let ExecParams {
command,
@@ -525,8 +602,9 @@ fn synthetic_exit_status(code: i32) -> ExitStatus {
#[cfg(windows)]
fn synthetic_exit_status(code: i32) -> ExitStatus {
use std::os::windows::process::ExitStatusExt;
#[expect(clippy::unwrap_used)]
std::process::ExitStatus::from_raw(code.try_into().unwrap())
// On Windows the raw status is a u32. Use a direct cast to avoid
// panicking on negative i32 values produced by prior narrowing casts.
std::process::ExitStatus::from_raw(code as u32)
}

#[cfg(test)]

@@ -1,6 +1,6 @@
use crate::config_types::EnvironmentVariablePattern;
use crate::config_types::ShellEnvironmentPolicy;
use crate::config_types::ShellEnvironmentPolicyInherit;
use crate::config::types::EnvironmentVariablePattern;
use crate::config::types::ShellEnvironmentPolicy;
use crate::config::types::ShellEnvironmentPolicyInherit;
use std::collections::HashMap;
use std::collections::HashSet;

@@ -71,7 +71,7 @@ where
#[cfg(test)]
mod tests {
use super::*;
use crate::config_types::ShellEnvironmentPolicyInherit;
use crate::config::types::ShellEnvironmentPolicyInherit;
use maplit::hashmap;

fn make_vars(pairs: &[(&str, &str)]) -> Vec<(String, String)> {

@@ -6,7 +6,7 @@
//! container attached to `Config`.

use crate::config::ConfigToml;
use crate::config_profile::ConfigProfile;
use crate::config::profile::ConfigProfile;
use serde::Deserialize;
use std::collections::BTreeMap;
use std::collections::BTreeSet;
@@ -43,6 +43,8 @@ pub enum Feature {
SandboxCommandAssessment,
/// Create a ghost commit at each turn.
GhostCommit,
/// Enable Windows sandbox (restricted token) on Windows.
WindowsSandbox,
}

impl Feature {
@@ -82,7 +84,6 @@ pub struct Features {
#[derive(Debug, Clone, Default)]
pub struct FeatureOverrides {
pub include_apply_patch_tool: Option<bool>,
pub include_view_image_tool: Option<bool>,
pub web_search_request: Option<bool>,
pub experimental_sandbox_command_assessment: Option<bool>,
}
@@ -91,7 +92,6 @@ impl FeatureOverrides {
fn apply(self, features: &mut Features) {
LegacyFeatureToggles {
include_apply_patch_tool: self.include_apply_patch_tool,
include_view_image_tool: self.include_view_image_tool,
tools_web_search: self.web_search_request,
..Default::default()
}
@@ -193,7 +193,6 @@ impl Features {

let profile_legacy = LegacyFeatureToggles {
include_apply_patch_tool: config_profile.include_apply_patch_tool,
include_view_image_tool: config_profile.include_view_image_tool,
experimental_sandbox_command_assessment: config_profile
.experimental_sandbox_command_assessment,
experimental_use_freeform_apply_patch: config_profile
@@ -295,4 +294,10 @@ pub const FEATURES: &[FeatureSpec] = &[
stage: Stage::Experimental,
default_enabled: false,
},
FeatureSpec {
id: Feature::WindowsSandbox,
key: "enable_experimental_windows_sandbox",
stage: Stage::Experimental,
default_enabled: false,
},
];

@@ -33,10 +33,6 @@ const ALIASES: &[Alias] = &[
legacy_key: "include_apply_patch_tool",
feature: Feature::ApplyPatchFreeform,
},
Alias {
legacy_key: "include_view_image_tool",
feature: Feature::ViewImageTool,
},
Alias {
legacy_key: "web_search",
feature: Feature::WebSearchRequest,
@@ -56,7 +52,6 @@ pub(crate) fn feature_for_key(key: &str) -> Option<Feature> {
#[derive(Debug, Default)]
pub struct LegacyFeatureToggles {
pub include_apply_patch_tool: Option<bool>,
pub include_view_image_tool: Option<bool>,
pub experimental_sandbox_command_assessment: Option<bool>,
pub experimental_use_freeform_apply_patch: Option<bool>,
pub experimental_use_exec_command_tool: Option<bool>,
@@ -110,12 +105,6 @@ impl LegacyFeatureToggles {
self.tools_web_search,
"tools.web_search",
);
set_if_some(
features,
Feature::ViewImageTool,
self.include_view_image_tool,
"include_view_image_tool",
);
set_if_some(
features,
Feature::ViewImageTool,

@@ -17,10 +17,7 @@ pub use codex_conversation::CodexConversation;
mod codex_delegate;
mod command_safety;
pub mod config;
pub mod config_edit;
pub mod config_loader;
pub mod config_profile;
pub mod config_types;
mod conversation_history;
pub mod custom_prompts;
mod environment_context;

@@ -7,8 +7,8 @@ use codex_rmcp_client::determine_streamable_http_auth_status;
use futures::future::join_all;
use tracing::warn;

use crate::config_types::McpServerConfig;
use crate::config_types::McpServerTransportConfig;
use crate::config::types::McpServerConfig;
use crate::config::types::McpServerTransportConfig;

#[derive(Debug, Clone)]
pub struct McpAuthStatusEntry {

@@ -37,8 +37,8 @@ use tokio::task::JoinSet;
use tracing::info;
use tracing::warn;

use crate::config_types::McpServerConfig;
use crate::config_types::McpServerTransportConfig;
use crate::config::types::McpServerConfig;
use crate::config::types::McpServerTransportConfig;

/// Delimiter used to separate the server name from the tool name in a fully
/// qualified tool name.

@@ -28,7 +28,7 @@ use tokio::fs;
use tokio::io::AsyncReadExt;

use crate::config::Config;
use crate::config_types::HistoryPersistence;
use crate::config::types::HistoryPersistence;

use codex_protocol::ConversationId;
#[cfg(unix)]

@@ -1,4 +1,4 @@
use crate::config_types::ReasoningSummaryFormat;
use crate::config::types::ReasoningSummaryFormat;
use crate::tools::handlers::apply_patch::ApplyPatchToolType;

/// The `instructions` field in the payload sent to a model should always start
@@ -160,7 +160,7 @@ pub fn find_family_for_model(slug: &str) -> Option<ModelFamily> {
reasoning_summary_format: ReasoningSummaryFormat::Experimental,
base_instructions: GPT_5_CODEX_INSTRUCTIONS.to_string(),
apply_patch_tool_type: Some(ApplyPatchToolType::Freeform),
support_verbosity: true,
support_verbosity: false,
)
} else if slug.starts_with("gpt-5") {
model_family!(

@@ -1,6 +1,6 @@
use crate::config::Config;
use crate::config_types::OtelExporterKind as Kind;
use crate::config_types::OtelHttpProtocol as Protocol;
use crate::config::types::OtelExporterKind as Kind;
use crate::config::types::OtelHttpProtocol as Protocol;
use crate::default_client::originator;
use codex_otel::config::OtelExporter;
use codex_otel::config::OtelHttpProtocol;

@@ -46,6 +46,7 @@ pub(crate) fn should_persist_event_msg(ev: &EventMsg) -> bool {
| EventMsg::UndoCompleted(_)
| EventMsg::TurnAborted(_) => true,
EventMsg::Error(_)
| EventMsg::Warning(_)
| EventMsg::TaskStarted(_)
| EventMsg::TaskComplete(_)
| EventMsg::AgentMessageDelta(_)

@@ -10,6 +10,23 @@ use crate::exec::SandboxType;
use crate::protocol::AskForApproval;
use crate::protocol::SandboxPolicy;

#[cfg(target_os = "windows")]
use std::sync::atomic::AtomicBool;
#[cfg(target_os = "windows")]
use std::sync::atomic::Ordering;

#[cfg(target_os = "windows")]
static WINDOWS_SANDBOX_ENABLED: AtomicBool = AtomicBool::new(false);

#[cfg(target_os = "windows")]
pub fn set_windows_sandbox_enabled(enabled: bool) {
WINDOWS_SANDBOX_ENABLED.store(enabled, Ordering::Relaxed);
}

#[cfg(not(target_os = "windows"))]
#[allow(dead_code)]
pub fn set_windows_sandbox_enabled(_enabled: bool) {}

#[derive(Debug, PartialEq)]
pub enum SafetyCheck {
AutoApprove {
@@ -84,6 +101,14 @@ pub fn get_platform_sandbox() -> Option<SandboxType> {
Some(SandboxType::MacosSeatbelt)
} else if cfg!(target_os = "linux") {
Some(SandboxType::LinuxSeccomp)
} else if cfg!(target_os = "windows") {
#[cfg(target_os = "windows")]
{
if WINDOWS_SANDBOX_ENABLED.load(Ordering::Relaxed) {
return Some(SandboxType::WindowsRestrictedToken);
}
}
None
} else {
None
}

@@ -74,25 +74,13 @@ impl SandboxManager {
match pref {
SandboxablePreference::Forbid => SandboxType::None,
SandboxablePreference::Require => {
#[cfg(target_os = "macos")]
{
return SandboxType::MacosSeatbelt;
}
#[cfg(target_os = "linux")]
{
return SandboxType::LinuxSeccomp;
}
#[allow(unreachable_code)]
SandboxType::None
// Require a platform sandbox when available; on Windows this
// respects the enable_experimental_windows_sandbox feature.
crate::safety::get_platform_sandbox().unwrap_or(SandboxType::None)
}
SandboxablePreference::Auto => match policy {
SandboxPolicy::DangerFullAccess => SandboxType::None,
#[cfg(target_os = "macos")]
_ => SandboxType::MacosSeatbelt,
#[cfg(target_os = "linux")]
_ => SandboxType::LinuxSeccomp,
#[cfg(not(any(target_os = "macos", target_os = "linux")))]
_ => SandboxType::None,
_ => crate::safety::get_platform_sandbox().unwrap_or(SandboxType::None),
},
}
}
@@ -143,6 +131,14 @@ impl SandboxManager {
Some("codex-linux-sandbox".to_string()),
)
}
// On Windows, the restricted token sandbox executes in-process via the
// codex-windows-sandbox crate. We leave the command unchanged here and
// branch during execution based on the sandbox type.
#[cfg(target_os = "windows")]
SandboxType::WindowsRestrictedToken => (command, HashMap::new(), None),
// When building for non-Windows targets, this variant is never constructed.
#[cfg(not(target_os = "windows"))]
SandboxType::WindowsRestrictedToken => (command, HashMap::new(), None),
};

env.extend(sandbox_env);

@@ -71,6 +71,10 @@
(sysctl-name-prefix "net.routetable.")
)

; Allow Java to set CPU type grade when required
(allow sysctl-write
(sysctl-name "kern.grade_cputype"))

; IOKit
(allow iokit-open
(iokit-registry-entry-class "RootDomainUserClient")

@@ -4,6 +4,8 @@ use async_trait::async_trait;
use codex_protocol::items::TurnItem;
use codex_protocol::models::ContentItem;
use codex_protocol::models::ResponseItem;
use codex_protocol::protocol::AgentMessageContentDeltaEvent;
use codex_protocol::protocol::AgentMessageDeltaEvent;
use codex_protocol::protocol::Event;
use codex_protocol::protocol::EventMsg;
use codex_protocol::protocol::ExitedReviewModeEvent;
@@ -88,6 +90,7 @@ async fn start_review_conversation(
session.clone_session(),
ctx.clone(),
cancellation_token,
None,
)
.await)
.ok()
@@ -111,13 +114,15 @@ async fn process_review_events(
}
prev_agent_message = Some(event);
}
// Suppress ItemCompleted for assistant messages: forwarding it would
// trigger legacy AgentMessage via as_legacy_events(), which this
// Suppress ItemCompleted only for assistant messages: forwarding it
// would trigger legacy AgentMessage via as_legacy_events(), which this
// review flow intentionally hides in favor of structured output.
EventMsg::ItemCompleted(ItemCompletedEvent {
item: TurnItem::AgentMessage(_),
..
}) => {}
})
| EventMsg::AgentMessageDelta(AgentMessageDeltaEvent { .. })
| EventMsg::AgentMessageContentDelta(AgentMessageContentDeltaEvent { .. }) => {}
EventMsg::TaskComplete(task_complete) => {
// Parse review output from the last agent message (if present).
let out = task_complete

@@ -89,7 +89,10 @@ impl SessionTask for UserShellCommandTask {
let tool_call = ToolCall {
tool_name: USER_SHELL_TOOL_NAME.to_string(),
call_id: Uuid::new_v4().to_string(),
payload: ToolPayload::LocalShell { params },
payload: ToolPayload::LocalShell {
params,
is_user_shell_command: true,
},
};

let router = Arc::new(ToolRouter::from_config(&turn_context.tools_config, None));

@@ -40,6 +40,7 @@ pub enum ToolPayload {
},
LocalShell {
params: ShellToolCallParams,
is_user_shell_command: bool,
},
UnifiedExec {
arguments: String,
@@ -56,7 +57,7 @@ impl ToolPayload {
match self {
ToolPayload::Function { arguments } => Cow::Borrowed(arguments),
ToolPayload::Custom { input } => Cow::Borrowed(input),
ToolPayload::LocalShell { params } => Cow::Owned(params.command.join(" ")),
ToolPayload::LocalShell { params, .. } => Cow::Owned(params.command.join(" ")),
ToolPayload::UnifiedExec { arguments } => Cow::Borrowed(arguments),
ToolPayload::Mcp { raw_arguments, .. } => Cow::Borrowed(raw_arguments),
}

@@ -82,7 +82,10 @@ impl ToolHandler for ShellHandler {
)
.await
}
ToolPayload::LocalShell { params } => {
ToolPayload::LocalShell {
params,
is_user_shell_command,
} => {
let exec_params = Self::to_exec_params(params, turn.as_ref());
Self::run_exec_like(
tool_name.as_str(),
@@ -91,7 +94,7 @@ impl ToolHandler for ShellHandler {
turn,
tracker,
call_id,
true,
is_user_shell_command,
)
.await
}
@@ -219,6 +222,7 @@ impl ShellHandler {
env: exec_params.env.clone(),
with_escalated_permissions: exec_params.with_escalated_permissions,
justification: exec_params.justification.clone(),
is_user_shell_command,
};
let mut orchestrator = ToolOrchestrator::new();
let mut runtime = ShellRuntime::new();

@@ -83,6 +83,8 @@ impl ToolOrchestrator {
if tool.wants_escalated_first_attempt(req) {
initial_sandbox = crate::exec::SandboxType::None;
}
// Platform-specific flag gating is handled by SandboxManager::select_initial
// via crate::safety::get_platform_sandbox().
let initial_attempt = SandboxAttempt {
sandbox: initial_sandbox,
policy: &turn_ctx.sandbox_policy,

@@ -120,7 +120,10 @@ impl ToolRouter {
Ok(Some(ToolCall {
tool_name: "local_shell".to_string(),
call_id,
payload: ToolPayload::LocalShell { params },
payload: ToolPayload::LocalShell {
params,
is_user_shell_command: false,
},
}))
}
}

@@ -34,6 +34,7 @@ pub struct ShellRequest {
pub env: std::collections::HashMap<String, String>,
pub with_escalated_permissions: Option<bool>,
pub justification: Option<String>,
pub is_user_shell_command: bool,
}

impl ProvidesSandboxRetryData for ShellRequest {
@@ -121,6 +122,9 @@ impl Approvable<ShellRequest> for ShellRuntime {
policy: AskForApproval,
sandbox_policy: &SandboxPolicy,
) -> bool {
if req.is_user_shell_command {
return false;
}
if is_known_safe_command(&req.command) {
return false;
}
@@ -146,7 +150,7 @@ impl Approvable<ShellRequest> for ShellRuntime {
}

fn wants_escalated_first_attempt(&self, req: &ShellRequest) -> bool {
req.with_escalated_permissions.unwrap_or(false)
req.is_user_shell_command || req.with_escalated_permissions.unwrap_or(false)
}
}

@@ -3,29 +3,25 @@ use serde::Serialize;

use codex_protocol::models::ContentItem;
use codex_protocol::models::ResponseItem;
use codex_protocol::protocol::USER_INSTRUCTIONS_CLOSE_TAG;
use codex_protocol::protocol::USER_INSTRUCTIONS_OPEN_TAG;

/// Wraps user instructions in a tag so the model can classify them easily.
pub const USER_INSTRUCTIONS_OPEN_TAG_LEGACY: &str = "<user_instructions>";
pub const USER_INSTRUCTIONS_PREFIX: &str = "# AGENTS.md instructions for ";

#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
#[serde(rename = "user_instructions", rename_all = "snake_case")]
pub(crate) struct UserInstructions {
text: String,
pub directory: String,
pub text: String,
}

impl UserInstructions {
pub fn new<T: Into<String>>(text: T) -> Self {
Self { text: text.into() }
}

/// Serializes the user instructions to an XML-like tagged block that starts
/// with <user_instructions> so clients can classify it.
pub fn serialize_to_xml(self) -> String {
format!(
"{USER_INSTRUCTIONS_OPEN_TAG}\n\n{}\n\n{USER_INSTRUCTIONS_CLOSE_TAG}",
self.text
)
pub fn is_user_instructions(message: &[ContentItem]) -> bool {
if let [ContentItem::InputText { text }] = message {
text.starts_with(USER_INSTRUCTIONS_PREFIX)
|| text.starts_with(USER_INSTRUCTIONS_OPEN_TAG_LEGACY)
} else {
false
}
}
}

@@ -35,8 +31,88 @@ impl From<UserInstructions> for ResponseItem {
id: None,
role: "user".to_string(),
content: vec![ContentItem::InputText {
text: ui.serialize_to_xml(),
text: format!(
"{USER_INSTRUCTIONS_PREFIX}{directory}\n\n<INSTRUCTIONS>\n{contents}\n</INSTRUCTIONS>",
directory = ui.directory,
contents = ui.text
),
}],
}
}
}

#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
#[serde(rename = "developer_instructions", rename_all = "snake_case")]
pub(crate) struct DeveloperInstructions {
text: String,
}

impl DeveloperInstructions {
pub fn new<T: Into<String>>(text: T) -> Self {
Self { text: text.into() }
}

pub fn into_text(self) -> String {
self.text
}
}

impl From<DeveloperInstructions> for ResponseItem {
fn from(di: DeveloperInstructions) -> Self {
ResponseItem::Message {
id: None,
role: "developer".to_string(),
content: vec![ContentItem::InputText {
text: di.into_text(),
}],
}
}
}

#[cfg(test)]
mod tests {
use super::*;

#[test]
fn test_user_instructions() {
let user_instructions = UserInstructions {
directory: "test_directory".to_string(),
text: "test_text".to_string(),
};
let response_item: ResponseItem = user_instructions.into();

let ResponseItem::Message { role, content, .. } = response_item else {
panic!("expected ResponseItem::Message");
};

assert_eq!(role, "user");

let [ContentItem::InputText { text }] = content.as_slice() else {
panic!("expected one InputText content item");
};

assert_eq!(
text,
"# AGENTS.md instructions for test_directory\n\n<INSTRUCTIONS>\ntest_text\n</INSTRUCTIONS>",
);
}

#[test]
fn test_is_user_instructions() {
assert!(UserInstructions::is_user_instructions(
&[ContentItem::InputText {
text: "# AGENTS.md instructions for test_directory\n\n<INSTRUCTIONS>\ntest_text\n</INSTRUCTIONS>".to_string(),
}]
));
assert!(UserInstructions::is_user_instructions(&[
ContentItem::InputText {
text: "<user_instructions>test_text</user_instructions>".to_string(),
}
]));
assert!(!UserInstructions::is_user_instructions(&[
ContentItem::InputText {
text: "test_text".to_string(),
}
]));
}
}

@@ -479,6 +479,7 @@ pub async fn mount_sse_sequence(server: &MockServer, bodies: Vec<String>) -> Res

let (mock, response_mock) = base_mock();
mock.respond_with(responder)
.up_to_n_times(num_calls as u64)
.expect(num_calls as u64)
.mount(server)
.await;

@@ -240,6 +240,30 @@ impl TestCodexHarness {
.expect("output string")
.to_string()
}

pub async fn custom_tool_call_output(&self, call_id: &str) -> String {
let bodies = self.request_bodies().await;
custom_tool_call_output(&bodies, call_id)
.get("output")
.and_then(Value::as_str)
.expect("output string")
.to_string()
}
}

fn custom_tool_call_output<'a>(bodies: &'a [Value], call_id: &str) -> &'a Value {
for body in bodies {
if let Some(items) = body.get("input").and_then(Value::as_array) {
for item in items {
if item.get("type").and_then(Value::as_str) == Some("custom_tool_call_output")
&& item.get("call_id").and_then(Value::as_str) == Some(call_id)
{
return item;
}
}
}
}
panic!("custom_tool_call_output {call_id} not found");
}

fn function_call_output<'a>(bodies: &'a [Value], call_id: &str) -> &'a Value {

@@ -18,7 +18,7 @@ use tempfile::TempDir;
use wiremock::matchers::header;

#[tokio::test]
async fn responses_stream_includes_task_type_header() {
async fn responses_stream_includes_subagent_header_on_review() {
core_test_support::skip_if_no_network!();

let server = responses::start_mock_server().await;
@@ -27,9 +27,12 @@ async fn responses_stream_includes_task_type_header() {
responses::ev_completed("resp-1"),
]);

let request_recorder =
responses::mount_sse_once_match(&server, header("Codex-Task-Type", "exec"), response_body)
.await;
let request_recorder = responses::mount_sse_once_match(
&server,
header("x-openai-subagent", "review"),
response_body,
)
.await;

let provider = ModelProviderInfo {
name: "mock".into(),
@@ -76,7 +79,7 @@ async fn responses_stream_includes_task_type_header() {
effort,
summary,
conversation_id,
SessionSource::Exec,
SessionSource::SubAgent(codex_protocol::protocol::SubAgentSource::Review),
);

let mut prompt = Prompt::default();
@@ -96,5 +99,98 @@ async fn responses_stream_includes_task_type_header() {
}

let request = request_recorder.single_request();
assert_eq!(request.header("Codex-Task-Type").as_deref(), Some("exec"));
assert_eq!(
request.header("x-openai-subagent").as_deref(),
Some("review")
);
}

#[tokio::test]
async fn responses_stream_includes_subagent_header_on_other() {
core_test_support::skip_if_no_network!();

let server = responses::start_mock_server().await;
let response_body = responses::sse(vec![
responses::ev_response_created("resp-1"),
responses::ev_completed("resp-1"),
]);

let request_recorder = responses::mount_sse_once_match(
&server,
header("x-openai-subagent", "my-task"),
response_body,
)
.await;

let provider = ModelProviderInfo {
name: "mock".into(),
base_url: Some(format!("{}/v1", server.uri())),
env_key: None,
env_key_instructions: None,
experimental_bearer_token: None,
wire_api: WireApi::Responses,
query_params: None,
http_headers: None,
env_http_headers: None,
request_max_retries: Some(0),
stream_max_retries: Some(0),
stream_idle_timeout_ms: Some(5_000),
requires_openai_auth: false,
};

let codex_home = TempDir::new().expect("failed to create TempDir");
let mut config = load_default_config_for_test(&codex_home);
config.model_provider_id = provider.name.clone();
config.model_provider = provider.clone();
let effort = config.model_reasoning_effort;
let summary = config.model_reasoning_summary;
let config = Arc::new(config);

let conversation_id = ConversationId::new();

let otel_event_manager = OtelEventManager::new(
conversation_id,
config.model.as_str(),
config.model_family.slug.as_str(),
None,
Some("test@test.com".to_string()),
Some(AuthMode::ChatGPT),
false,
"test".to_string(),
);

let client = ModelClient::new(
Arc::clone(&config),
None,
otel_event_manager,
provider,
effort,
summary,
conversation_id,
SessionSource::SubAgent(codex_protocol::protocol::SubAgentSource::Other(
"my-task".to_string(),
)),
);

let mut prompt = Prompt::default();
prompt.input = vec![ResponseItem::Message {
id: None,
role: "user".into(),
content: vec![ContentItem::InputText {
text: "hello".into(),
}],
}];

let mut stream = client.stream(&prompt).await.expect("stream failed");
while let Some(event) = stream.next().await {
if matches!(event, Ok(ResponseEvent::Completed { .. })) {
break;
}
}

let request = request_recorder.single_request();
assert_eq!(
request.header("x-openai-subagent").as_deref(),
Some("my-task")
);
}

1001 codex-rs/core/tests/suite/apply_patch_freeform.rs (Normal file): File diff suppressed because it is too large.

@@ -58,6 +58,18 @@ fn assert_message_role(request_body: &serde_json::Value, role: &str) {
assert_eq!(request_body["role"].as_str().unwrap(), role);
}

#[expect(clippy::expect_used)]
fn assert_message_equals(request_body: &serde_json::Value, text: &str) {
let content = request_body["content"][0]["text"]
.as_str()
.expect("invalid message content");

assert_eq!(
content, text,
"expected message content '{content}' to equal '{text}'"
);
}

#[expect(clippy::expect_used)]
fn assert_message_starts_with(request_body: &serde_json::Value, text: &str) {
let content = request_body["content"][0]["text"]
@@ -601,13 +613,81 @@ async fn includes_user_instructions_message_in_request() {
.contains("be nice")
);
assert_message_role(&request_body["input"][0], "user");
assert_message_starts_with(&request_body["input"][0], "<user_instructions>");
assert_message_ends_with(&request_body["input"][0], "</user_instructions>");
assert_message_starts_with(&request_body["input"][0], "# AGENTS.md instructions for ");
assert_message_ends_with(&request_body["input"][0], "</INSTRUCTIONS>");
let ui_text = request_body["input"][0]["content"][0]["text"]
.as_str()
.expect("invalid message content");
assert!(ui_text.contains("<INSTRUCTIONS>"));
assert!(ui_text.contains("be nice"));
assert_message_role(&request_body["input"][1], "user");
assert_message_starts_with(&request_body["input"][1], "<environment_context>");
assert_message_ends_with(&request_body["input"][1], "</environment_context>");
}

#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn includes_developer_instructions_message_in_request() {
skip_if_no_network!();
let server = MockServer::start().await;

let resp_mock =
responses::mount_sse_once_match(&server, path("/v1/responses"), sse_completed("resp1"))
.await;

let model_provider = ModelProviderInfo {
base_url: Some(format!("{}/v1", server.uri())),
..built_in_model_providers()["openai"].clone()
};

let codex_home = TempDir::new().unwrap();
let mut config = load_default_config_for_test(&codex_home);
config.model_provider = model_provider;
config.user_instructions = Some("be nice".to_string());
config.developer_instructions = Some("be useful".to_string());

let conversation_manager =
ConversationManager::with_auth(CodexAuth::from_api_key("Test API Key"));
let codex = conversation_manager
.new_conversation(config)
.await
.expect("create new conversation")
.conversation;

codex
.submit(Op::UserInput {
items: vec![UserInput::Text {
text: "hello".into(),
}],
})
.await
.unwrap();

wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await;

let request = resp_mock.single_request();
let request_body = request.body_json();

assert!(
!request_body["instructions"]
.as_str()
.unwrap()
.contains("be nice")
);
assert_message_role(&request_body["input"][0], "developer");
assert_message_equals(&request_body["input"][0], "be useful");
assert_message_role(&request_body["input"][1], "user");
assert_message_starts_with(&request_body["input"][1], "# AGENTS.md instructions for ");
assert_message_ends_with(&request_body["input"][1], "</INSTRUCTIONS>");
let ui_text = request_body["input"][1]["content"][0]["text"]
.as_str()
.expect("invalid message content");
assert!(ui_text.contains("<INSTRUCTIONS>"));
assert!(ui_text.contains("be nice"));
assert_message_role(&request_body["input"][2], "user");
assert_message_starts_with(&request_body["input"][2], "<environment_context>");
assert_message_ends_with(&request_body["input"][2], "</environment_context>");
}

#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn azure_responses_request_includes_store_and_reasoning_ids() {
skip_if_no_network!();

@@ -8,6 +8,7 @@ use codex_core::protocol::EventMsg;
|
||||
use codex_core::protocol::Op;
|
||||
use codex_core::protocol::RolloutItem;
|
||||
use codex_core::protocol::RolloutLine;
|
||||
use codex_core::protocol::WarningEvent;
|
||||
use codex_protocol::user_input::UserInput;
|
||||
use core_test_support::load_default_config_for_test;
|
||||
use core_test_support::skip_if_no_network;
|
||||
@@ -45,6 +46,7 @@ const CONTEXT_LIMIT_MESSAGE: &str =
|
||||
const DUMMY_FUNCTION_NAME: &str = "unsupported_tool";
|
||||
const DUMMY_CALL_ID: &str = "call-multi-auto";
|
||||
const FUNCTION_CALL_LIMIT_MSG: &str = "function call limit push";
|
||||
pub(super) const COMPACT_WARNING_MESSAGE: &str = "Heads up: Long conversations and multiple compactions can cause the model to be less accurate. Start new a new conversation when possible to keep conversations small and targeted.";
|
||||
|
||||
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
|
||||
async fn summarize_context_three_requests_and_instructions() {
|
||||
@@ -118,6 +120,11 @@ async fn summarize_context_three_requests_and_instructions() {
|
||||
|
||||
// 2) Summarize – second hit should include the summarization prompt.
|
||||
codex.submit(Op::Compact).await.unwrap();
|
||||
let warning_event = wait_for_event(&codex, |ev| matches!(ev, EventMsg::Warning(_))).await;
|
||||
let EventMsg::Warning(WarningEvent { message }) = warning_event else {
|
||||
panic!("expected warning event after compact");
|
||||
};
|
||||
assert_eq!(message, COMPACT_WARNING_MESSAGE);
|
||||
wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await;
|
||||
|
||||
// 3) Next user input – third hit; history should include only the summary.
|
||||
@@ -261,6 +268,70 @@ async fn summarize_context_three_requests_and_instructions() {
|
||||
);
|
||||
}
|
||||
|
||||
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
|
||||
async fn manual_compact_uses_custom_prompt() {
|
||||
skip_if_no_network!();
|
||||
|
||||
let server = start_mock_server().await;
|
||||
let sse_stream = sse(vec![ev_completed("r1")]);
|
||||
mount_sse_once(&server, sse_stream).await;
|
||||
|
||||
let custom_prompt = "Use this compact prompt instead";
|
||||
|
||||
let model_provider = ModelProviderInfo {
|
||||
base_url: Some(format!("{}/v1", server.uri())),
|
||||
..built_in_model_providers()["openai"].clone()
|
||||
};
|
||||
let home = TempDir::new().unwrap();
|
||||
let mut config = load_default_config_for_test(&home);
|
||||
config.model_provider = model_provider;
|
||||
config.compact_prompt = Some(custom_prompt.to_string());
|
||||
|
||||
let conversation_manager = ConversationManager::with_auth(CodexAuth::from_api_key("dummy"));
|
||||
let codex = conversation_manager
|
||||
.new_conversation(config)
|
||||
.await
|
||||
.expect("create conversation")
|
||||
.conversation;
|
||||
|
||||
codex.submit(Op::Compact).await.expect("trigger compact");
|
||||
let warning_event = wait_for_event(&codex, |ev| matches!(ev, EventMsg::Warning(_))).await;
|
||||
let EventMsg::Warning(WarningEvent { message }) = warning_event else {
|
||||
panic!("expected warning event after compact");
|
||||
};
|
||||
assert_eq!(message, COMPACT_WARNING_MESSAGE);
|
||||
wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await;
|
||||
|
||||
let requests = server.received_requests().await.expect("collect requests");
|
||||
let body = requests
|
||||
.iter()
|
||||
.find_map(|req| req.body_json::<serde_json::Value>().ok())
|
||||
.expect("summary request body");
|
||||
|
||||
let input = body
|
||||
.get("input")
|
||||
.and_then(|v| v.as_array())
|
||||
.expect("input array");
|
||||
let mut found_custom_prompt = false;
|
||||
let mut found_default_prompt = false;
|
||||
|
||||
for item in input {
|
||||
if item["type"].as_str() != Some("message") {
|
||||
continue;
|
||||
}
|
||||
let text = item["content"][0]["text"].as_str().unwrap_or_default();
|
||||
if text == custom_prompt {
|
||||
found_custom_prompt = true;
|
||||
}
|
||||
if text == SUMMARIZATION_PROMPT {
|
||||
found_default_prompt = true;
|
||||
}
|
||||
}
|
||||
|
||||
assert!(found_custom_prompt, "custom prompt should be injected");
|
||||
assert!(!found_default_prompt, "default prompt should be replaced");
|
||||
}
|
||||
|
||||
// Windows CI only: bump to 4 workers to prevent SSE/event starvation and test timeouts.
|
||||
#[cfg_attr(windows, tokio::test(flavor = "multi_thread", worker_threads = 4))]
|
||||
#[cfg_attr(not(windows), tokio::test(flavor = "multi_thread", worker_threads = 2))]
|
||||
@@ -683,7 +754,6 @@ async fn manual_compact_retries_after_context_window_error() {
|
||||
wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await;
|
||||
|
||||
codex.submit(Op::Compact).await.unwrap();
|
||||
|
||||
let EventMsg::BackgroundEvent(event) =
|
||||
wait_for_event(&codex, |ev| matches!(ev, EventMsg::BackgroundEvent(_))).await
|
||||
else {
|
||||
@@ -694,6 +764,11 @@ async fn manual_compact_retries_after_context_window_error() {
|
||||
"background event should mention trimmed item count: {}",
|
||||
event.message
|
||||
);
|
||||
let warning_event = wait_for_event(&codex, |ev| matches!(ev, EventMsg::Warning(_))).await;
|
||||
let EventMsg::Warning(WarningEvent { message }) = warning_event else {
|
||||
panic!("expected warning event after compact retry");
|
||||
};
|
||||
assert_eq!(message, COMPACT_WARNING_MESSAGE);
|
||||
wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await;
|
||||
|
||||
let requests = request_log.requests();
|
||||
|
||||
@@ -7,6 +7,7 @@
|
||||
//! request payload that Codex would send to the model and assert that the
|
||||
//! model-visible history matches the expected sequence of messages.
|
||||
|
||||
use super::compact::COMPACT_WARNING_MESSAGE;
|
||||
use super::compact::FIRST_REPLY;
|
||||
use super::compact::SUMMARY_TEXT;
|
||||
use codex_core::CodexAuth;
|
||||
@@ -20,6 +21,7 @@ use codex_core::config::Config;
|
||||
use codex_core::config::OPENAI_DEFAULT_MODEL;
|
||||
use codex_core::protocol::EventMsg;
|
||||
use codex_core::protocol::Op;
|
||||
use codex_core::protocol::WarningEvent;
|
||||
use codex_core::spawn::CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR;
|
||||
use codex_protocol::user_input::UserInput;
|
||||
use core_test_support::load_default_config_for_test;
|
||||
@@ -813,6 +815,11 @@ async fn compact_conversation(conversation: &Arc<CodexConversation>) {
|
||||
.submit(Op::Compact)
|
||||
.await
|
||||
.expect("compact conversation");
|
||||
let warning_event = wait_for_event(conversation, |ev| matches!(ev, EventMsg::Warning(_))).await;
|
||||
let EventMsg::Warning(WarningEvent { message }) = warning_event else {
|
||||
panic!("expected warning event after compact");
|
||||
};
|
||||
assert_eq!(message, COMPACT_WARNING_MESSAGE);
|
||||
wait_for_event(conversation, |ev| matches!(ev, EventMsg::TaskComplete(_))).await;
|
||||
}
|
||||
|
||||
|
||||
@@ -5,6 +5,8 @@ mod abort_tasks;
#[cfg(not(target_os = "windows"))]
mod apply_patch_cli;
#[cfg(not(target_os = "windows"))]
mod apply_patch_freeform;
#[cfg(not(target_os = "windows"))]
mod approvals;
mod cli_stream;
mod client;
@@ -36,6 +38,7 @@ mod tool_harness;
mod tool_parallelism;
mod tools;
mod truncation;
mod undo;
mod unified_exec;
mod user_notification;
mod user_shell_cmd;
@@ -18,10 +18,7 @@ use codex_core::shell::default_user_shell;
|
||||
use codex_protocol::user_input::UserInput;
|
||||
use core_test_support::load_default_config_for_test;
|
||||
use core_test_support::load_sse_fixture_with_id;
|
||||
use core_test_support::responses;
|
||||
use core_test_support::responses::mount_sse_once;
|
||||
use core_test_support::skip_if_no_network;
|
||||
use core_test_support::test_codex::test_codex;
|
||||
use core_test_support::wait_for_event;
|
||||
use std::collections::HashMap;
|
||||
use tempfile::TempDir;
|
||||
@@ -357,8 +354,10 @@ async fn prefixes_context_and_instructions_once_and_consistently_across_requests
|
||||
None => String::new(),
|
||||
}
|
||||
);
|
||||
let expected_ui_text =
|
||||
"<user_instructions>\n\nbe consistent and helpful\n\n</user_instructions>";
|
||||
let expected_ui_text = format!(
|
||||
"# AGENTS.md instructions for {}\n\n<INSTRUCTIONS>\nbe consistent and helpful\n</INSTRUCTIONS>",
|
||||
cwd.path().to_string_lossy()
|
||||
);
|
||||
|
||||
let expected_env_msg = serde_json::json!({
|
||||
"type": "message",
|
||||
@@ -737,9 +736,11 @@ async fn send_user_turn_with_no_changes_does_not_send_environment_context() {
|
||||
let body2 = requests[1].body_json::<serde_json::Value>().unwrap();
|
||||
|
||||
let shell = default_user_shell().await;
|
||||
let expected_ui_text =
|
||||
"<user_instructions>\n\nbe consistent and helpful\n\n</user_instructions>";
|
||||
let expected_ui_msg = text_user_input(expected_ui_text.to_string());
|
||||
let expected_ui_text = format!(
|
||||
"# AGENTS.md instructions for {}\n\n<INSTRUCTIONS>\nbe consistent and helpful\n</INSTRUCTIONS>",
|
||||
default_cwd.to_string_lossy()
|
||||
);
|
||||
let expected_ui_msg = text_user_input(expected_ui_text);
|
||||
|
||||
let expected_env_msg_1 = text_user_input(default_env_context_str(
|
||||
&cwd.path().to_string_lossy(),
|
||||
@@ -851,8 +852,10 @@ async fn send_user_turn_with_changes_sends_environment_context() {
|
||||
let body2 = requests[1].body_json::<serde_json::Value>().unwrap();
|
||||
|
||||
let shell = default_user_shell().await;
|
||||
let expected_ui_text =
|
||||
"<user_instructions>\n\nbe consistent and helpful\n\n</user_instructions>";
|
||||
let expected_ui_text = format!(
|
||||
"# AGENTS.md instructions for {}\n\n<INSTRUCTIONS>\nbe consistent and helpful\n</INSTRUCTIONS>",
|
||||
default_cwd.to_string_lossy()
|
||||
);
|
||||
let expected_ui_msg = serde_json::json!({
|
||||
"type": "message",
|
||||
"role": "user",
|
||||
@@ -886,68 +889,3 @@ async fn send_user_turn_with_changes_sends_environment_context() {
|
||||
]);
|
||||
assert_eq!(body2["input"], expected_input_2);
|
||||
}
|
||||
|
||||
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
|
||||
async fn cached_prompt_filters_reasoning_items_from_previous_turns() -> anyhow::Result<()> {
|
||||
skip_if_no_network!(Ok(()));
|
||||
|
||||
let server = responses::start_mock_server().await;
|
||||
let call_id = "shell-call";
|
||||
let shell_args = serde_json::json!({
|
||||
"command": ["/bin/echo", "tool output"],
|
||||
"timeout_ms": 1_000,
|
||||
});
|
||||
|
||||
let initial_response = responses::sse(vec![
|
||||
responses::ev_response_created("resp-first"),
|
||||
responses::ev_reasoning_item("reason-1", &["Planning shell command"], &[]),
|
||||
responses::ev_function_call(
|
||||
call_id,
|
||||
"shell",
|
||||
&serde_json::to_string(&shell_args).expect("serialize shell args"),
|
||||
),
|
||||
responses::ev_completed("resp-first"),
|
||||
]);
|
||||
let follow_up_response = responses::sse(vec![
|
||||
responses::ev_response_created("resp-follow-up"),
|
||||
responses::ev_reasoning_item(
|
||||
"reason-2",
|
||||
&["Shell execution completed"],
|
||||
&["stdout: tool output"],
|
||||
),
|
||||
responses::ev_assistant_message("assistant-1", "First turn reply"),
|
||||
responses::ev_completed("resp-follow-up"),
|
||||
]);
|
||||
let second_turn_response = responses::sse(vec![
|
||||
responses::ev_response_created("resp-second"),
|
||||
responses::ev_assistant_message("assistant-2", "Second turn reply"),
|
||||
responses::ev_completed("resp-second"),
|
||||
]);
|
||||
mount_sse_once(&server, initial_response).await;
|
||||
let second_request = mount_sse_once(&server, follow_up_response).await;
|
||||
let third_request = mount_sse_once(&server, second_turn_response).await;
|
||||
|
||||
let mut builder = test_codex();
|
||||
let test = builder.build(&server).await?;
|
||||
|
||||
test.submit_turn("hello 1").await?;
|
||||
test.submit_turn("hello 2").await?;
|
||||
|
||||
let second_request_input = second_request.single_request();
|
||||
let reasoning_items = second_request_input.inputs_of_type("reasoning");
|
||||
assert_eq!(
|
||||
reasoning_items.len(),
|
||||
1,
|
||||
"expected first turn follow-up to include reasoning item"
|
||||
);
|
||||
|
||||
let third_request_input = third_request.single_request();
|
||||
let cached_reasoning = third_request_input.inputs_of_type("reasoning");
|
||||
assert_eq!(
|
||||
cached_reasoning.len(),
|
||||
0,
|
||||
"expected cached prompt to filter out prior reasoning items"
|
||||
);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -204,6 +204,85 @@ async fn review_op_with_plain_text_emits_review_fallback() {
|
||||
server.verify().await;
|
||||
}
|
||||
|
||||
/// Ensure review flow suppresses assistant-specific streaming/completion events:
|
||||
/// - AgentMessageContentDelta
|
||||
/// - AgentMessageDelta (legacy)
|
||||
/// - ItemCompleted for TurnItem::AgentMessage
|
||||
// Windows CI only: bump to 4 workers to prevent SSE/event starvation and test timeouts.
|
||||
#[cfg_attr(windows, tokio::test(flavor = "multi_thread", worker_threads = 4))]
|
||||
#[cfg_attr(not(windows), tokio::test(flavor = "multi_thread", worker_threads = 2))]
|
||||
async fn review_filters_agent_message_related_events() {
|
||||
skip_if_no_network!();
|
||||
|
||||
// Stream simulating a typing assistant message with deltas and finalization.
|
||||
let sse_raw = r#"[
|
||||
{"type":"response.output_item.added", "item":{
|
||||
"type":"message", "role":"assistant", "id":"msg-1",
|
||||
"content":[{"type":"output_text","text":""}]
|
||||
}},
|
||||
{"type":"response.output_text.delta", "delta":"Hi"},
|
||||
{"type":"response.output_text.delta", "delta":" there"},
|
||||
{"type":"response.output_item.done", "item":{
|
||||
"type":"message", "role":"assistant", "id":"msg-1",
|
||||
"content":[{"type":"output_text","text":"Hi there"}]
|
||||
}},
|
||||
{"type":"response.completed", "response": {"id": "__ID__"}}
|
||||
]"#;
|
||||
let server = start_responses_server_with_sse(sse_raw, 1).await;
|
||||
let codex_home = TempDir::new().unwrap();
|
||||
let codex = new_conversation_for_server(&server, &codex_home, |_| {}).await;
|
||||
|
||||
codex
|
||||
.submit(Op::Review {
|
||||
review_request: ReviewRequest {
|
||||
prompt: "Filter streaming events".to_string(),
|
||||
user_facing_hint: "Filter streaming events".to_string(),
|
||||
},
|
||||
})
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let mut saw_entered = false;
|
||||
let mut saw_exited = false;
|
||||
|
||||
// Drain until TaskComplete; assert filtered events never surface.
|
||||
wait_for_event_with_timeout(
|
||||
&codex,
|
||||
|event| match event {
|
||||
EventMsg::TaskComplete(_) => true,
|
||||
EventMsg::EnteredReviewMode(_) => {
|
||||
saw_entered = true;
|
||||
false
|
||||
}
|
||||
EventMsg::ExitedReviewMode(_) => {
|
||||
saw_exited = true;
|
||||
false
|
||||
}
|
||||
// The following must be filtered by review flow
|
||||
EventMsg::AgentMessageContentDelta(_) => {
|
||||
panic!("unexpected AgentMessageContentDelta surfaced during review")
|
||||
}
|
||||
EventMsg::AgentMessageDelta(_) => {
|
||||
panic!("unexpected AgentMessageDelta surfaced during review")
|
||||
}
|
||||
EventMsg::ItemCompleted(ev) => match &ev.item {
|
||||
codex_protocol::items::TurnItem::AgentMessage(_) => {
|
||||
panic!(
|
||||
"unexpected ItemCompleted for TurnItem::AgentMessage surfaced during review"
|
||||
)
|
||||
}
|
||||
_ => false,
|
||||
},
|
||||
_ => false,
|
||||
},
|
||||
tokio::time::Duration::from_secs(5),
|
||||
)
|
||||
.await;
|
||||
assert!(saw_entered && saw_exited, "missing review lifecycle events");
|
||||
|
||||
server.verify().await;
|
||||
}
|
||||
|
||||
/// When the model returns structured JSON in a review, ensure no AgentMessage
|
||||
/// is emitted; the UI consumes the structured result via ExitedReviewMode.
|
||||
// Windows CI only: bump to 4 workers to prevent SSE/event starvation and test timeouts.
|
||||
|
||||
@@ -8,8 +8,8 @@ use std::time::Duration;
|
||||
use std::time::SystemTime;
|
||||
use std::time::UNIX_EPOCH;
|
||||
|
||||
use codex_core::config_types::McpServerConfig;
|
||||
use codex_core::config_types::McpServerTransportConfig;
|
||||
use codex_core::config::types::McpServerConfig;
|
||||
use codex_core::config::types::McpServerTransportConfig;
|
||||
use codex_core::features::Feature;
|
||||
|
||||
use codex_core::protocol::AskForApproval;
|
||||
|
||||
@@ -203,6 +203,69 @@ async fn python_getpwuid_works_under_seatbelt() {
|
||||
assert!(status.success(), "python exited with {status:?}");
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn java_home_finds_runtime_under_seatbelt() {
|
||||
if std::env::var(CODEX_SANDBOX_ENV_VAR) == Ok("seatbelt".to_string()) {
|
||||
eprintln!("{CODEX_SANDBOX_ENV_VAR} is set to 'seatbelt', skipping test.");
|
||||
return;
|
||||
}
|
||||
|
||||
let java_home_path = Path::new("/usr/libexec/java_home");
|
||||
if !java_home_path.exists() {
|
||||
eprintln!("/usr/libexec/java_home is not present, skipping test.");
|
||||
return;
|
||||
}
|
||||
|
||||
let baseline_output = tokio::process::Command::new(java_home_path)
|
||||
.env_remove("JAVA_HOME")
|
||||
.output()
|
||||
.await
|
||||
.expect("should be able to invoke java_home outside seatbelt");
|
||||
if !baseline_output.status.success() {
|
||||
eprintln!(
|
||||
"java_home exited with {:?} outside seatbelt, skipping test",
|
||||
baseline_output.status
|
||||
);
|
||||
return;
|
||||
}
|
||||
|
||||
let policy = SandboxPolicy::ReadOnly;
|
||||
let command_cwd = std::env::current_dir().expect("getcwd");
|
||||
let sandbox_cwd = command_cwd.clone();
|
||||
|
||||
let mut env: HashMap<String, String> = std::env::vars().collect();
|
||||
env.remove("JAVA_HOME");
|
||||
env.remove(CODEX_SANDBOX_ENV_VAR);
|
||||
|
||||
let child = spawn_command_under_seatbelt(
|
||||
vec![java_home_path.to_string_lossy().to_string()],
|
||||
command_cwd,
|
||||
&policy,
|
||||
sandbox_cwd.as_path(),
|
||||
StdioPolicy::RedirectForShellTool,
|
||||
env,
|
||||
)
|
||||
.await
|
||||
.expect("should be able to spawn java_home under seatbelt");
|
||||
|
||||
let output = child
|
||||
.wait_with_output()
|
||||
.await
|
||||
.expect("should be able to wait for java_home child");
|
||||
assert!(
|
||||
output.status.success(),
|
||||
"java_home under seatbelt exited with {:?}, stderr: {}",
|
||||
output.status,
|
||||
String::from_utf8_lossy(&output.stderr)
|
||||
);
|
||||
|
||||
let stdout = String::from_utf8_lossy(&output.stdout);
|
||||
assert!(
|
||||
!stdout.trim().is_empty(),
|
||||
"java_home stdout unexpectedly empty under seatbelt"
|
||||
);
|
||||
}
|
||||
|
||||
#[expect(clippy::expect_used)]
|
||||
fn create_test_scenario(tmp: &TempDir) -> TestScenario {
|
||||
let repo_parent = tmp.path().to_path_buf();
|
||||
|
||||
@@ -30,6 +30,18 @@ use serde_json::Value;
|
||||
use serde_json::json;
|
||||
use std::fs;
|
||||
|
||||
const FIXTURE_JSON: &str = r#"{
|
||||
"description": "This is an example JSON file.",
|
||||
"foo": "bar",
|
||||
"isTest": true,
|
||||
"testNumber": 123,
|
||||
"testArray": [1, 2, 3],
|
||||
"testObject": {
|
||||
"foo": "bar"
|
||||
}
|
||||
}
|
||||
"#;
|
||||
|
||||
async fn submit_turn(test: &TestCodex, prompt: &str, sandbox_policy: SandboxPolicy) -> Result<()> {
|
||||
let session_model = test.session_configured.model.clone();
|
||||
|
||||
@@ -225,6 +237,154 @@ freeform shell
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
|
||||
async fn shell_output_preserves_fixture_json_without_serialization() -> Result<()> {
|
||||
skip_if_no_network!(Ok(()));
|
||||
|
||||
let server = start_mock_server().await;
|
||||
let mut builder = test_codex().with_config(|config| {
|
||||
config.features.disable(Feature::ApplyPatchFreeform);
|
||||
config.model = "gpt-5".to_string();
|
||||
config.model_family = find_family_for_model("gpt-5").expect("gpt-5 is a model family");
|
||||
});
|
||||
let test = builder.build(&server).await?;
|
||||
|
||||
let fixture_path = test.cwd.path().join("fixture.json");
|
||||
fs::write(&fixture_path, FIXTURE_JSON)?;
|
||||
let fixture_path_str = fixture_path.to_string_lossy().to_string();
|
||||
|
||||
let call_id = "shell-json-fixture";
|
||||
let args = json!({
|
||||
"command": ["/usr/bin/sed", "-n", "p", fixture_path_str],
|
||||
"timeout_ms": 1_000,
|
||||
});
|
||||
let responses = vec![
|
||||
sse(vec![
|
||||
ev_response_created("resp-1"),
|
||||
ev_function_call(call_id, "shell", &serde_json::to_string(&args)?),
|
||||
ev_completed("resp-1"),
|
||||
]),
|
||||
sse(vec![
|
||||
ev_assistant_message("msg-1", "done"),
|
||||
ev_completed("resp-2"),
|
||||
]),
|
||||
];
|
||||
mount_sse_sequence(&server, responses).await;
|
||||
|
||||
submit_turn(
|
||||
&test,
|
||||
"read the fixture JSON with sed",
|
||||
SandboxPolicy::DangerFullAccess,
|
||||
)
|
||||
.await?;
|
||||
|
||||
let requests = server
|
||||
.received_requests()
|
||||
.await
|
||||
.expect("recorded requests present");
|
||||
let bodies = request_bodies(&requests)?;
|
||||
let output_item = find_function_call_output(&bodies, call_id).expect("shell output present");
|
||||
let output = output_item
|
||||
.get("output")
|
||||
.and_then(Value::as_str)
|
||||
.expect("shell output string");
|
||||
|
||||
let mut parsed: Value = serde_json::from_str(output)?;
|
||||
if let Some(metadata) = parsed.get_mut("metadata").and_then(Value::as_object_mut) {
|
||||
let _ = metadata.remove("duration_seconds");
|
||||
}
|
||||
|
||||
assert_eq!(
|
||||
parsed
|
||||
.get("metadata")
|
||||
.and_then(|metadata| metadata.get("exit_code"))
|
||||
.and_then(Value::as_i64),
|
||||
Some(0),
|
||||
"expected zero exit code when serialization is disabled",
|
||||
);
|
||||
let stdout = parsed
|
||||
.get("output")
|
||||
.and_then(Value::as_str)
|
||||
.unwrap_or_default()
|
||||
.to_string();
|
||||
assert_eq!(
|
||||
stdout, FIXTURE_JSON,
|
||||
"expected shell output to match the fixture contents"
|
||||
);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
|
||||
async fn shell_output_structures_fixture_with_serialization() -> Result<()> {
|
||||
skip_if_no_network!(Ok(()));
|
||||
|
||||
let server = start_mock_server().await;
|
||||
let mut builder = test_codex().with_config(|config| {
|
||||
config.features.enable(Feature::ApplyPatchFreeform);
|
||||
});
|
||||
let test = builder.build(&server).await?;
|
||||
|
||||
let fixture_path = test.cwd.path().join("fixture.json");
|
||||
fs::write(&fixture_path, FIXTURE_JSON)?;
|
||||
let fixture_path_str = fixture_path.to_string_lossy().to_string();
|
||||
|
||||
let call_id = "shell-structured-fixture";
|
||||
let args = json!({
|
||||
"command": ["/usr/bin/sed", "-n", "p", fixture_path_str],
|
||||
"timeout_ms": 1_000,
|
||||
});
|
||||
let responses = vec![
|
||||
sse(vec![
|
||||
ev_response_created("resp-1"),
|
||||
ev_function_call(call_id, "shell", &serde_json::to_string(&args)?),
|
||||
ev_completed("resp-1"),
|
||||
]),
|
||||
sse(vec![
|
||||
ev_assistant_message("msg-1", "done"),
|
||||
ev_completed("resp-2"),
|
||||
]),
|
||||
];
|
||||
mount_sse_sequence(&server, responses).await;
|
||||
|
||||
submit_turn(
|
||||
&test,
|
||||
"read the fixture JSON with structured output",
|
||||
SandboxPolicy::DangerFullAccess,
|
||||
)
|
||||
.await?;
|
||||
|
||||
let requests = server
|
||||
.received_requests()
|
||||
.await
|
||||
.expect("recorded requests present");
|
||||
let bodies = request_bodies(&requests)?;
|
||||
let output_item =
|
||||
find_function_call_output(&bodies, call_id).expect("structured output present");
|
||||
let output = output_item
|
||||
.get("output")
|
||||
.and_then(Value::as_str)
|
||||
.expect("structured output string");
|
||||
|
||||
assert!(
|
||||
serde_json::from_str::<Value>(output).is_err(),
|
||||
"expected structured output to be plain text"
|
||||
);
|
||||
let (header, body) = output
|
||||
.split_once("Output:\n")
|
||||
.expect("structured output contains an Output section");
|
||||
assert_regex_match(
|
||||
r"(?s)^Exit code: 0\nWall time: [0-9]+(?:\.[0-9]+)? seconds$",
|
||||
header.trim_end(),
|
||||
);
|
||||
assert_eq!(
|
||||
body, FIXTURE_JSON,
|
||||
"expected Output section to include the fixture contents"
|
||||
);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
|
||||
async fn shell_output_for_freeform_tool_records_duration() -> Result<()> {
|
||||
skip_if_no_network!(Ok(()));
|
||||
|
||||
@@ -3,9 +3,16 @@
|
||||
|
||||
use anyhow::Context;
|
||||
use anyhow::Result;
|
||||
use codex_core::config::types::McpServerConfig;
|
||||
use codex_core::config::types::McpServerTransportConfig;
|
||||
use codex_core::features::Feature;
|
||||
use codex_core::model_family::find_family_for_model;
|
||||
use codex_core::protocol::AskForApproval;
|
||||
use codex_core::protocol::EventMsg;
|
||||
use codex_core::protocol::Op;
|
||||
use codex_core::protocol::SandboxPolicy;
|
||||
use codex_protocol::config_types::ReasoningSummary;
|
||||
use codex_protocol::user_input::UserInput;
|
||||
use core_test_support::assert_regex_match;
|
||||
use core_test_support::responses;
|
||||
use core_test_support::responses::ev_assistant_message;
|
||||
@@ -18,10 +25,13 @@ use core_test_support::responses::sse;
|
||||
use core_test_support::responses::start_mock_server;
|
||||
use core_test_support::skip_if_no_network;
|
||||
use core_test_support::test_codex::test_codex;
|
||||
use core_test_support::wait_for_event;
|
||||
use escargot::CargoBuild;
|
||||
use regex_lite::Regex;
|
||||
use serde_json::Value;
|
||||
use serde_json::json;
|
||||
use std::collections::HashMap;
|
||||
use std::time::Duration;
|
||||
use wiremock::matchers::any;
|
||||
|
||||
// Verifies byte-truncation formatting for function error output (RespondToModel errors)
|
||||
@@ -219,8 +229,8 @@ async fn mcp_tool_call_output_exceeds_limit_truncated_for_model() -> Result<()>
|
||||
config.features.enable(Feature::RmcpClient);
|
||||
config.mcp_servers.insert(
|
||||
server_name.to_string(),
|
||||
codex_core::config_types::McpServerConfig {
|
||||
transport: codex_core::config_types::McpServerTransportConfig::Stdio {
|
||||
codex_core::config::types::McpServerConfig {
|
||||
transport: codex_core::config::types::McpServerTransportConfig::Stdio {
|
||||
command: rmcp_test_server_bin,
|
||||
args: Vec::new(),
|
||||
env: None,
|
||||
@@ -268,3 +278,105 @@ async fn mcp_tool_call_output_exceeds_limit_truncated_for_model() -> Result<()>
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// Verifies that an MCP image tool output is serialized as content_items array with
|
||||
// the image preserved and no truncation summary appended (since there are no text items).
|
||||
#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
|
||||
async fn mcp_image_output_preserves_image_and_no_text_summary() -> Result<()> {
|
||||
skip_if_no_network!(Ok(()));
|
||||
|
||||
let server = start_mock_server().await;
|
||||
|
||||
let call_id = "rmcp-image-no-trunc";
|
||||
let server_name = "rmcp";
|
||||
let tool_name = format!("mcp__{server_name}__image");
|
||||
|
||||
mount_sse_once_match(
|
||||
&server,
|
||||
any(),
|
||||
sse(vec![
|
||||
ev_response_created("resp-1"),
|
||||
ev_function_call(call_id, &tool_name, "{}"),
|
||||
ev_completed("resp-1"),
|
||||
]),
|
||||
)
|
||||
.await;
|
||||
let final_mock = mount_sse_once_match(
|
||||
&server,
|
||||
any(),
|
||||
sse(vec![
|
||||
ev_assistant_message("msg-1", "done"),
|
||||
ev_completed("resp-2"),
|
||||
]),
|
||||
)
|
||||
.await;
|
||||
|
||||
// Build the stdio rmcp server and pass a tiny PNG via data URL so it can construct ImageContent.
|
||||
let rmcp_test_server_bin = CargoBuild::new()
|
||||
.package("codex-rmcp-client")
|
||||
.bin("test_stdio_server")
|
||||
.run()?
|
||||
.path()
|
||||
.to_string_lossy()
|
||||
.into_owned();
|
||||
|
||||
// 1x1 PNG data URL
|
||||
let openai_png = "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAQAAAC1HAwCAAAAC0lEQVR42mP8/x8AAwMB/ee9bQAAAABJRU5ErkJggg==";
|
||||
|
||||
let mut builder = test_codex().with_config(move |config| {
|
||||
config.features.enable(Feature::RmcpClient);
|
||||
config.mcp_servers.insert(
|
||||
server_name.to_string(),
|
||||
McpServerConfig {
|
||||
transport: McpServerTransportConfig::Stdio {
|
||||
command: rmcp_test_server_bin,
|
||||
args: Vec::new(),
|
||||
env: Some(HashMap::from([(
|
||||
"MCP_TEST_IMAGE_DATA_URL".to_string(),
|
||||
openai_png.to_string(),
|
||||
)])),
|
||||
env_vars: Vec::new(),
|
||||
cwd: None,
|
||||
},
|
||||
enabled: true,
|
||||
startup_timeout_sec: Some(Duration::from_secs(10)),
|
||||
tool_timeout_sec: None,
|
||||
enabled_tools: None,
|
||||
disabled_tools: None,
|
||||
},
|
||||
);
|
||||
});
|
||||
let fixture = builder.build(&server).await?;
|
||||
let session_model = fixture.session_configured.model.clone();
|
||||
|
||||
fixture
|
||||
.codex
|
||||
.submit(Op::UserTurn {
|
||||
items: vec![UserInput::Text {
|
||||
text: "call the rmcp image tool".into(),
|
||||
}],
|
||||
final_output_json_schema: None,
|
||||
cwd: fixture.cwd.path().to_path_buf(),
|
||||
approval_policy: AskForApproval::Never,
|
||||
sandbox_policy: SandboxPolicy::ReadOnly,
|
||||
model: session_model,
|
||||
effort: None,
|
||||
summary: ReasoningSummary::Auto,
|
||||
})
|
||||
.await?;
|
||||
|
||||
// Wait for completion to ensure the outbound request is captured.
|
||||
wait_for_event(&fixture.codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await;
|
||||
let output_item = final_mock.single_request().function_call_output(call_id);
|
||||
// Expect exactly one array element: the image item; and no trailing summary text.
|
||||
let output = output_item.get("output").expect("output");
|
||||
assert!(output.is_array(), "expected array output");
|
||||
let arr = output.as_array().unwrap();
|
||||
assert_eq!(arr.len(), 1, "no truncation summary should be appended");
|
||||
assert_eq!(
|
||||
arr[0],
|
||||
json!({"type": "input_image", "image_url": openai_png})
|
||||
);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
codex-rs/core/tests/suite/undo.rs (new file, 491 lines)
@@ -0,0 +1,491 @@
|
||||
#![cfg(not(target_os = "windows"))]
|
||||
|
||||
use std::fs;
|
||||
use std::path::Path;
|
||||
use std::process::Command;
|
||||
use std::sync::Arc;
|
||||
|
||||
use anyhow::Context;
|
||||
use anyhow::Result;
|
||||
use anyhow::bail;
|
||||
use codex_core::CodexConversation;
|
||||
use codex_core::config::Config;
|
||||
use codex_core::features::Feature;
|
||||
use codex_core::model_family::find_family_for_model;
|
||||
use codex_core::protocol::EventMsg;
|
||||
use codex_core::protocol::Op;
|
||||
use codex_core::protocol::UndoCompletedEvent;
|
||||
use core_test_support::responses::ev_apply_patch_function_call;
|
||||
use core_test_support::responses::ev_assistant_message;
|
||||
use core_test_support::responses::ev_completed;
|
||||
use core_test_support::responses::ev_response_created;
|
||||
use core_test_support::responses::mount_sse_sequence;
|
||||
use core_test_support::responses::sse;
|
||||
use core_test_support::skip_if_no_network;
|
||||
use core_test_support::test_codex::TestCodexHarness;
|
||||
use core_test_support::wait_for_event_match;
|
||||
use pretty_assertions::assert_eq;
|
||||
|
||||
#[allow(clippy::expect_used)]
|
||||
async fn undo_harness() -> Result<TestCodexHarness> {
|
||||
TestCodexHarness::with_config(|config: &mut Config| {
|
||||
config.include_apply_patch_tool = true;
|
||||
config.model = "gpt-5".to_string();
|
||||
config.model_family = find_family_for_model("gpt-5").expect("gpt-5 is valid");
|
||||
config.features.enable(Feature::GhostCommit);
|
||||
})
|
||||
.await
|
||||
}
|
||||
|
||||
fn git(path: &Path, args: &[&str]) -> Result<()> {
|
||||
let status = Command::new("git")
|
||||
.args(args)
|
||||
.current_dir(path)
|
||||
.status()
|
||||
.with_context(|| format!("failed to run git {args:?}"))?;
|
||||
if status.success() {
|
||||
return Ok(());
|
||||
}
|
||||
let exit_status = status;
|
||||
bail!("git {args:?} exited with {exit_status}");
|
||||
}
|
||||
|
||||
fn git_output(path: &Path, args: &[&str]) -> Result<String> {
|
||||
let output = Command::new("git")
|
||||
.args(args)
|
||||
.current_dir(path)
|
||||
.output()
|
||||
.with_context(|| format!("failed to run git {args:?}"))?;
|
||||
if !output.status.success() {
|
||||
let exit_status = output.status;
|
||||
bail!("git {args:?} exited with {exit_status}");
|
||||
}
|
||||
String::from_utf8(output.stdout).context("stdout was not valid utf8")
|
||||
}
|
||||
|
||||
fn init_git_repo(path: &Path) -> Result<()> {
|
||||
// Use a consistent initial branch and config across environments to avoid
|
||||
// CI variance (default-branch hints, line ending differences, etc.).
|
||||
git(path, &["init", "--initial-branch=main"])?;
|
||||
git(path, &["config", "core.autocrlf", "false"])?;
|
||||
git(path, &["config", "user.name", "Codex Tests"])?;
|
||||
git(path, &["config", "user.email", "codex-tests@example.com"])?;
|
||||
|
||||
// Create README.txt
|
||||
let readme_path = path.join("README.txt");
|
||||
fs::write(&readme_path, "Test repository initialized by Codex.\n")?;
|
||||
|
||||
// Stage and commit
|
||||
git(path, &["add", "README.txt"])?;
|
||||
git(path, &["commit", "-m", "Add README.txt"])?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn apply_patch_responses(call_id: &str, patch: &str, assistant_msg: &str) -> Vec<String> {
|
||||
vec![
|
||||
sse(vec![
|
||||
ev_response_created("resp-1"),
|
||||
ev_apply_patch_function_call(call_id, patch),
|
||||
ev_completed("resp-1"),
|
||||
]),
|
||||
sse(vec![
|
||||
ev_assistant_message("msg-1", assistant_msg),
|
||||
ev_completed("resp-2"),
|
||||
]),
|
||||
]
|
||||
}
|
||||
|
||||
async fn run_apply_patch_turn(
|
||||
harness: &TestCodexHarness,
|
||||
prompt: &str,
|
||||
call_id: &str,
|
||||
patch: &str,
|
||||
assistant_msg: &str,
|
||||
) -> Result<()> {
|
||||
mount_sse_sequence(
|
||||
harness.server(),
|
||||
apply_patch_responses(call_id, patch, assistant_msg),
|
||||
)
|
||||
.await;
|
||||
harness.submit(prompt).await
|
||||
}
|
||||
|
||||
async fn invoke_undo(codex: &Arc<CodexConversation>) -> Result<UndoCompletedEvent> {
|
||||
codex.submit(Op::Undo).await?;
|
||||
let event = wait_for_event_match(codex, |msg| match msg {
|
||||
EventMsg::UndoCompleted(done) => Some(done.clone()),
|
||||
_ => None,
|
||||
})
|
||||
.await;
|
||||
Ok(event)
|
||||
}
|
||||
|
||||
async fn expect_successful_undo(codex: &Arc<CodexConversation>) -> Result<UndoCompletedEvent> {
|
||||
let event = invoke_undo(codex).await?;
|
||||
assert!(
|
||||
event.success,
|
||||
"expected undo to succeed but failed with message {:?}",
|
||||
event.message
|
||||
);
|
||||
Ok(event)
|
||||
}
|
||||
|
||||
async fn expect_failed_undo(codex: &Arc<CodexConversation>) -> Result<UndoCompletedEvent> {
|
||||
let event = invoke_undo(codex).await?;
|
||||
assert!(
|
||||
!event.success,
|
||||
"expected undo to fail but succeeded with message {:?}",
|
||||
event.message
|
||||
);
|
||||
assert_eq!(
|
||||
event.message.as_deref(),
|
||||
Some("No ghost snapshot available to undo.")
|
||||
);
|
||||
Ok(event)
|
||||
}
|
||||
|
||||
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
|
||||
async fn undo_removes_new_file_created_during_turn() -> Result<()> {
|
||||
skip_if_no_network!(Ok(()));
|
||||
|
||||
let harness = undo_harness().await?;
|
||||
init_git_repo(harness.cwd())?;
|
||||
|
||||
let call_id = "undo-create-file";
|
||||
let patch = "*** Begin Patch\n*** Add File: new_file.txt\n+from turn\n*** End Patch";
|
||||
run_apply_patch_turn(&harness, "create file", call_id, patch, "ok").await?;
|
||||
|
||||
let new_path = harness.path("new_file.txt");
|
||||
assert_eq!(fs::read_to_string(&new_path)?, "from turn\n");
|
||||
|
||||
let codex = Arc::clone(&harness.test().codex);
|
||||
let completed = expect_successful_undo(&codex).await?;
|
||||
assert!(completed.success, "undo failed: {:?}", completed.message);
|
||||
|
||||
assert!(!new_path.exists());
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
|
||||
async fn undo_restores_tracked_file_edit() -> Result<()> {
|
||||
skip_if_no_network!(Ok(()));
|
||||
|
||||
let harness = undo_harness().await?;
|
||||
init_git_repo(harness.cwd())?;
|
||||
|
||||
let tracked = harness.path("tracked.txt");
|
||||
fs::write(&tracked, "before\n")?;
|
||||
git(harness.cwd(), &["add", "tracked.txt"])?;
|
||||
git(harness.cwd(), &["commit", "-m", "track file"])?;
|
||||
|
||||
let patch = "*** Begin Patch\n*** Update File: tracked.txt\n@@\n-before\n+after\n*** End Patch";
|
||||
run_apply_patch_turn(
|
||||
&harness,
|
||||
"update tracked file",
|
||||
"undo-tracked-edit",
|
||||
patch,
|
||||
"done",
|
||||
)
|
||||
.await?;
|
||||
println!(
|
||||
"apply_patch output: {}",
|
||||
harness.function_call_stdout("undo-tracked-edit").await
|
||||
);
|
||||
|
||||
assert_eq!(fs::read_to_string(&tracked)?, "after\n");
|
||||
|
||||
let codex = Arc::clone(&harness.test().codex);
|
||||
let completed = expect_successful_undo(&codex).await?;
|
||||
assert!(completed.success, "undo failed: {:?}", completed.message);
|
||||
|
||||
assert_eq!(fs::read_to_string(&tracked)?, "before\n");
|
||||
let status = git_output(harness.cwd(), &["status", "--short"])?;
|
||||
assert_eq!(status, "");
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
|
||||
async fn undo_restores_untracked_file_edit() -> Result<()> {
|
||||
skip_if_no_network!(Ok(()));
|
||||
|
||||
let harness = undo_harness().await?;
|
||||
init_git_repo(harness.cwd())?;
|
||||
git(harness.cwd(), &["commit", "--allow-empty", "-m", "init"])?;
|
||||
|
||||
let notes = harness.path("notes.txt");
|
||||
fs::write(¬es, "original\n")?;
|
||||
let status_before = git_output(harness.cwd(), &["status", "--short", "--ignored"])?;
|
||||
assert!(status_before.contains("?? notes.txt"));
|
||||
|
||||
let patch =
|
||||
"*** Begin Patch\n*** Update File: notes.txt\n@@\n-original\n+modified\n*** End Patch";
|
||||
run_apply_patch_turn(
|
||||
&harness,
|
||||
"edit untracked",
|
||||
"undo-untracked-edit",
|
||||
patch,
|
||||
"done",
|
||||
)
|
||||
.await?;
|
||||
|
||||
assert_eq!(fs::read_to_string(¬es)?, "modified\n");
|
||||
|
||||
let codex = Arc::clone(&harness.test().codex);
|
||||
let completed = expect_successful_undo(&codex).await?;
|
||||
assert!(completed.success, "undo failed: {:?}", completed.message);
|
||||
|
||||
assert_eq!(fs::read_to_string(¬es)?, "original\n");
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
|
||||
async fn undo_reverts_only_latest_turn() -> Result<()> {
|
||||
skip_if_no_network!(Ok(()));
|
||||
|
||||
let harness = undo_harness().await?;
|
||||
init_git_repo(harness.cwd())?;
|
||||
|
||||
let call_id_one = "undo-turn-one";
|
||||
let add_patch = "*** Begin Patch\n*** Add File: story.txt\n+first version\n*** End Patch";
|
||||
run_apply_patch_turn(&harness, "create story", call_id_one, add_patch, "done").await?;
|
||||
let story = harness.path("story.txt");
|
||||
assert_eq!(fs::read_to_string(&story)?, "first version\n");
|
||||
|
||||
let call_id_two = "undo-turn-two";
|
||||
let update_patch = "*** Begin Patch\n*** Update File: story.txt\n@@\n-first version\n+second version\n*** End Patch";
|
||||
run_apply_patch_turn(&harness, "revise story", call_id_two, update_patch, "done").await?;
|
||||
assert_eq!(fs::read_to_string(&story)?, "second version\n");
|
||||
|
||||
let codex = Arc::clone(&harness.test().codex);
|
||||
let completed = expect_successful_undo(&codex).await?;
|
||||
assert!(completed.success, "undo failed: {:?}", completed.message);
|
||||
|
||||
assert_eq!(fs::read_to_string(&story)?, "first version\n");
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
|
||||
async fn undo_does_not_touch_unrelated_files() -> Result<()> {
|
||||
skip_if_no_network!(Ok(()));
|
||||
|
||||
let harness = undo_harness().await?;
|
||||
init_git_repo(harness.cwd())?;
|
||||
|
||||
let tracked_constant = harness.path("stable.txt");
|
||||
fs::write(&tracked_constant, "stable\n")?;
|
||||
let target = harness.path("target.txt");
|
||||
fs::write(&target, "start\n")?;
|
||||
let gitignore = harness.path(".gitignore");
|
||||
fs::write(&gitignore, "ignored-stable.log\n")?;
|
||||
git(
|
||||
harness.cwd(),
|
||||
&["add", "stable.txt", "target.txt", ".gitignore"],
|
||||
)?;
|
||||
git(harness.cwd(), &["commit", "-m", "seed tracked"])?;
|
||||
|
||||
let preexisting_untracked = harness.path("scratch.txt");
|
||||
fs::write(&preexisting_untracked, "scratch before\n")?;
|
||||
let ignored = harness.path("ignored-stable.log");
|
||||
fs::write(&ignored, "ignored before\n")?;
|
||||
|
||||
let full_patch = "*** Begin Patch\n*** Update File: target.txt\n@@\n-start\n+edited\n*** Add File: temp.txt\n+ephemeral\n*** End Patch";
|
||||
run_apply_patch_turn(
|
||||
&harness,
|
||||
"modify target",
|
||||
"undo-unrelated",
|
||||
full_patch,
|
||||
"done",
|
||||
)
|
||||
.await?;
|
||||
let temp = harness.path("temp.txt");
|
||||
assert_eq!(fs::read_to_string(&target)?, "edited\n");
|
||||
assert_eq!(fs::read_to_string(&temp)?, "ephemeral\n");
|
||||
|
||||
let codex = Arc::clone(&harness.test().codex);
|
||||
let completed = expect_successful_undo(&codex).await?;
|
||||
assert!(completed.success, "undo failed: {:?}", completed.message);
|
||||
|
||||
assert_eq!(fs::read_to_string(&tracked_constant)?, "stable\n");
|
||||
assert_eq!(fs::read_to_string(&target)?, "start\n");
|
||||
assert_eq!(
|
||||
fs::read_to_string(&preexisting_untracked)?,
|
||||
"scratch before\n"
|
||||
);
|
||||
assert_eq!(fs::read_to_string(&ignored)?, "ignored before\n");
|
||||
assert!(!temp.exists());
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
|
||||
async fn undo_sequential_turns_consumes_snapshots() -> Result<()> {
|
||||
skip_if_no_network!(Ok(()));
|
||||
|
||||
let harness = undo_harness().await?;
|
||||
init_git_repo(harness.cwd())?;
|
||||
|
||||
let story = harness.path("story.txt");
|
||||
fs::write(&story, "initial\n")?;
|
||||
git(harness.cwd(), &["add", "story.txt"])?;
|
||||
git(harness.cwd(), &["commit", "-m", "seed story"])?;
|
||||
|
||||
run_apply_patch_turn(
|
||||
&harness,
|
||||
"first change",
|
||||
"seq-turn-1",
|
||||
"*** Begin Patch\n*** Update File: story.txt\n@@\n-initial\n+turn one\n*** End Patch",
|
||||
"ok",
|
||||
)
|
||||
.await?;
|
||||
assert_eq!(fs::read_to_string(&story)?, "turn one\n");
|
||||
|
||||
run_apply_patch_turn(
|
||||
&harness,
|
||||
"second change",
|
||||
"seq-turn-2",
|
||||
"*** Begin Patch\n*** Update File: story.txt\n@@\n-turn one\n+turn two\n*** End Patch",
|
||||
"ok",
|
||||
)
|
||||
.await?;
|
||||
assert_eq!(fs::read_to_string(&story)?, "turn two\n");
|
||||
|
||||
run_apply_patch_turn(
|
||||
&harness,
|
||||
"third change",
|
||||
"seq-turn-3",
|
||||
"*** Begin Patch\n*** Update File: story.txt\n@@\n-turn two\n+turn three\n*** End Patch",
|
||||
"ok",
|
||||
)
|
||||
.await?;
|
||||
assert_eq!(fs::read_to_string(&story)?, "turn three\n");
|
||||
|
||||
let codex = Arc::clone(&harness.test().codex);
|
||||
expect_successful_undo(&codex).await?;
|
||||
assert_eq!(fs::read_to_string(&story)?, "turn two\n");
|
||||
|
||||
expect_successful_undo(&codex).await?;
|
||||
assert_eq!(fs::read_to_string(&story)?, "turn one\n");
|
||||
|
||||
expect_successful_undo(&codex).await?;
|
||||
assert_eq!(fs::read_to_string(&story)?, "initial\n");
|
||||
|
||||
expect_failed_undo(&codex).await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
|
||||
async fn undo_without_snapshot_reports_failure() -> Result<()> {
|
||||
skip_if_no_network!(Ok(()));
|
||||
|
||||
let harness = undo_harness().await?;
|
||||
let codex = Arc::clone(&harness.test().codex);
|
||||
|
||||
expect_failed_undo(&codex).await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
|
||||
async fn undo_restores_moves_and_renames() -> Result<()> {
|
||||
skip_if_no_network!(Ok(()));
|
||||
|
||||
let harness = undo_harness().await?;
|
||||
init_git_repo(harness.cwd())?;
|
||||
|
||||
let source = harness.path("rename_me.txt");
|
||||
fs::write(&source, "original\n")?;
|
||||
git(harness.cwd(), &["add", "rename_me.txt"])?;
|
||||
git(harness.cwd(), &["commit", "-m", "add rename target"])?;
|
||||
|
||||
let patch = "*** Begin Patch\n*** Update File: rename_me.txt\n*** Move to: relocated/renamed.txt\n@@\n-original\n+renamed content\n*** End Patch";
|
||||
run_apply_patch_turn(&harness, "rename file", "undo-rename", patch, "done").await?;
|
||||
|
||||
let destination = harness.path("relocated/renamed.txt");
|
||||
assert!(!source.exists());
|
||||
assert_eq!(fs::read_to_string(&destination)?, "renamed content\n");
|
||||
|
||||
let codex = Arc::clone(&harness.test().codex);
|
||||
expect_successful_undo(&codex).await?;
|
||||
|
||||
assert_eq!(fs::read_to_string(&source)?, "original\n");
|
||||
assert!(!destination.exists());
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
|
||||
async fn undo_does_not_touch_ignored_directory_contents() -> Result<()> {
|
||||
skip_if_no_network!(Ok(()));
|
||||
|
||||
let harness = undo_harness().await?;
|
||||
init_git_repo(harness.cwd())?;
|
||||
|
||||
let gitignore = harness.path(".gitignore");
|
||||
fs::write(&gitignore, "logs/\n")?;
|
||||
git(harness.cwd(), &["add", ".gitignore"])?;
|
||||
git(harness.cwd(), &["commit", "-m", "ignore logs directory"])?;
|
||||
|
||||
let logs_dir = harness.path("logs");
|
||||
fs::create_dir_all(&logs_dir)?;
|
||||
let preserved = logs_dir.join("persistent.log");
|
||||
fs::write(&preserved, "keep me\n")?;
|
||||
|
||||
run_apply_patch_turn(
|
||||
&harness,
|
||||
"write log",
|
||||
"undo-log",
|
||||
"*** Begin Patch\n*** Add File: logs/session.log\n+ephemeral log\n*** End Patch",
|
||||
"ok",
|
||||
)
|
||||
.await?;
|
||||
|
||||
let new_log = logs_dir.join("session.log");
|
||||
assert_eq!(fs::read_to_string(&new_log)?, "ephemeral log\n");
|
||||
|
||||
let codex = Arc::clone(&harness.test().codex);
|
||||
expect_successful_undo(&codex).await?;
|
||||
|
||||
assert!(new_log.exists());
|
||||
assert_eq!(fs::read_to_string(&preserved)?, "keep me\n");
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
|
||||
async fn undo_overwrites_manual_edits_after_turn() -> Result<()> {
|
||||
skip_if_no_network!(Ok(()));
|
||||
|
||||
let harness = undo_harness().await?;
|
||||
init_git_repo(harness.cwd())?;
|
||||
|
||||
let tracked = harness.path("tracked.txt");
|
||||
fs::write(&tracked, "baseline\n")?;
|
||||
git(harness.cwd(), &["add", "tracked.txt"])?;
|
||||
git(harness.cwd(), &["commit", "-m", "baseline tracked"])?;
|
||||
|
||||
run_apply_patch_turn(
|
||||
&harness,
|
||||
"modify tracked",
|
||||
"undo-manual-overwrite",
|
||||
"*** Begin Patch\n*** Update File: tracked.txt\n@@\n-baseline\n+turn change\n*** End Patch",
|
||||
"ok",
|
||||
)
|
||||
.await?;
|
||||
assert_eq!(fs::read_to_string(&tracked)?, "turn change\n");
|
||||
|
||||
fs::write(&tracked, "manual edit\n")?;
|
||||
assert_eq!(fs::read_to_string(&tracked)?, "manual edit\n");
|
||||
|
||||
let codex = Arc::clone(&harness.test().codex);
|
||||
expect_successful_undo(&codex).await?;
|
||||
|
||||
assert_eq!(fs::read_to_string(&tracked)?, "baseline\n");
|
||||
|
||||
Ok(())
|
||||
}
|
||||
@@ -61,6 +61,7 @@ Request `newConversation` params (subset):
- `sandbox`: `read-only` | `workspace-write` | `danger-full-access`
- `config`: map of additional config overrides
- `baseInstructions`: optional instruction override
- `compactPrompt`: optional replacement for the default compaction prompt
- `includePlanTool` / `includeApplyPatchTool`: booleans

Response: `{ conversationId, model, reasoningEffort?, rolloutPath }`
@@ -73,6 +73,7 @@ For complete documentation of the `Op` and `EventMsg` variants, refer to [protoc
- `EventMsg::ExecApprovalRequest` – Request approval from user to execute a command
- `EventMsg::TaskComplete` – A task completed successfully
- `EventMsg::Error` – A task stopped with an error
- `EventMsg::Warning` – A non-fatal warning that the client should surface to the user
- `EventMsg::TurnComplete` – Contains a `response_id` bookmark for the last `response_id` executed by the task. This can be used to continue the task at a later point in time, perhaps with additional user input.

The `response_id` returned from each task matches the OpenAI `response_id` stored in the API's `/responses` endpoint. It can be stored and used in future `Sessions` to resume threads of work.
@@ -21,6 +21,7 @@ use codex_core::protocol::StreamErrorEvent;
|
||||
use codex_core::protocol::TaskCompleteEvent;
|
||||
use codex_core::protocol::TurnAbortReason;
|
||||
use codex_core::protocol::TurnDiffEvent;
|
||||
use codex_core::protocol::WarningEvent;
|
||||
use codex_core::protocol::WebSearchEndEvent;
|
||||
use codex_protocol::num_format::format_with_separators;
|
||||
use owo_colors::OwoColorize;
|
||||
@@ -54,6 +55,7 @@ pub(crate) struct EventProcessorWithHumanOutput {
|
||||
red: Style,
|
||||
green: Style,
|
||||
cyan: Style,
|
||||
yellow: Style,
|
||||
|
||||
/// Whether to include `AgentReasoning` events in the output.
|
||||
show_agent_reasoning: bool,
|
||||
@@ -81,6 +83,7 @@ impl EventProcessorWithHumanOutput {
|
||||
red: Style::new().red(),
|
||||
green: Style::new().green(),
|
||||
cyan: Style::new().cyan(),
|
||||
yellow: Style::new().yellow(),
|
||||
show_agent_reasoning: !config.hide_agent_reasoning,
|
||||
show_raw_agent_reasoning: config.show_raw_agent_reasoning,
|
||||
last_message_path,
|
||||
@@ -97,6 +100,7 @@ impl EventProcessorWithHumanOutput {
|
||||
red: Style::new(),
|
||||
green: Style::new(),
|
||||
cyan: Style::new(),
|
||||
yellow: Style::new(),
|
||||
show_agent_reasoning: !config.hide_agent_reasoning,
|
||||
show_raw_agent_reasoning: config.show_raw_agent_reasoning,
|
||||
last_message_path,
|
||||
@@ -161,6 +165,13 @@ impl EventProcessor for EventProcessorWithHumanOutput {
|
||||
let prefix = "ERROR:".style(self.red);
|
||||
ts_msg!(self, "{prefix} {message}");
|
||||
}
|
||||
EventMsg::Warning(WarningEvent { message }) => {
|
||||
ts_msg!(
|
||||
self,
|
||||
"{} {message}",
|
||||
"warning:".style(self.yellow).style(self.bold)
|
||||
);
|
||||
}
|
||||
EventMsg::DeprecationNotice(DeprecationNoticeEvent { summary, details }) => {
|
||||
ts_msg!(
|
||||
self,
|
||||
|
||||
@@ -8,6 +8,7 @@ use crate::event_processor::handle_last_message;
|
||||
use crate::exec_events::AgentMessageItem;
|
||||
use crate::exec_events::CommandExecutionItem;
|
||||
use crate::exec_events::CommandExecutionStatus;
|
||||
use crate::exec_events::ErrorItem;
|
||||
use crate::exec_events::FileChangeItem;
|
||||
use crate::exec_events::FileUpdateChange;
|
||||
use crate::exec_events::ItemCompletedEvent;
|
||||
@@ -129,6 +130,15 @@ impl EventProcessorWithJsonOutput {
|
||||
self.last_critical_error = Some(error.clone());
|
||||
vec![ThreadEvent::Error(error)]
|
||||
}
|
||||
EventMsg::Warning(ev) => {
|
||||
let item = ThreadItem {
|
||||
id: self.get_next_item_id(),
|
||||
details: ThreadItemDetails::Error(ErrorItem {
|
||||
message: ev.message.clone(),
|
||||
}),
|
||||
};
|
||||
vec![ThreadEvent::ItemCompleted(ItemCompletedEvent { item })]
|
||||
}
|
||||
EventMsg::StreamError(ev) => vec![ThreadEvent::Error(ThreadErrorEvent {
|
||||
message: ev.message.clone(),
|
||||
})],
|
||||
|
||||
@@ -174,8 +174,9 @@ pub async fn run_main(cli: Cli, codex_linux_sandbox_exe: Option<PathBuf>) -> any
model_provider,
codex_linux_sandbox_exe,
base_instructions: None,
developer_instructions: None,
compact_prompt: None,
include_apply_patch_tool: None,
include_view_image_tool: None,
show_raw_agent_reasoning: oss.then_some(true),
tools_web_search_request: None,
experimental_sandbox_command_assessment: None,

@@ -12,11 +12,13 @@ use codex_core::protocol::McpToolCallEndEvent;
use codex_core::protocol::PatchApplyBeginEvent;
use codex_core::protocol::PatchApplyEndEvent;
use codex_core::protocol::SessionConfiguredEvent;
use codex_core::protocol::WarningEvent;
use codex_core::protocol::WebSearchEndEvent;
use codex_exec::event_processor_with_jsonl_output::EventProcessorWithJsonOutput;
use codex_exec::exec_events::AgentMessageItem;
use codex_exec::exec_events::CommandExecutionItem;
use codex_exec::exec_events::CommandExecutionStatus;
use codex_exec::exec_events::ErrorItem;
use codex_exec::exec_events::ItemCompletedEvent;
use codex_exec::exec_events::ItemStartedEvent;
use codex_exec::exec_events::ItemUpdatedEvent;
@@ -540,6 +542,28 @@ fn error_event_produces_error() {
);
}

#[test]
fn warning_event_produces_error_item() {
let mut ep = EventProcessorWithJsonOutput::new(None);
let out = ep.collect_thread_events(&event(
"e1",
EventMsg::Warning(WarningEvent {
message: "Heads up: Long conversations and multiple compactions can cause the model to be less accurate. Start new a new conversation when possible to keep conversations small and targeted.".to_string(),
}),
));
assert_eq!(
out,
vec![ThreadEvent::ItemCompleted(ItemCompletedEvent {
item: ThreadItem {
id: "item_0".to_string(),
details: ThreadItemDetails::Error(ErrorItem {
message: "Heads up: Long conversations and multiple compactions can cause the model to be less accurate. Start new a new conversation when possible to keep conversations small and targeted.".to_string(),
}),
},
})]
);
}

#[test]
fn stream_error_event_produces_error() {
let mut ep = EventProcessorWithJsonOutput::new(None);

@@ -1,5 +1,5 @@
#![cfg(target_os = "linux")]
use codex_core::config_types::ShellEnvironmentPolicy;
use codex_core::config::types::ShellEnvironmentPolicy;
use codex_core::error::CodexErr;
use codex_core::error::SandboxErr;
use codex_core::exec::ExecParams;

@@ -49,6 +49,14 @@ pub struct CodexToolCallParam {
/// The set of instructions to use instead of the default ones.
#[serde(default, skip_serializing_if = "Option::is_none")]
pub base_instructions: Option<String>,

/// Developer instructions that should be injected as a developer role message.
#[serde(default, skip_serializing_if = "Option::is_none")]
pub developer_instructions: Option<String>,

/// Prompt used when compacting the conversation.
#[serde(default, skip_serializing_if = "Option::is_none")]
pub compact_prompt: Option<String>,
}

/// Custom enum mirroring [`AskForApproval`], but has an extra dependency on
@@ -141,6 +149,8 @@ impl CodexToolCallParam {
sandbox,
config: cli_overrides,
base_instructions,
developer_instructions,
compact_prompt,
} = self;

// Build the `ConfigOverrides` recognized by codex-core.
@@ -154,8 +164,9 @@ impl CodexToolCallParam {
model_provider: None,
codex_linux_sandbox_exe,
base_instructions,
developer_instructions,
compact_prompt,
include_apply_patch_tool: None,
include_view_image_tool: None,
show_raw_agent_reasoning: None,
tools_web_search_request: None,
experimental_sandbox_command_assessment: None,
@@ -288,6 +299,14 @@ mod tests {
"description": "The set of instructions to use instead of the default ones.",
"type": "string"
},
"developer-instructions": {
"description": "Developer instructions that should be injected as a developer role message.",
"type": "string"
},
"compact-prompt": {
"description": "Prompt used when compacting the conversation.",
"type": "string"
},
},
"required": [
"prompt"
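For orientation, here is a minimal sketch of the arguments an MCP client might pass for this tool call once the new fields land. The kebab-case keys mirror the generated JSON schema in the test above; the `base-instructions` key name is an assumption by analogy, since only its description appears in the excerpt.

use serde_json::json;

fn main() {
    // Hypothetical `tools/call` arguments for the Codex tool; only `prompt`
    // is required, the remaining keys are optional per the schema above.
    let arguments = json!({
        "prompt": "How are you?",
        "base-instructions": "You are a helpful assistant.",
        "developer-instructions": "Foreshadow upcoming tool calls.",
        "compact-prompt": "Summarize the conversation so far."
    });
    println!("{arguments}");
}
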
@@ -204,6 +204,9 @@ async fn run_codex_tool_session_inner(
outgoing.send_response(request_id.clone(), result).await;
break;
}
EventMsg::Warning(_) => {
continue;
}
EventMsg::ApplyPatchApprovalRequest(ApplyPatchApprovalRequestEvent {
call_id,
reason,

@@ -341,6 +341,7 @@ async fn codex_tool_passes_base_instructions() -> anyhow::Result<()> {
.send_codex_tool_call(CodexToolCallParam {
prompt: "How are you?".to_string(),
base_instructions: Some("You are a helpful assistant.".to_string()),
developer_instructions: Some("Foreshadow upcoming tool calls.".to_string()),
..Default::default()
})
.await?;
@@ -367,10 +368,28 @@ async fn codex_tool_passes_base_instructions() -> anyhow::Result<()> {
);

let requests = server.received_requests().await.unwrap();
let request = requests[0].body_json::<serde_json::Value>().unwrap();
let request = requests[0].body_json::<serde_json::Value>()?;
let instructions = request["messages"][0]["content"].as_str().unwrap();
assert!(instructions.starts_with("You are a helpful assistant."));

let developer_msg = request["messages"]
.as_array()
.and_then(|messages| {
messages
.iter()
.find(|msg| msg.get("role").and_then(|role| role.as_str()) == Some("developer"))
})
.unwrap();
let developer_content = developer_msg
.get("content")
.and_then(|value| value.as_str())
.unwrap();
assert!(
!developer_content.contains('<'),
"expected developer instructions without XML tags, got `{developer_content}`"
);
assert_eq!(developer_content, "Foreshadow upcoming tool calls.");

Ok(())
}
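To make the assertions above concrete, a rough sketch of the request body the test expects follows. Only `messages`, `role`, and `content` come from the test itself; the first message's role and any other surrounding fields are assumptions, and the developer message is forwarded verbatim with no XML wrapping.

use serde_json::json;

fn main() {
    // Approximate shape of the captured model request asserted above.
    let request = json!({
        "messages": [
            // messages[0].content starts with the base instructions.
            { "role": "system", "content": "You are a helpful assistant. ..." },
            // Developer instructions appear as their own developer-role message.
            { "role": "developer", "content": "Foreshadow upcoming tool calls." }
        ]
    });
    assert!(request["messages"][0]["content"]
        .as_str()
        .unwrap()
        .starts_with("You are a helpful assistant."));
    assert_eq!(
        request["messages"][1]["content"].as_str(),
        Some("Foreshadow upcoming tool calls.")
    );
}
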
|
||||
|
||||
@@ -332,6 +332,7 @@ class StructField:
|
||||
name: str
|
||||
type_name: str
|
||||
serde: str | None = None
|
||||
ts: str | None = None
|
||||
comment: str | None = None
|
||||
|
||||
def append(self, out: list[str], supports_const: bool) -> None:
|
||||
@@ -339,6 +340,8 @@ class StructField:
|
||||
out.append(f" // {self.comment}\n")
|
||||
if self.serde:
|
||||
out.append(f" {self.serde}\n")
|
||||
if self.ts:
|
||||
out.append(f" {self.ts}\n")
|
||||
if self.viz == "const":
|
||||
if supports_const:
|
||||
out.append(f" const {self.name}: {self.type_name};\n")
|
||||
@@ -378,9 +381,9 @@ def define_struct(
|
||||
prop_type = f"Option<{prop_type}>"
|
||||
rs_prop = rust_prop_name(prop_name, is_optional)
|
||||
if prop_type.startswith("&'static str"):
|
||||
fields.append(StructField("const", rs_prop.name, prop_type, rs_prop.serde))
|
||||
fields.append(StructField("const", rs_prop.name, prop_type, rs_prop.serde, rs_prop.ts))
|
||||
else:
|
||||
fields.append(StructField("pub", rs_prop.name, prop_type, rs_prop.serde))
|
||||
fields.append(StructField("pub", rs_prop.name, prop_type, rs_prop.serde, rs_prop.ts))
|
||||
|
||||
# Special-case: add Codex-specific user_agent to Implementation
|
||||
if name == "Implementation":
|
||||
@@ -390,6 +393,7 @@ def define_struct(
|
||||
"user_agent",
|
||||
"Option<String>",
|
||||
'#[serde(default, skip_serializing_if = "Option::is_none")]',
|
||||
'#[ts(optional)]',
|
||||
"This is an extra field that the Codex MCP server sends as part of InitializeResult.",
|
||||
)
|
||||
)
|
||||
@@ -474,7 +478,6 @@ def define_string_enum(
|
||||
out.append(f" {capitalize(value)},\n")
|
||||
|
||||
out.append("}\n\n")
|
||||
return out
|
||||
|
||||
|
||||
def define_untagged_enum(name: str, type_list: list[str], out: list[str]) -> None:
|
||||
@@ -590,7 +593,7 @@ def get_serde_annotation_for_anyof_type(type_name: str) -> str | None:
|
||||
|
||||
|
||||
def map_type(
|
||||
typedef: dict[str, any],
|
||||
typedef: dict[str, Any],
|
||||
prop_name: str | None = None,
|
||||
struct_name: str | None = None,
|
||||
) -> str:
|
||||
@@ -665,7 +668,8 @@ class RustProp:
|
||||
name: str
|
||||
# serde annotation, if necessary
|
||||
serde: str | None = None
|
||||
|
||||
# ts annotation, if necessary
|
||||
ts: str | None = None
|
||||
|
||||
def rust_prop_name(name: str, is_optional: bool) -> RustProp:
|
||||
"""Convert a JSON property name to a Rust property name."""
|
||||
@@ -684,6 +688,7 @@ def rust_prop_name(name: str, is_optional: bool) -> RustProp:
|
||||
prop_name = name
|
||||
|
||||
serde_annotations = []
|
||||
ts_str = None
|
||||
if is_rename:
|
||||
serde_annotations.append(f'rename = "{name}"')
|
||||
if is_optional:
|
||||
@@ -691,13 +696,18 @@ def rust_prop_name(name: str, is_optional: bool) -> RustProp:
|
||||
serde_annotations.append('skip_serializing_if = "Option::is_none"')
|
||||
|
||||
if serde_annotations:
|
||||
# Also mark optional fields for ts-rs generation.
|
||||
serde_str = f"#[serde({', '.join(serde_annotations)})]"
|
||||
else:
|
||||
serde_str = None
|
||||
return RustProp(prop_name, serde_str)
|
||||
|
||||
if is_optional and serde_str:
|
||||
ts_str = "#[ts(optional)]"
|
||||
|
||||
return RustProp(prop_name, serde_str, ts_str)
|
||||
|
||||
|
||||
def to_snake_case(name: str) -> str:
|
||||
def to_snake_case(name: str) -> str | None:
|
||||
"""Convert a camelCase or PascalCase name to snake_case."""
|
||||
snake_case = name[0].lower() + "".join("_" + c.lower() if c.isupper() else c for c in name[1:])
|
||||
if snake_case != name:
|
||||
|
||||
@@ -37,14 +37,17 @@ fn default_jsonrpc() -> String {
|
||||
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize, JsonSchema, TS)]
|
||||
pub struct Annotations {
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
#[ts(optional)]
|
||||
pub audience: Option<Vec<Role>>,
|
||||
#[serde(
|
||||
rename = "lastModified",
|
||||
default,
|
||||
skip_serializing_if = "Option::is_none"
|
||||
)]
|
||||
#[ts(optional)]
|
||||
pub last_modified: Option<String>,
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
#[ts(optional)]
|
||||
pub priority: Option<f64>,
|
||||
}
|
||||
|
||||
@@ -52,6 +55,7 @@ pub struct Annotations {
|
||||
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize, JsonSchema, TS)]
|
||||
pub struct AudioContent {
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
#[ts(optional)]
|
||||
pub annotations: Option<Annotations>,
|
||||
pub data: String,
|
||||
#[serde(rename = "mimeType")]
|
||||
@@ -64,6 +68,7 @@ pub struct AudioContent {
|
||||
pub struct BaseMetadata {
|
||||
pub name: String,
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
#[ts(optional)]
|
||||
pub title: Option<String>,
|
||||
}
|
||||
|
||||
@@ -71,6 +76,7 @@ pub struct BaseMetadata {
|
||||
pub struct BlobResourceContents {
|
||||
pub blob: String,
|
||||
#[serde(rename = "mimeType", default, skip_serializing_if = "Option::is_none")]
|
||||
#[ts(optional)]
|
||||
pub mime_type: Option<String>,
|
||||
pub uri: String,
|
||||
}
|
||||
@@ -78,10 +84,13 @@ pub struct BlobResourceContents {
|
||||
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize, JsonSchema, TS)]
|
||||
pub struct BooleanSchema {
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
#[ts(optional)]
|
||||
pub default: Option<bool>,
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
#[ts(optional)]
|
||||
pub description: Option<String>,
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
#[ts(optional)]
|
||||
pub title: Option<String>,
|
||||
pub r#type: String, // &'static str = "boolean"
|
||||
}
|
||||
@@ -98,6 +107,7 @@ impl ModelContextProtocolRequest for CallToolRequest {
|
||||
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize, JsonSchema, TS)]
|
||||
pub struct CallToolRequestParams {
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
#[ts(optional)]
|
||||
pub arguments: Option<serde_json::Value>,
|
||||
pub name: String,
|
||||
}
|
||||
@@ -107,12 +117,14 @@ pub struct CallToolRequestParams {
|
||||
pub struct CallToolResult {
|
||||
pub content: Vec<ContentBlock>,
|
||||
#[serde(rename = "isError", default, skip_serializing_if = "Option::is_none")]
|
||||
#[ts(optional)]
|
||||
pub is_error: Option<bool>,
|
||||
#[serde(
|
||||
rename = "structuredContent",
|
||||
default,
|
||||
skip_serializing_if = "Option::is_none"
|
||||
)]
|
||||
#[ts(optional)]
|
||||
pub structured_content: Option<serde_json::Value>,
|
||||
}
|
||||
|
||||
@@ -135,6 +147,7 @@ impl ModelContextProtocolNotification for CancelledNotification {
|
||||
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize, JsonSchema, TS)]
|
||||
pub struct CancelledNotificationParams {
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
#[ts(optional)]
|
||||
pub reason: Option<String>,
|
||||
#[serde(rename = "requestId")]
|
||||
pub request_id: RequestId,
|
||||
@@ -144,12 +157,16 @@ pub struct CancelledNotificationParams {
|
||||
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize, JsonSchema, TS)]
|
||||
pub struct ClientCapabilities {
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
#[ts(optional)]
|
||||
pub elicitation: Option<serde_json::Value>,
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
#[ts(optional)]
|
||||
pub experimental: Option<serde_json::Value>,
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
#[ts(optional)]
|
||||
pub roots: Option<ClientCapabilitiesRoots>,
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
#[ts(optional)]
|
||||
pub sampling: Option<serde_json::Value>,
|
||||
}
|
||||
|
||||
@@ -161,6 +178,7 @@ pub struct ClientCapabilitiesRoots {
|
||||
default,
|
||||
skip_serializing_if = "Option::is_none"
|
||||
)]
|
||||
#[ts(optional)]
|
||||
pub list_changed: Option<bool>,
|
||||
}
|
||||
|
||||
@@ -228,6 +246,7 @@ impl ModelContextProtocolRequest for CompleteRequest {
|
||||
pub struct CompleteRequestParams {
|
||||
pub argument: CompleteRequestParamsArgument,
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
#[ts(optional)]
|
||||
pub context: Option<CompleteRequestParamsContext>,
|
||||
pub r#ref: CompleteRequestParamsRef,
|
||||
}
|
||||
@@ -236,6 +255,7 @@ pub struct CompleteRequestParams {
|
||||
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize, JsonSchema, TS)]
|
||||
pub struct CompleteRequestParamsContext {
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
#[ts(optional)]
|
||||
pub arguments: Option<serde_json::Value>,
|
||||
}
|
||||
|
||||
@@ -262,8 +282,10 @@ pub struct CompleteResult {
|
||||
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize, JsonSchema, TS)]
|
||||
pub struct CompleteResultCompletion {
|
||||
#[serde(rename = "hasMore", default, skip_serializing_if = "Option::is_none")]
|
||||
#[ts(optional)]
|
||||
pub has_more: Option<bool>,
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
#[ts(optional)]
|
||||
pub total: Option<i64>,
|
||||
pub values: Vec<String>,
|
||||
}
|
||||
@@ -302,31 +324,37 @@ pub struct CreateMessageRequestParams {
|
||||
default,
|
||||
skip_serializing_if = "Option::is_none"
|
||||
)]
|
||||
#[ts(optional)]
|
||||
pub include_context: Option<String>,
|
||||
#[serde(rename = "maxTokens")]
|
||||
pub max_tokens: i64,
|
||||
pub messages: Vec<SamplingMessage>,
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
#[ts(optional)]
|
||||
pub metadata: Option<serde_json::Value>,
|
||||
#[serde(
|
||||
rename = "modelPreferences",
|
||||
default,
|
||||
skip_serializing_if = "Option::is_none"
|
||||
)]
|
||||
#[ts(optional)]
|
||||
pub model_preferences: Option<ModelPreferences>,
|
||||
#[serde(
|
||||
rename = "stopSequences",
|
||||
default,
|
||||
skip_serializing_if = "Option::is_none"
|
||||
)]
|
||||
#[ts(optional)]
|
||||
pub stop_sequences: Option<Vec<String>>,
|
||||
#[serde(
|
||||
rename = "systemPrompt",
|
||||
default,
|
||||
skip_serializing_if = "Option::is_none"
|
||||
)]
|
||||
#[ts(optional)]
|
||||
pub system_prompt: Option<String>,
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
#[ts(optional)]
|
||||
pub temperature: Option<f64>,
|
||||
}
|
||||
|
||||
@@ -341,6 +369,7 @@ pub struct CreateMessageResult {
|
||||
default,
|
||||
skip_serializing_if = "Option::is_none"
|
||||
)]
|
||||
#[ts(optional)]
|
||||
pub stop_reason: Option<String>,
|
||||
}
|
||||
|
||||
@@ -385,6 +414,7 @@ pub struct ElicitRequestParams {
|
||||
pub struct ElicitRequestParamsRequestedSchema {
|
||||
pub properties: serde_json::Value,
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
#[ts(optional)]
|
||||
pub required: Option<Vec<String>>,
|
||||
pub r#type: String, // &'static str = "object"
|
||||
}
|
||||
@@ -394,6 +424,7 @@ pub struct ElicitRequestParamsRequestedSchema {
|
||||
pub struct ElicitResult {
|
||||
pub action: String,
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
#[ts(optional)]
|
||||
pub content: Option<serde_json::Value>,
|
||||
}
|
||||
|
||||
@@ -412,6 +443,7 @@ impl From<ElicitResult> for serde_json::Value {
|
||||
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize, JsonSchema, TS)]
|
||||
pub struct EmbeddedResource {
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
#[ts(optional)]
|
||||
pub annotations: Option<Annotations>,
|
||||
pub resource: EmbeddedResourceResource,
|
||||
pub r#type: String, // &'static str = "resource"
|
||||
@@ -429,11 +461,14 @@ pub type EmptyResult = Result;
|
||||
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize, JsonSchema, TS)]
|
||||
pub struct EnumSchema {
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
#[ts(optional)]
|
||||
pub description: Option<String>,
|
||||
pub r#enum: Vec<String>,
|
||||
#[serde(rename = "enumNames", default, skip_serializing_if = "Option::is_none")]
|
||||
#[ts(optional)]
|
||||
pub enum_names: Option<Vec<String>>,
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
#[ts(optional)]
|
||||
pub title: Option<String>,
|
||||
pub r#type: String, // &'static str = "string"
|
||||
}
|
||||
@@ -450,6 +485,7 @@ impl ModelContextProtocolRequest for GetPromptRequest {
|
||||
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize, JsonSchema, TS)]
|
||||
pub struct GetPromptRequestParams {
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
#[ts(optional)]
|
||||
pub arguments: Option<serde_json::Value>,
|
||||
pub name: String,
|
||||
}
|
||||
@@ -458,6 +494,7 @@ pub struct GetPromptRequestParams {
|
||||
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize, JsonSchema, TS)]
|
||||
pub struct GetPromptResult {
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
#[ts(optional)]
|
||||
pub description: Option<String>,
|
||||
pub messages: Vec<PromptMessage>,
|
||||
}
|
||||
@@ -474,6 +511,7 @@ impl From<GetPromptResult> for serde_json::Value {
|
||||
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize, JsonSchema, TS)]
|
||||
pub struct ImageContent {
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
#[ts(optional)]
|
||||
pub annotations: Option<Annotations>,
|
||||
pub data: String,
|
||||
#[serde(rename = "mimeType")]
|
||||
@@ -486,10 +524,12 @@ pub struct ImageContent {
|
||||
pub struct Implementation {
|
||||
pub name: String,
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
#[ts(optional)]
|
||||
pub title: Option<String>,
|
||||
pub version: String,
|
||||
// This is an extra field that the Codex MCP server sends as part of InitializeResult.
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
#[ts(optional)]
|
||||
pub user_agent: Option<String>,
|
||||
}
|
||||
|
||||
@@ -516,6 +556,7 @@ pub struct InitializeRequestParams {
|
||||
pub struct InitializeResult {
|
||||
pub capabilities: ServerCapabilities,
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
#[ts(optional)]
|
||||
pub instructions: Option<String>,
|
||||
#[serde(rename = "protocolVersion")]
|
||||
pub protocol_version: String,
|
||||
@@ -552,6 +593,7 @@ pub struct JSONRPCError {
|
||||
pub struct JSONRPCErrorError {
|
||||
pub code: i64,
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
#[ts(optional)]
|
||||
pub data: Option<serde_json::Value>,
|
||||
pub message: String,
|
||||
}
|
||||
@@ -573,6 +615,7 @@ pub struct JSONRPCNotification {
|
||||
pub jsonrpc: String,
|
||||
pub method: String,
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
#[ts(optional)]
|
||||
pub params: Option<serde_json::Value>,
|
||||
}
|
||||
|
||||
@@ -584,6 +627,7 @@ pub struct JSONRPCRequest {
|
||||
pub jsonrpc: String,
|
||||
pub method: String,
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
#[ts(optional)]
|
||||
pub params: Option<serde_json::Value>,
|
||||
}
|
||||
|
||||
@@ -608,6 +652,7 @@ impl ModelContextProtocolRequest for ListPromptsRequest {
|
||||
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize, JsonSchema, TS)]
|
||||
pub struct ListPromptsRequestParams {
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
#[ts(optional)]
|
||||
pub cursor: Option<String>,
|
||||
}
|
||||
|
||||
@@ -619,6 +664,7 @@ pub struct ListPromptsResult {
|
||||
default,
|
||||
skip_serializing_if = "Option::is_none"
|
||||
)]
|
||||
#[ts(optional)]
|
||||
pub next_cursor: Option<String>,
|
||||
pub prompts: Vec<Prompt>,
|
||||
}
|
||||
@@ -643,6 +689,7 @@ impl ModelContextProtocolRequest for ListResourceTemplatesRequest {
|
||||
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize, JsonSchema, TS)]
|
||||
pub struct ListResourceTemplatesRequestParams {
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
#[ts(optional)]
|
||||
pub cursor: Option<String>,
|
||||
}
|
||||
|
||||
@@ -654,6 +701,7 @@ pub struct ListResourceTemplatesResult {
|
||||
default,
|
||||
skip_serializing_if = "Option::is_none"
|
||||
)]
|
||||
#[ts(optional)]
|
||||
pub next_cursor: Option<String>,
|
||||
#[serde(rename = "resourceTemplates")]
|
||||
pub resource_templates: Vec<ResourceTemplate>,
|
||||
@@ -679,6 +727,7 @@ impl ModelContextProtocolRequest for ListResourcesRequest {
|
||||
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize, JsonSchema, TS)]
|
||||
pub struct ListResourcesRequestParams {
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
#[ts(optional)]
|
||||
pub cursor: Option<String>,
|
||||
}
|
||||
|
||||
@@ -690,6 +739,7 @@ pub struct ListResourcesResult {
|
||||
default,
|
||||
skip_serializing_if = "Option::is_none"
|
||||
)]
|
||||
#[ts(optional)]
|
||||
pub next_cursor: Option<String>,
|
||||
pub resources: Vec<Resource>,
|
||||
}
|
||||
@@ -739,6 +789,7 @@ impl ModelContextProtocolRequest for ListToolsRequest {
|
||||
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize, JsonSchema, TS)]
|
||||
pub struct ListToolsRequestParams {
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
#[ts(optional)]
|
||||
pub cursor: Option<String>,
|
||||
}
|
||||
|
||||
@@ -750,6 +801,7 @@ pub struct ListToolsResult {
|
||||
default,
|
||||
skip_serializing_if = "Option::is_none"
|
||||
)]
|
||||
#[ts(optional)]
|
||||
pub next_cursor: Option<String>,
|
||||
pub tools: Vec<Tool>,
|
||||
}
|
||||
@@ -799,6 +851,7 @@ pub struct LoggingMessageNotificationParams {
|
||||
pub data: serde_json::Value,
|
||||
pub level: LoggingLevel,
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
#[ts(optional)]
|
||||
pub logger: Option<String>,
|
||||
}
|
||||
|
||||
@@ -809,6 +862,7 @@ pub struct LoggingMessageNotificationParams {
|
||||
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize, JsonSchema, TS)]
|
||||
pub struct ModelHint {
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
#[ts(optional)]
|
||||
pub name: Option<String>,
|
||||
}
|
||||
|
||||
@@ -830,20 +884,24 @@ pub struct ModelPreferences {
|
||||
default,
|
||||
skip_serializing_if = "Option::is_none"
|
||||
)]
|
||||
#[ts(optional)]
|
||||
pub cost_priority: Option<f64>,
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
#[ts(optional)]
|
||||
pub hints: Option<Vec<ModelHint>>,
|
||||
#[serde(
|
||||
rename = "intelligencePriority",
|
||||
default,
|
||||
skip_serializing_if = "Option::is_none"
|
||||
)]
|
||||
#[ts(optional)]
|
||||
pub intelligence_priority: Option<f64>,
|
||||
#[serde(
|
||||
rename = "speedPriority",
|
||||
default,
|
||||
skip_serializing_if = "Option::is_none"
|
||||
)]
|
||||
#[ts(optional)]
|
||||
pub speed_priority: Option<f64>,
|
||||
}
|
||||
|
||||
@@ -851,18 +909,23 @@ pub struct ModelPreferences {
|
||||
pub struct Notification {
|
||||
pub method: String,
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
#[ts(optional)]
|
||||
pub params: Option<serde_json::Value>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize, JsonSchema, TS)]
|
||||
pub struct NumberSchema {
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
#[ts(optional)]
|
||||
pub description: Option<String>,
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
#[ts(optional)]
|
||||
pub maximum: Option<i64>,
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
#[ts(optional)]
|
||||
pub minimum: Option<i64>,
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
#[ts(optional)]
|
||||
pub title: Option<String>,
|
||||
pub r#type: String,
|
||||
}
|
||||
@@ -871,12 +934,14 @@ pub struct NumberSchema {
|
||||
pub struct PaginatedRequest {
|
||||
pub method: String,
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
#[ts(optional)]
|
||||
pub params: Option<PaginatedRequestParams>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize, JsonSchema, TS)]
|
||||
pub struct PaginatedRequestParams {
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
#[ts(optional)]
|
||||
pub cursor: Option<String>,
|
||||
}
|
||||
|
||||
@@ -887,6 +952,7 @@ pub struct PaginatedResult {
|
||||
default,
|
||||
skip_serializing_if = "Option::is_none"
|
||||
)]
|
||||
#[ts(optional)]
|
||||
pub next_cursor: Option<String>,
|
||||
}
|
||||
|
||||
@@ -929,11 +995,13 @@ impl ModelContextProtocolNotification for ProgressNotification {
|
||||
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize, JsonSchema, TS)]
|
||||
pub struct ProgressNotificationParams {
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
#[ts(optional)]
|
||||
pub message: Option<String>,
|
||||
pub progress: f64,
|
||||
#[serde(rename = "progressToken")]
|
||||
pub progress_token: ProgressToken,
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
#[ts(optional)]
|
||||
pub total: Option<f64>,
|
||||
}
|
||||
|
||||
@@ -948,11 +1016,14 @@ pub enum ProgressToken {
|
||||
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize, JsonSchema, TS)]
|
||||
pub struct Prompt {
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
#[ts(optional)]
|
||||
pub arguments: Option<Vec<PromptArgument>>,
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
#[ts(optional)]
|
||||
pub description: Option<String>,
|
||||
pub name: String,
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
#[ts(optional)]
|
||||
pub title: Option<String>,
|
||||
}
|
||||
|
||||
@@ -960,11 +1031,14 @@ pub struct Prompt {
|
||||
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize, JsonSchema, TS)]
|
||||
pub struct PromptArgument {
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
#[ts(optional)]
|
||||
pub description: Option<String>,
|
||||
pub name: String,
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
#[ts(optional)]
|
||||
pub required: Option<bool>,
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
#[ts(optional)]
|
||||
pub title: Option<String>,
|
||||
}
|
||||
|
||||
@@ -991,6 +1065,7 @@ pub struct PromptMessage {
|
||||
pub struct PromptReference {
|
||||
pub name: String,
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
#[ts(optional)]
|
||||
pub title: Option<String>,
|
||||
pub r#type: String, // &'static str = "ref/prompt"
|
||||
}
|
||||
@@ -1034,6 +1109,7 @@ impl From<ReadResourceResult> for serde_json::Value {
|
||||
pub struct Request {
|
||||
pub method: String,
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
#[ts(optional)]
|
||||
pub params: Option<serde_json::Value>,
|
||||
}
|
||||
|
||||
@@ -1048,15 +1124,20 @@ pub enum RequestId {
|
||||
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize, JsonSchema, TS)]
|
||||
pub struct Resource {
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
#[ts(optional)]
|
||||
pub annotations: Option<Annotations>,
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
#[ts(optional)]
|
||||
pub description: Option<String>,
|
||||
#[serde(rename = "mimeType", default, skip_serializing_if = "Option::is_none")]
|
||||
#[ts(optional)]
|
||||
pub mime_type: Option<String>,
|
||||
pub name: String,
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
#[ts(optional)]
|
||||
pub size: Option<i64>,
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
#[ts(optional)]
|
||||
pub title: Option<String>,
|
||||
pub uri: String,
|
||||
}
|
||||
@@ -1065,6 +1146,7 @@ pub struct Resource {
|
||||
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize, JsonSchema, TS)]
|
||||
pub struct ResourceContents {
|
||||
#[serde(rename = "mimeType", default, skip_serializing_if = "Option::is_none")]
|
||||
#[ts(optional)]
|
||||
pub mime_type: Option<String>,
|
||||
pub uri: String,
|
||||
}
|
||||
@@ -1075,15 +1157,20 @@ pub struct ResourceContents {
|
||||
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize, JsonSchema, TS)]
|
||||
pub struct ResourceLink {
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
#[ts(optional)]
|
||||
pub annotations: Option<Annotations>,
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
#[ts(optional)]
|
||||
pub description: Option<String>,
|
||||
#[serde(rename = "mimeType", default, skip_serializing_if = "Option::is_none")]
|
||||
#[ts(optional)]
|
||||
pub mime_type: Option<String>,
|
||||
pub name: String,
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
#[ts(optional)]
|
||||
pub size: Option<i64>,
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
#[ts(optional)]
|
||||
pub title: Option<String>,
|
||||
pub r#type: String, // &'static str = "resource_link"
|
||||
pub uri: String,
|
||||
@@ -1101,13 +1188,17 @@ impl ModelContextProtocolNotification for ResourceListChangedNotification {
|
||||
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize, JsonSchema, TS)]
|
||||
pub struct ResourceTemplate {
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
#[ts(optional)]
|
||||
pub annotations: Option<Annotations>,
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
#[ts(optional)]
|
||||
pub description: Option<String>,
|
||||
#[serde(rename = "mimeType", default, skip_serializing_if = "Option::is_none")]
|
||||
#[ts(optional)]
|
||||
pub mime_type: Option<String>,
|
||||
pub name: String,
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
#[ts(optional)]
|
||||
pub title: Option<String>,
|
||||
#[serde(rename = "uriTemplate")]
|
||||
pub uri_template: String,
|
||||
@@ -1148,6 +1239,7 @@ pub enum Role {
|
||||
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize, JsonSchema, TS)]
|
||||
pub struct Root {
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
#[ts(optional)]
|
||||
pub name: Option<String>,
|
||||
pub uri: String,
|
||||
}
|
||||
@@ -1179,16 +1271,22 @@ pub enum SamplingMessageContent {
|
||||
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize, JsonSchema, TS)]
|
||||
pub struct ServerCapabilities {
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
#[ts(optional)]
|
||||
pub completions: Option<serde_json::Value>,
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
#[ts(optional)]
|
||||
pub experimental: Option<serde_json::Value>,
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
#[ts(optional)]
|
||||
pub logging: Option<serde_json::Value>,
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
#[ts(optional)]
|
||||
pub prompts: Option<ServerCapabilitiesPrompts>,
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
#[ts(optional)]
|
||||
pub resources: Option<ServerCapabilitiesResources>,
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
#[ts(optional)]
|
||||
pub tools: Option<ServerCapabilitiesTools>,
|
||||
}
|
||||
|
||||
@@ -1200,6 +1298,7 @@ pub struct ServerCapabilitiesTools {
|
||||
default,
|
||||
skip_serializing_if = "Option::is_none"
|
||||
)]
|
||||
#[ts(optional)]
|
||||
pub list_changed: Option<bool>,
|
||||
}
|
||||
|
||||
@@ -1211,8 +1310,10 @@ pub struct ServerCapabilitiesResources {
|
||||
default,
|
||||
skip_serializing_if = "Option::is_none"
|
||||
)]
|
||||
#[ts(optional)]
|
||||
pub list_changed: Option<bool>,
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
#[ts(optional)]
|
||||
pub subscribe: Option<bool>,
|
||||
}
|
||||
|
||||
@@ -1224,6 +1325,7 @@ pub struct ServerCapabilitiesPrompts {
|
||||
default,
|
||||
skip_serializing_if = "Option::is_none"
|
||||
)]
|
||||
#[ts(optional)]
|
||||
pub list_changed: Option<bool>,
|
||||
}
|
||||
|
||||
@@ -1298,14 +1400,19 @@ pub struct SetLevelRequestParams {
|
||||
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize, JsonSchema, TS)]
|
||||
pub struct StringSchema {
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
#[ts(optional)]
|
||||
pub description: Option<String>,
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
#[ts(optional)]
|
||||
pub format: Option<String>,
|
||||
#[serde(rename = "maxLength", default, skip_serializing_if = "Option::is_none")]
|
||||
#[ts(optional)]
|
||||
pub max_length: Option<i64>,
|
||||
#[serde(rename = "minLength", default, skip_serializing_if = "Option::is_none")]
|
||||
#[ts(optional)]
|
||||
pub min_length: Option<i64>,
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
#[ts(optional)]
|
||||
pub title: Option<String>,
|
||||
pub r#type: String, // &'static str = "string"
|
||||
}
|
||||
@@ -1328,6 +1435,7 @@ pub struct SubscribeRequestParams {
|
||||
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize, JsonSchema, TS)]
|
||||
pub struct TextContent {
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
#[ts(optional)]
|
||||
pub annotations: Option<Annotations>,
|
||||
pub text: String,
|
||||
pub r#type: String, // &'static str = "text"
|
||||
@@ -1336,6 +1444,7 @@ pub struct TextContent {
|
||||
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize, JsonSchema, TS)]
|
||||
pub struct TextResourceContents {
|
||||
#[serde(rename = "mimeType", default, skip_serializing_if = "Option::is_none")]
|
||||
#[ts(optional)]
|
||||
pub mime_type: Option<String>,
|
||||
pub text: String,
|
||||
pub uri: String,
|
||||
@@ -1345,8 +1454,10 @@ pub struct TextResourceContents {
|
||||
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize, JsonSchema, TS)]
|
||||
pub struct Tool {
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
#[ts(optional)]
|
||||
pub annotations: Option<ToolAnnotations>,
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
#[ts(optional)]
|
||||
pub description: Option<String>,
|
||||
#[serde(rename = "inputSchema")]
|
||||
pub input_schema: ToolInputSchema,
|
||||
@@ -1356,8 +1467,10 @@ pub struct Tool {
|
||||
default,
|
||||
skip_serializing_if = "Option::is_none"
|
||||
)]
|
||||
#[ts(optional)]
|
||||
pub output_schema: Option<ToolOutputSchema>,
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
#[ts(optional)]
|
||||
pub title: Option<String>,
|
||||
}
|
||||
|
||||
@@ -1366,8 +1479,10 @@ pub struct Tool {
|
||||
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize, JsonSchema, TS)]
|
||||
pub struct ToolOutputSchema {
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
#[ts(optional)]
|
||||
pub properties: Option<serde_json::Value>,
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
#[ts(optional)]
|
||||
pub required: Option<Vec<String>>,
|
||||
pub r#type: String, // &'static str = "object"
|
||||
}
|
||||
@@ -1376,8 +1491,10 @@ pub struct ToolOutputSchema {
|
||||
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize, JsonSchema, TS)]
|
||||
pub struct ToolInputSchema {
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
#[ts(optional)]
|
||||
pub properties: Option<serde_json::Value>,
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
#[ts(optional)]
|
||||
pub required: Option<Vec<String>>,
|
||||
pub r#type: String, // &'static str = "object"
|
||||
}
|
||||
@@ -1397,26 +1514,31 @@ pub struct ToolAnnotations {
|
||||
default,
|
||||
skip_serializing_if = "Option::is_none"
|
||||
)]
|
||||
#[ts(optional)]
|
||||
pub destructive_hint: Option<bool>,
|
||||
#[serde(
|
||||
rename = "idempotentHint",
|
||||
default,
|
||||
skip_serializing_if = "Option::is_none"
|
||||
)]
|
||||
#[ts(optional)]
|
||||
pub idempotent_hint: Option<bool>,
|
||||
#[serde(
|
||||
rename = "openWorldHint",
|
||||
default,
|
||||
skip_serializing_if = "Option::is_none"
|
||||
)]
|
||||
#[ts(optional)]
|
||||
pub open_world_hint: Option<bool>,
|
||||
#[serde(
|
||||
rename = "readOnlyHint",
|
||||
default,
|
||||
skip_serializing_if = "Option::is_none"
|
||||
)]
|
||||
#[ts(optional)]
|
||||
pub read_only_hint: Option<bool>,
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
#[ts(optional)]
|
||||
pub title: Option<String>,
|
||||
}
|
||||
|
||||
|
||||
@@ -61,7 +61,6 @@ impl SandboxRiskCategory {
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Deserialize, Serialize, JsonSchema, TS)]
|
||||
#[ts(optional_fields = nullable)]
|
||||
pub struct ExecApprovalRequestEvent {
|
||||
/// Identifier for the associated exec call, if available.
|
||||
pub call_id: String,
|
||||
@@ -79,7 +78,6 @@ pub struct ExecApprovalRequestEvent {
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Deserialize, Serialize, JsonSchema, TS)]
|
||||
#[ts(optional_fields = nullable)]
|
||||
pub struct ApplyPatchApprovalRequestEvent {
|
||||
/// Responses API call id for the associated patch apply call, if available.
|
||||
pub call_id: String,
|
||||
|
||||
@@ -11,7 +11,6 @@ use ts_rs::TS;
|
||||
pub const PROMPTS_CMD_PREFIX: &str = "prompts";
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema, TS)]
|
||||
#[ts(optional_fields = nullable)]
|
||||
pub struct CustomPrompt {
|
||||
pub name: String,
|
||||
pub path: PathBuf,
|
||||
|
||||
@@ -48,36 +48,35 @@ pub enum ContentItem {
|
||||
#[serde(tag = "type", rename_all = "snake_case")]
|
||||
pub enum ResponseItem {
|
||||
Message {
|
||||
#[serde(skip_serializing)]
|
||||
#[ts(optional = nullable)]
|
||||
#[serde(default, skip_serializing)]
|
||||
#[ts(skip)]
|
||||
id: Option<String>,
|
||||
role: String,
|
||||
content: Vec<ContentItem>,
|
||||
},
|
||||
Reasoning {
|
||||
#[serde(default, skip_serializing)]
|
||||
#[ts(skip)]
|
||||
id: String,
|
||||
summary: Vec<ReasoningItemReasoningSummary>,
|
||||
#[serde(default, skip_serializing_if = "should_serialize_reasoning_content")]
|
||||
#[ts(optional = nullable)]
|
||||
#[ts(optional)]
|
||||
content: Option<Vec<ReasoningItemContent>>,
|
||||
#[ts(optional = nullable)]
|
||||
encrypted_content: Option<String>,
|
||||
},
|
||||
LocalShellCall {
|
||||
/// Set when using the chat completions API.
|
||||
#[serde(skip_serializing)]
|
||||
#[ts(optional = nullable)]
|
||||
#[serde(default, skip_serializing)]
|
||||
#[ts(skip)]
|
||||
id: Option<String>,
|
||||
/// Set when using the Responses API.
|
||||
#[ts(optional = nullable)]
|
||||
call_id: Option<String>,
|
||||
status: LocalShellStatus,
|
||||
action: LocalShellAction,
|
||||
},
|
||||
FunctionCall {
|
||||
#[serde(skip_serializing)]
|
||||
#[ts(optional = nullable)]
|
||||
#[serde(default, skip_serializing)]
|
||||
#[ts(skip)]
|
||||
id: Option<String>,
|
||||
name: String,
|
||||
// The Responses API returns the function call arguments as a *string* that contains
|
||||
@@ -97,11 +96,11 @@ pub enum ResponseItem {
|
||||
output: FunctionCallOutputPayload,
|
||||
},
|
||||
CustomToolCall {
|
||||
#[serde(skip_serializing)]
|
||||
#[ts(optional = nullable)]
|
||||
#[serde(default, skip_serializing)]
|
||||
#[ts(skip)]
|
||||
id: Option<String>,
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
#[ts(optional = nullable)]
|
||||
#[ts(optional)]
|
||||
status: Option<String>,
|
||||
|
||||
call_id: String,
|
||||
@@ -121,11 +120,11 @@ pub enum ResponseItem {
|
||||
// "action": {"type":"search","query":"weather: San Francisco, CA"}
|
||||
// }
|
||||
WebSearchCall {
|
||||
#[serde(skip_serializing)]
|
||||
#[ts(optional = nullable)]
|
||||
#[serde(default, skip_serializing)]
|
||||
#[ts(skip)]
|
||||
id: Option<String>,
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
#[ts(optional = nullable)]
|
||||
#[ts(optional)]
|
||||
status: Option<String>,
|
||||
action: WebSearchAction,
|
||||
},
|
||||
@@ -203,7 +202,6 @@ pub enum LocalShellAction {
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, JsonSchema, TS)]
|
||||
#[ts(optional_fields = nullable)]
|
||||
pub struct LocalShellExecAction {
|
||||
pub command: Vec<String>,
|
||||
pub timeout_ms: Option<u64>,
|
||||
@@ -296,7 +294,6 @@ impl From<Vec<UserInput>> for ResponseInputItem {
|
||||
/// If the `name` of a `ResponseItem::FunctionCall` is either `container.exec`
|
||||
/// or `shell`, the `arguments` field should deserialize to this struct.
|
||||
#[derive(Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
|
||||
#[ts(optional_fields = nullable)]
|
||||
pub struct ShellToolCallParams {
|
||||
pub command: Vec<String>,
|
||||
pub workdir: Option<String>,
|
||||
@@ -329,7 +326,6 @@ pub enum FunctionCallOutputContentItem {
|
||||
/// `content_items` with the structured form that the Responses/Chat
|
||||
/// Completions APIs understand.
|
||||
#[derive(Debug, Default, Clone, PartialEq, JsonSchema, TS)]
|
||||
#[ts(optional_fields = nullable)]
|
||||
pub struct FunctionCallOutputPayload {
|
||||
pub content: String,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
|
||||
@@ -18,14 +18,11 @@ pub enum ParsedCommand {
|
||||
},
|
||||
ListFiles {
|
||||
cmd: String,
|
||||
#[ts(optional = nullable)]
|
||||
path: Option<String>,
|
||||
},
|
||||
Search {
|
||||
cmd: String,
|
||||
#[ts(optional = nullable)]
|
||||
query: Option<String>,
|
||||
#[ts(optional = nullable)]
|
||||
path: Option<String>,
|
||||
},
|
||||
Unknown {
|
||||
|
||||
@@ -21,7 +21,6 @@ pub struct PlanItemArg {
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema, TS)]
|
||||
#[serde(deny_unknown_fields)]
|
||||
#[ts(optional_fields = nullable)]
|
||||
pub struct UpdatePlanArgs {
|
||||
#[serde(default)]
|
||||
pub explanation: Option<String>,
|
||||
|
||||
@@ -438,6 +438,10 @@ pub enum EventMsg {
/// Error while executing a submission
Error(ErrorEvent),

/// Warning issued while processing a submission. Unlike `Error`, this
/// indicates the task continued but the user should still be notified.
Warning(WarningEvent),

/// Agent has started a task
TaskStarted(TaskStartedEvent),

@@ -661,7 +665,6 @@ impl HasLegacyEvent for EventMsg {
}

#[derive(Debug, Clone, Deserialize, Serialize, JsonSchema, TS)]
#[ts(optional_fields = nullable)]
pub struct ExitedReviewModeEvent {
pub review_output: Option<ReviewOutputEvent>,
}
@@ -674,13 +677,16 @@ pub struct ErrorEvent {
}

#[derive(Debug, Clone, Deserialize, Serialize, JsonSchema, TS)]
#[ts(optional_fields = nullable)]
pub struct WarningEvent {
pub message: String,
}

#[derive(Debug, Clone, Deserialize, Serialize, JsonSchema, TS)]
pub struct TaskCompleteEvent {
pub last_agent_message: Option<String>,
}

#[derive(Debug, Clone, Deserialize, Serialize, JsonSchema, TS)]
#[ts(optional_fields = nullable)]
pub struct TaskStartedEvent {
pub model_context_window: Option<i64>,
}
@@ -700,11 +706,9 @@ pub struct TokenUsage {
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Deserialize, Serialize, JsonSchema, TS)]
|
||||
#[ts(optional_fields = nullable)]
|
||||
pub struct TokenUsageInfo {
|
||||
pub total_token_usage: TokenUsage,
|
||||
pub last_token_usage: TokenUsage,
|
||||
#[ts(optional = nullable)]
|
||||
#[ts(type = "number | null")]
|
||||
pub model_context_window: Option<i64>,
|
||||
}
|
||||
@@ -765,30 +769,25 @@ impl TokenUsageInfo {
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Deserialize, Serialize, JsonSchema, TS)]
|
||||
#[ts(optional_fields = nullable)]
|
||||
pub struct TokenCountEvent {
|
||||
pub info: Option<TokenUsageInfo>,
|
||||
pub rate_limits: Option<RateLimitSnapshot>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize, JsonSchema, TS)]
|
||||
#[ts(optional_fields = nullable)]
|
||||
pub struct RateLimitSnapshot {
|
||||
pub primary: Option<RateLimitWindow>,
|
||||
pub secondary: Option<RateLimitWindow>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize, JsonSchema, TS)]
|
||||
#[ts(optional_fields = nullable)]
|
||||
pub struct RateLimitWindow {
|
||||
/// Percentage (0-100) of the window that has been consumed.
|
||||
pub used_percent: f64,
|
||||
/// Rolling window duration, in minutes.
|
||||
#[ts(optional = nullable)]
|
||||
#[ts(type = "number | null")]
|
||||
pub window_minutes: Option<i64>,
|
||||
/// Unix timestamp (seconds since epoch) when the window resets.
|
||||
#[ts(optional = nullable)]
|
||||
#[ts(type = "number | null")]
|
||||
pub resets_at: Option<i64>,
|
||||
}
|
||||
@@ -902,7 +901,6 @@ pub struct AgentMessageEvent {
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Deserialize, Serialize, JsonSchema, TS)]
|
||||
#[ts(optional_fields = nullable)]
|
||||
pub struct UserMessageEvent {
|
||||
pub message: String,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
@@ -938,7 +936,6 @@ pub struct AgentReasoningDeltaEvent {
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Deserialize, Serialize, JsonSchema, TS, PartialEq)]
|
||||
#[ts(optional_fields = nullable)]
|
||||
pub struct McpInvocation {
|
||||
/// Name of the MCP server as defined in the config.
|
||||
pub server: String,
|
||||
@@ -1058,6 +1055,8 @@ pub enum SessionSource {
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq, JsonSchema, TS)]
|
||||
#[serde(rename_all = "snake_case")]
|
||||
#[ts(rename_all = "snake_case")]
|
||||
pub enum SubAgentSource {
|
||||
Review,
|
||||
Compact,
|
||||
@@ -1065,7 +1064,6 @@ pub enum SubAgentSource {
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Clone, Debug, JsonSchema, TS)]
|
||||
#[ts(optional_fields = nullable)]
|
||||
pub struct SessionMeta {
|
||||
pub id: ConversationId,
|
||||
pub timestamp: String,
|
||||
@@ -1094,7 +1092,6 @@ impl Default for SessionMeta {
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema, TS)]
|
||||
#[ts(optional_fields = nullable)]
|
||||
pub struct SessionMetaLine {
|
||||
#[serde(flatten)]
|
||||
pub meta: SessionMeta,
|
||||
@@ -1130,7 +1127,6 @@ impl From<CompactedItem> for ResponseItem {
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Clone, Debug, JsonSchema, TS)]
|
||||
#[ts(optional_fields = nullable)]
|
||||
pub struct TurnContextItem {
|
||||
pub cwd: PathBuf,
|
||||
pub approval_policy: AskForApproval,
|
||||
@@ -1149,7 +1145,6 @@ pub struct RolloutLine {
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Clone, Debug, JsonSchema, TS)]
|
||||
#[ts(optional_fields = nullable)]
|
||||
pub struct GitInfo {
|
||||
/// Current commit hash (SHA)
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
@@ -1283,7 +1278,6 @@ pub struct BackgroundEventEvent {
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Deserialize, Serialize, JsonSchema, TS)]
|
||||
#[ts(optional_fields = nullable)]
|
||||
pub struct DeprecationNoticeEvent {
|
||||
/// Concise summary of what is deprecated.
|
||||
pub summary: String,
|
||||
@@ -1293,14 +1287,12 @@ pub struct DeprecationNoticeEvent {
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Deserialize, Serialize, JsonSchema, TS)]
|
||||
#[ts(optional_fields = nullable)]
|
||||
pub struct UndoStartedEvent {
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub message: Option<String>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Deserialize, Serialize, JsonSchema, TS)]
|
||||
#[ts(optional_fields = nullable)]
|
||||
pub struct UndoCompletedEvent {
|
||||
pub success: bool,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
@@ -1345,7 +1337,6 @@ pub struct TurnDiffEvent {
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Deserialize, Serialize, JsonSchema, TS)]
|
||||
#[ts(optional_fields = nullable)]
|
||||
pub struct GetHistoryEntryResponseEvent {
|
||||
pub offset: usize,
|
||||
pub log_id: u64,
|
||||
@@ -1395,7 +1386,6 @@ pub struct ListCustomPromptsResponseEvent {
|
||||
}
|
||||
|
||||
#[derive(Debug, Default, Clone, Deserialize, Serialize, JsonSchema, TS)]
|
||||
#[ts(optional_fields = nullable)]
|
||||
pub struct SessionConfiguredEvent {
|
||||
/// Name left as session_id instead of conversation_id for backwards compatibility.
|
||||
pub session_id: ConversationId,
|
||||
@@ -1456,7 +1446,6 @@ pub enum FileChange {
|
||||
},
|
||||
Update {
|
||||
unified_diff: String,
|
||||
#[ts(optional = nullable)]
|
||||
move_path: Option<PathBuf>,
|
||||
},
|
||||
}
|
||||
|
||||
246
codex-rs/scripts/setup-windows.ps1
Normal file
246
codex-rs/scripts/setup-windows.ps1
Normal file
@@ -0,0 +1,246 @@
|
||||
<#
|
||||
Setup script for building codex-rs on Windows.
|
||||
|
||||
What it does:
|
||||
- Installs Rust toolchain (via winget rustup) and required components
|
||||
- Installs Visual Studio 2022 Build Tools (MSVC + Windows SDK)
|
||||
- Installs helpful CLIs used by the repo: git, ripgrep (rg), just, cmake
|
||||
- Installs cargo-insta (for snapshot tests) via cargo
|
||||
- Ensures PATH contains Cargo bin for the current session
|
||||
- Builds the workspace (cargo build)
|
||||
|
||||
Usage:
|
||||
- Right-click PowerShell and "Run as Administrator" (VS Build Tools require elevation)
|
||||
- From the repo root (codex-rs), run:
|
||||
powershell -ExecutionPolicy Bypass -File scripts/setup-windows.ps1
|
||||
|
||||
Notes:
|
||||
- Requires winget (Windows Package Manager). Most modern Windows 10/11 have it preinstalled.
|
||||
- The script is re-runnable; winget/cargo will skip/reinstall as appropriate.
|
||||
#>
|
||||
|
||||
param(
|
||||
[switch] $SkipBuild
|
||||
)
|
||||
|
||||
$ErrorActionPreference = 'Stop'
|
||||
|
||||
function Ensure-Command($Name) {
|
||||
$exists = Get-Command $Name -ErrorAction SilentlyContinue
|
||||
return $null -ne $exists
|
||||
}
|
||||
|
||||
function Add-CargoBinToPath() {
|
||||
$cargoBin = Join-Path $env:USERPROFILE ".cargo\bin"
|
||||
if (Test-Path $cargoBin) {
|
||||
if (-not ($env:Path.Split(';') -contains $cargoBin)) {
|
||||
$env:Path = "$env:Path;$cargoBin"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
function Ensure-UserPathContains([string] $Segment) {
|
||||
try {
|
||||
$userPath = [Environment]::GetEnvironmentVariable('Path', 'User')
|
||||
if ($null -eq $userPath) { $userPath = '' }
|
||||
$parts = $userPath.Split(';') | Where-Object { $_ -ne '' }
|
||||
if (-not ($parts -contains $Segment)) {
|
||||
$newPath = if ($userPath) { "$userPath;$Segment" } else { $Segment }
|
||||
[Environment]::SetEnvironmentVariable('Path', $newPath, 'User')
|
||||
}
|
||||
} catch {}
|
||||
}
|
||||
|
||||
function Ensure-UserEnvVar([string] $Name, [string] $Value) {
|
||||
try { [Environment]::SetEnvironmentVariable($Name, $Value, 'User') } catch {}
|
||||
}
|
||||
|
||||
function Ensure-VSComponents([string[]]$Components) {
|
||||
$vsInstaller = "${env:ProgramFiles(x86)}\Microsoft Visual Studio\Installer\vs_installer.exe"
|
||||
$vswhere = "${env:ProgramFiles(x86)}\Microsoft Visual Studio\Installer\vswhere.exe"
|
||||
if (-not (Test-Path $vsInstaller) -or -not (Test-Path $vswhere)) { return }
|
||||
|
||||
$instPath = & $vswhere -latest -products * -version "[17.0,18.0)" -requires Microsoft.VisualStudio.Workload.VCTools -property installationPath 2>$null
|
||||
if (-not $instPath) {
|
||||
# 2022 instance may be present without VC Tools; pick BuildTools 2022 and add components
|
||||
$instPath = & $vswhere -latest -products Microsoft.VisualStudio.Product.BuildTools -version "[17.0,18.0)" -property installationPath 2>$null
|
||||
}
|
||||
if (-not $instPath) {
|
||||
$instPath = & $vswhere -latest -products * -requires Microsoft.VisualStudio.Workload.VCTools -property installationPath 2>$null
|
||||
}
|
||||
if (-not $instPath) {
|
||||
$default2022 = 'C:\\Program Files (x86)\\Microsoft Visual Studio\\2022\\BuildTools'
|
||||
        if (Test-Path $default2022) { $instPath = $default2022 }
    }
    if (-not $instPath) { return }

    $vsDevCmd = Join-Path $instPath 'Common7\Tools\VsDevCmd.bat'
    $verb = if (Test-Path $vsDevCmd) { 'modify' } else { 'install' }
    $args = @($verb, '--installPath', $instPath, '--quiet', '--norestart', '--nocache')
    if ($verb -eq 'install') { $args += @('--productId', 'Microsoft.VisualStudio.Product.BuildTools') }
    foreach ($c in $Components) { $args += @('--add', $c) }
    Write-Host "-- Ensuring VS components installed: $($Components -join ', ')" -ForegroundColor DarkCyan
    & $vsInstaller @args | Out-Host
}

function Enter-VsDevShell() {
    $vswhere = "${env:ProgramFiles(x86)}\Microsoft Visual Studio\Installer\vswhere.exe"
    if (-not (Test-Path $vswhere)) { return }

    $instPath = & $vswhere -latest -products * -requires Microsoft.VisualStudio.Component.VC.Tools.x86.x64 -property installationPath 2>$null
    if (-not $instPath) {
        # Try ARM64 components
        $instPath = & $vswhere -latest -products * -requires Microsoft.VisualStudio.Component.VC.Tools.ARM64 -property installationPath 2>$null
    }
    if (-not $instPath) { return }

    $vsDevCmd = Join-Path $instPath 'Common7\Tools\VsDevCmd.bat'
    if (-not (Test-Path $vsDevCmd)) { return }

    # Prefer ARM64 on ARM machines, otherwise x64
    $arch = if ($env:PROCESSOR_ARCHITEW6432 -eq 'ARM64' -or $env:PROCESSOR_ARCHITECTURE -eq 'ARM64') { 'arm64' } else { 'x64' }
    $devCmdStr = ('"{0}" -no_logo -arch={1} -host_arch={1} & set' -f $vsDevCmd, $arch)
    $envLines = & cmd.exe /c $devCmdStr
    foreach ($line in $envLines) {
        if ($line -match '^(.*?)=(.*)$') {
            $name = $matches[1]
            $value = $matches[2]
            try { [Environment]::SetEnvironmentVariable($name, $value, 'Process') } catch {}
        }
    }
}

Write-Host "==> Installing prerequisites via winget (may take a while)" -ForegroundColor Cyan

# Accept agreements up-front for non-interactive installs
$WingetArgs = @('--accept-package-agreements', '--accept-source-agreements', '-e')

if (-not (Ensure-Command 'winget')) {
    throw "winget is required. Please update to the latest Windows 10/11 or install winget."
}

# 1) Visual Studio 2022 Build Tools (MSVC toolchain + Windows SDK)
# The VC Tools workload brings the required MSVC toolchains; include recommended components to pick up a Windows SDK.
Write-Host "-- Installing Visual Studio Build Tools (VC Tools workload + ARM64 toolchains)" -ForegroundColor DarkCyan
$vsOverride = @(
    '--quiet', '--wait', '--norestart', '--nocache',
    '--add', 'Microsoft.VisualStudio.Workload.VCTools',
    '--add', 'Microsoft.VisualStudio.Component.VC.Tools.ARM64',
    '--add', 'Microsoft.VisualStudio.Component.VC.Tools.ARM64EC',
    '--add', 'Microsoft.VisualStudio.Component.Windows11SDK.22000'
) -join ' '
winget install @WingetArgs --id Microsoft.VisualStudio.2022.BuildTools --override $vsOverride | Out-Host

# Ensure required VC components even if winget doesn't modify the instance
$isArm64 = ($env:PROCESSOR_ARCHITEW6432 -eq 'ARM64' -or $env:PROCESSOR_ARCHITECTURE -eq 'ARM64')
$components = @(
    'Microsoft.VisualStudio.Workload.VCTools',
    'Microsoft.VisualStudio.Component.VC.Tools.ARM64',
    'Microsoft.VisualStudio.Component.VC.Tools.ARM64EC',
    'Microsoft.VisualStudio.Component.Windows11SDK.22000'
)
Ensure-VSComponents -Components $components

# 2) Rustup
Write-Host "-- Installing rustup" -ForegroundColor DarkCyan
winget install @WingetArgs --id Rustlang.Rustup | Out-Host

# Make cargo available in this session
Add-CargoBinToPath

# 3) Git (often present, but ensure installed)
Write-Host "-- Installing Git" -ForegroundColor DarkCyan
winget install @WingetArgs --id Git.Git | Out-Host

# 4) ripgrep (rg)
Write-Host "-- Installing ripgrep (rg)" -ForegroundColor DarkCyan
winget install @WingetArgs --id BurntSushi.ripgrep.MSVC | Out-Host

# 5) just
Write-Host "-- Installing just" -ForegroundColor DarkCyan
winget install @WingetArgs --id Casey.Just | Out-Host

# 6) cmake (commonly needed by native crates)
Write-Host "-- Installing CMake" -ForegroundColor DarkCyan
winget install @WingetArgs --id Kitware.CMake | Out-Host

# Ensure cargo is available after rustup install
Add-CargoBinToPath
if (-not (Ensure-Command 'cargo')) {
    # Some shells need a re-login; attempt to source cargo.env if present
    $cargoEnv = Join-Path $env:USERPROFILE ".cargo\env"
    if (Test-Path $cargoEnv) { . $cargoEnv }
    Add-CargoBinToPath
}
if (-not (Ensure-Command 'cargo')) {
    throw "cargo not found in PATH after rustup install. Please open a new terminal and re-run the script."
}

Write-Host "==> Configuring Rust toolchain per rust-toolchain.toml" -ForegroundColor Cyan

# Pin to the workspace toolchain and install components
$toolchain = '1.90.0'
& rustup toolchain install $toolchain --profile minimal | Out-Host
& rustup default $toolchain | Out-Host
& rustup component add clippy rustfmt rust-src --toolchain $toolchain | Out-Host

# 6.5) LLVM/Clang (some crates/bindgen require clang/libclang)
function Add-LLVMToPath() {
    $llvmBin = 'C:\Program Files\LLVM\bin'
    if (Test-Path $llvmBin) {
        if (-not ($env:Path.Split(';') -contains $llvmBin)) {
            $env:Path = "$env:Path;$llvmBin"
        }
        if (-not $env:LIBCLANG_PATH) {
            $env:LIBCLANG_PATH = $llvmBin
        }
        Ensure-UserPathContains $llvmBin
        Ensure-UserEnvVar -Name 'LIBCLANG_PATH' -Value $llvmBin

        $clang = Join-Path $llvmBin 'clang.exe'
        $clangxx = Join-Path $llvmBin 'clang++.exe'
        if (Test-Path $clang) {
            $env:CC = $clang
            Ensure-UserEnvVar -Name 'CC' -Value $clang
        }
        if (Test-Path $clangxx) {
            $env:CXX = $clangxx
            Ensure-UserEnvVar -Name 'CXX' -Value $clangxx
        }
    }
}

Write-Host "-- Installing LLVM/Clang" -ForegroundColor DarkCyan
winget install @WingetArgs --id LLVM.LLVM | Out-Host
Add-LLVMToPath

# 7) cargo-insta (used by snapshot tests)
# Ensure MSVC linker is available before building/cargo-install by entering VS dev shell
Enter-VsDevShell
$hasLink = $false
try { & where.exe link | Out-Null; $hasLink = $true } catch {}
if ($hasLink) {
    Write-Host "-- Installing cargo-insta" -ForegroundColor DarkCyan
    & cargo install cargo-insta --locked | Out-Host
} else {
    Write-Host "-- Skipping cargo-insta for now (MSVC linker not found yet)" -ForegroundColor Yellow
}

if ($SkipBuild) {
    Write-Host "==> Skipping cargo build (SkipBuild specified)" -ForegroundColor Yellow
    exit 0
}

Write-Host "==> Building workspace (cargo build)" -ForegroundColor Cyan
pushd "$PSScriptRoot\.." | Out-Null
try {
    # Clear RUSTFLAGS if coming from constrained environments
    $env:RUSTFLAGS = ''
    Enter-VsDevShell
    & cargo build
}
finally {
    popd | Out-Null
}

Write-Host "==> Build complete" -ForegroundColor Green

@@ -17,7 +17,7 @@ use codex_ansi_escape::ansi_escape_line;
use codex_core::AuthManager;
use codex_core::ConversationManager;
use codex_core::config::Config;
use codex_core::config_edit::ConfigEditsBuilder;
use codex_core::config::edit::ConfigEditsBuilder;
use codex_core::model_family::find_family_for_model;
use codex_core::protocol::SessionSource;
use codex_core::protocol::TokenUsage;

@@ -4,7 +4,7 @@ use std::path::PathBuf;
use std::sync::Arc;

use codex_core::config::Config;
use codex_core::config_types::Notifications;
use codex_core::config::types::Notifications;
use codex_core::git_info::current_branch_name;
use codex_core::git_info::local_git_branches;
use codex_core::project_doc::DEFAULT_PROJECT_DOC_FILENAME;

@@ -42,6 +42,7 @@ use codex_core::protocol::UndoCompletedEvent;
use codex_core::protocol::UndoStartedEvent;
use codex_core::protocol::UserMessageEvent;
use codex_core::protocol::ViewImageToolCallEvent;
use codex_core::protocol::WarningEvent;
use codex_core::protocol::WebSearchBeginEvent;
use codex_core::protocol::WebSearchEndEvent;
use codex_protocol::ConversationId;

@@ -519,6 +520,11 @@ impl ChatWidget {
        self.maybe_send_next_queued_input();
    }

    fn on_warning(&mut self, message: String) {
        self.add_to_history(history_cell::new_warning_event(message));
        self.request_redraw();
    }

    /// Handle a turn aborted due to user interrupt (Esc).
    /// When there are queued user messages, restore them into the composer
    /// separated by newlines rather than auto‑submitting the next one.

@@ -657,7 +663,7 @@ impl ChatWidget {
    }

    fn on_shutdown_complete(&mut self) {
        self.app_event_tx.send(AppEvent::ExitRequest);
        self.request_exit();
    }

    fn on_turn_diff(&mut self, unified_diff: String) {

@@ -1229,8 +1235,8 @@ impl ChatWidget {
            SlashCommand::Approvals => {
                self.open_approvals_popup();
            }
            SlashCommand::Quit => {
                self.app_event_tx.send(AppEvent::ExitRequest);
            SlashCommand::Quit | SlashCommand::Exit => {
                self.request_exit();
            }
            SlashCommand::Logout => {
                if let Err(e) = codex_core::auth::logout(

@@ -1239,7 +1245,7 @@ impl ChatWidget {
                ) {
                    tracing::error!("failed to logout: {e}");
                }
                self.app_event_tx.send(AppEvent::ExitRequest);
                self.request_exit();
            }
            SlashCommand::Undo => {
                self.app_event_tx.send(AppEvent::CodexOp(Op::Undo));

@@ -1270,7 +1276,16 @@ impl ChatWidget {
            SlashCommand::Mcp => {
                self.add_mcp_output();
            }
            #[cfg(debug_assertions)]
            SlashCommand::Rollout => {
                if let Some(path) = self.rollout_path() {
                    self.add_info_message(
                        format!("Current rollout path: {}", path.display()),
                        None,
                    );
                } else {
                    self.add_info_message("Rollout path is not available yet.".to_string(), None);
                }
            }
            SlashCommand::TestApproval => {
                use codex_core::protocol::EventMsg;
                use std::collections::HashMap;

@@ -1468,6 +1483,7 @@ impl ChatWidget {
                self.set_token_info(ev.info);
                self.on_rate_limit_snapshot(ev.rate_limits);
            }
            EventMsg::Warning(WarningEvent { message }) => self.on_warning(message),
            EventMsg::Error(ErrorEvent { message }) => self.on_error(message),
            EventMsg::TurnAborted(ev) => match ev.reason {
                TurnAbortReason::Interrupted => {

@@ -1584,6 +1600,10 @@ impl ChatWidget {
        }
    }

    fn request_exit(&self) {
        self.app_event_tx.send(AppEvent::ExitRequest);
    }

    fn request_redraw(&mut self) {
        self.frame_requester.schedule_frame();
    }

@@ -1853,7 +1873,10 @@ impl ChatWidget {
                current_approval == preset.approval && current_sandbox == preset.sandbox;
            let name = preset.label.to_string();
            let description_text = preset.description;
            let description = if cfg!(target_os = "windows") && preset.id == "auto" {
            let description = if cfg!(target_os = "windows")
                && preset.id == "auto"
                && codex_core::get_platform_sandbox().is_none()
            {
                Some(format!(
                    "{description_text}\nRequires Windows Subsystem for Linux (WSL). Show installation instructions..."
                ))

@@ -1873,7 +1896,10 @@ impl ChatWidget {
                    preset: preset_clone.clone(),
                });
            })]
        } else if cfg!(target_os = "windows") && preset.id == "auto" {
        } else if cfg!(target_os = "windows")
            && preset.id == "auto"
            && codex_core::get_platform_sandbox().is_none()
        {
            vec![Box::new(|tx| {
                tx.send(AppEvent::ShowWindowsAutoModeInstructions);
            })]

@@ -37,6 +37,7 @@ use codex_core::protocol::TaskStartedEvent;
use codex_core::protocol::UndoCompletedEvent;
use codex_core::protocol::UndoStartedEvent;
use codex_core::protocol::ViewImageToolCallEvent;
use codex_core::protocol::WarningEvent;
use codex_protocol::ConversationId;
use codex_protocol::parse_command::ParsedCommand;
use codex_protocol::plan_tool::PlanItemArg;

@@ -56,6 +57,8 @@ use tempfile::tempdir;
use tokio::sync::mpsc::error::TryRecvError;
use tokio::sync::mpsc::unbounded_channel;

const TEST_WARNING_MESSAGE: &str = "Heads up: Long conversations and multiple compactions can cause the model to be less accurate. Start new a new conversation when possible to keep conversations small and targeted.";

fn test_config() -> Config {
    // Use base defaults to avoid depending on host state.
    Config::load_from_base_config_with_overrides(

@@ -851,6 +854,24 @@ fn slash_init_skips_when_project_doc_exists() {
    );
}

#[test]
fn slash_quit_requests_exit() {
    let (mut chat, mut rx, _op_rx) = make_chatwidget_manual();

    chat.dispatch_command(SlashCommand::Quit);

    assert_matches!(rx.try_recv(), Ok(AppEvent::ExitRequest));
}

#[test]
fn slash_exit_requests_exit() {
    let (mut chat, mut rx, _op_rx) = make_chatwidget_manual();

    chat.dispatch_command(SlashCommand::Exit);

    assert_matches!(rx.try_recv(), Ok(AppEvent::ExitRequest));
}

#[test]
fn slash_undo_sends_op() {
    let (mut chat, mut rx, _op_rx) = make_chatwidget_manual();

@@ -863,6 +884,42 @@ fn slash_undo_sends_op() {
    }
}

#[test]
fn slash_rollout_displays_current_path() {
    let (mut chat, mut rx, _op_rx) = make_chatwidget_manual();
    let rollout_path = PathBuf::from("/tmp/codex-test-rollout.jsonl");
    chat.current_rollout_path = Some(rollout_path.clone());

    chat.dispatch_command(SlashCommand::Rollout);

    let cells = drain_insert_history(&mut rx);
    assert_eq!(cells.len(), 1, "expected info message for rollout path");
    let rendered = lines_to_single_string(&cells[0]);
    assert!(
        rendered.contains(&rollout_path.display().to_string()),
        "expected rollout path to be shown: {rendered}"
    );
}

#[test]
fn slash_rollout_handles_missing_path() {
    let (mut chat, mut rx, _op_rx) = make_chatwidget_manual();

    chat.dispatch_command(SlashCommand::Rollout);

    let cells = drain_insert_history(&mut rx);
    assert_eq!(
        cells.len(),
        1,
        "expected info message explaining missing path"
    );
    let rendered = lines_to_single_string(&cells[0]);
    assert!(
        rendered.contains("not available"),
        "expected missing rollout path message: {rendered}"
    );
}

#[test]
fn undo_success_events_render_info_messages() {
    let (mut chat, mut rx, _op_rx) = make_chatwidget_manual();

@@ -1528,7 +1585,8 @@ async fn binary_size_transcript_snapshot() {
                }
                has_emitted_history = true;
                transcript.push_str(&lines_to_single_string(&lines));
                crate::insert_history::insert_history_lines(&mut terminal, lines);
                crate::insert_history::insert_history_lines(&mut terminal, lines)
                    .expect("Failed to insert history lines in test");
            }
        }
    }

@@ -1549,7 +1607,8 @@ async fn binary_size_transcript_snapshot() {
                }
                has_emitted_history = true;
                transcript.push_str(&lines_to_single_string(&lines));
                crate::insert_history::insert_history_lines(&mut terminal, lines);
                crate::insert_history::insert_history_lines(&mut terminal, lines)
                    .expect("Failed to insert history lines in test");
            }
        }
    }

@@ -2389,6 +2448,25 @@ fn stream_error_updates_status_indicator() {
    assert_eq!(status.header(), msg);
}

#[test]
fn warning_event_adds_warning_history_cell() {
    let (mut chat, mut rx, _op_rx) = make_chatwidget_manual();
    chat.handle_codex_event(Event {
        id: "sub-1".into(),
        msg: EventMsg::Warning(WarningEvent {
            message: TEST_WARNING_MESSAGE.to_string(),
        }),
    });

    let cells = drain_insert_history(&mut rx);
    assert_eq!(cells.len(), 1, "expected one warning history cell");
    let rendered = lines_to_single_string(&cells[0]);
    assert!(
        rendered.contains(TEST_WARNING_MESSAGE),
        "warning cell missing content: {rendered}"
    );
}

#[test]
fn multiple_agent_messages_in_single_turn_emit_multiple_headers() {
    let (mut chat, mut rx, _op_rx) = make_chatwidget_manual();

@@ -2600,7 +2678,8 @@ fn chatwidget_exec_and_status_layout_vt100_snapshot() {
    term.set_viewport_area(viewport);

    for lines in drain_insert_history(&mut rx) {
        crate::insert_history::insert_history_lines(&mut term, lines);
        crate::insert_history::insert_history_lines(&mut term, lines)
            .expect("Failed to insert history lines in test");
    }

    term.draw(|f| {

@@ -2677,7 +2756,8 @@ printf 'fenced within fenced\n'
    while let Ok(app_ev) = rx.try_recv() {
        if let AppEvent::InsertHistoryCell(cell) = app_ev {
            let lines = cell.display_lines(width);
            crate::insert_history::insert_history_lines(&mut term, lines);
            crate::insert_history::insert_history_lines(&mut term, lines)
                .expect("Failed to insert history lines in test");
            inserted_any = true;
        }
    }

@@ -2695,7 +2775,8 @@ printf 'fenced within fenced\n'
        }),
    });
    for lines in drain_insert_history(&mut rx) {
        crate::insert_history::insert_history_lines(&mut term, lines);
        crate::insert_history::insert_history_lines(&mut term, lines)
            .expect("Failed to insert history lines in test");
    }

    assert_snapshot!(term.backend().vt100().screen().contents());