mirror of
https://github.com/openai/codex.git
synced 2026-02-03 23:43:39 +00:00
Compare commits
38 Commits
fix-unicod
...
mcp
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
4243c44b22 | ||
|
|
021d9beebd | ||
|
|
a5b2ebb49b | ||
|
|
697c7cf4bf | ||
|
|
34ac698bef | ||
|
|
097782c775 | ||
|
|
8ba8089592 | ||
|
|
57c498159a | ||
|
|
bbf42f4e12 | ||
|
|
6f0b499594 | ||
|
|
236c4f76a6 | ||
|
|
dc42ec0eb4 | ||
|
|
cdc77c10fb | ||
|
|
c5d21a4564 | ||
|
|
59f6b1654f | ||
|
|
80b00a193e | ||
|
|
76dc3f6054 | ||
|
|
e4c275d615 | ||
|
|
9f71dcbf57 | ||
|
|
750ca9e21d | ||
|
|
5fac7b2566 | ||
|
|
24c7be7da0 | ||
|
|
4b4aa2a774 | ||
|
|
16d16a4ddc | ||
|
|
9604671678 | ||
|
|
db934e438e | ||
|
|
5f6e1af1a5 | ||
|
|
8ad56be06e | ||
|
|
d2b2a6d13a | ||
|
|
74683bab91 | ||
|
|
dacff9675a | ||
|
|
697b4ce100 | ||
|
|
9193eb6b53 | ||
|
|
e95cad1946 | ||
|
|
2ec5a28528 | ||
|
|
050b9baeb6 | ||
|
|
5ab30c73f3 | ||
|
|
250ae37c84 |
2
.github/workflows/codex.yml
vendored
2
.github/workflows/codex.yml
vendored
@@ -52,7 +52,7 @@ jobs:
|
||||
~/.cargo/registry/cache/
|
||||
~/.cargo/git/db/
|
||||
${{ github.workspace }}/codex-rs/target/
|
||||
key: cargo-ubuntu-24.04-x86_64-unknown-linux-gnu-${{ hashFiles('**/Cargo.lock') }}
|
||||
key: cargo-ubuntu-24.04-x86_64-unknown-linux-gnu-dev-${{ hashFiles('**/Cargo.lock') }}
|
||||
|
||||
# Note it is possible that the `verify` step internal to Run Codex will
|
||||
# fail, in which case the work to setup the repo was worthless :(
|
||||
|
||||
@@ -1,3 +1,7 @@
|
||||
/codex-cli/dist
|
||||
/codex-cli/node_modules
|
||||
pnpm-lock.yaml
|
||||
|
||||
prompt.md
|
||||
*_prompt.md
|
||||
*_instructions.md
|
||||
|
||||
@@ -383,6 +383,13 @@ base_url = "http://my-ollama.example.com:11434/v1"
|
||||
|
||||
### Platform sandboxing details
|
||||
|
||||
By default, Codex CLI runs code and shell commands inside a restricted sandbox to protect your system.
|
||||
|
||||
> [!IMPORTANT]
|
||||
> Not all tool calls are sandboxed. Specifically, **trusted Model Context Protocol (MCP) tool calls** are executed outside of the sandbox.
|
||||
> This is intentional: MCP tools are explicitly configured and trusted by you, and they often need to connect to **external applications or services** (e.g. issue trackers, databases, messaging systems).
|
||||
> Running them outside the sandbox allows Codex to integrate with these external systems without being blocked by sandbox restrictions.
|
||||
|
||||
The mechanism Codex uses to implement the sandbox policy depends on your OS:
|
||||
|
||||
- **macOS 12+** uses **Apple Seatbelt** and runs commands using `sandbox-exec` with a profile (`-p`) that corresponds to the `--sandbox` that was specified.
|
||||
|
||||
196
codex-rs/Cargo.lock
generated
196
codex-rs/Cargo.lock
generated
@@ -186,6 +186,26 @@ version = "1.4.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "dde20b3d026af13f561bdd0f15edf01fc734f0dafcedbaf42bba506a9517f223"
|
||||
|
||||
[[package]]
|
||||
name = "arboard"
|
||||
version = "3.6.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "55f533f8e0af236ffe5eb979b99381df3258853f00ba2e44b6e1955292c75227"
|
||||
dependencies = [
|
||||
"clipboard-win",
|
||||
"image",
|
||||
"log",
|
||||
"objc2",
|
||||
"objc2-app-kit",
|
||||
"objc2-core-foundation",
|
||||
"objc2-core-graphics",
|
||||
"objc2-foundation",
|
||||
"parking_lot",
|
||||
"percent-encoding",
|
||||
"windows-sys 0.59.0",
|
||||
"x11rb",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "arg_enum_proc_macro"
|
||||
version = "0.3.4"
|
||||
@@ -737,6 +757,7 @@ dependencies = [
|
||||
"tree-sitter-bash",
|
||||
"uuid",
|
||||
"walkdir",
|
||||
"which",
|
||||
"whoami",
|
||||
"wildmatch",
|
||||
"wiremock",
|
||||
@@ -753,6 +774,7 @@ dependencies = [
|
||||
"codex-arg0",
|
||||
"codex-common",
|
||||
"codex-core",
|
||||
"codex-login",
|
||||
"codex-ollama",
|
||||
"codex-protocol",
|
||||
"core_test_support",
|
||||
@@ -822,6 +844,7 @@ version = "0.0.0"
|
||||
dependencies = [
|
||||
"base64 0.22.1",
|
||||
"chrono",
|
||||
"codex-protocol",
|
||||
"pretty_assertions",
|
||||
"rand 0.8.5",
|
||||
"reqwest",
|
||||
@@ -900,13 +923,16 @@ dependencies = [
|
||||
name = "codex-protocol"
|
||||
version = "0.0.0"
|
||||
dependencies = [
|
||||
"base64 0.22.1",
|
||||
"mcp-types",
|
||||
"mime_guess",
|
||||
"pretty_assertions",
|
||||
"serde",
|
||||
"serde_bytes",
|
||||
"serde_json",
|
||||
"strum 0.27.2",
|
||||
"strum_macros 0.27.2",
|
||||
"tracing",
|
||||
"ts-rs",
|
||||
"uuid",
|
||||
]
|
||||
@@ -926,6 +952,7 @@ name = "codex-tui"
|
||||
version = "0.0.0"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
"arboard",
|
||||
"async-stream",
|
||||
"base64 0.22.1",
|
||||
"chrono",
|
||||
@@ -960,6 +987,7 @@ dependencies = [
|
||||
"strum 0.27.2",
|
||||
"strum_macros 0.27.2",
|
||||
"supports-color",
|
||||
"tempfile",
|
||||
"textwrap 0.16.2",
|
||||
"tokio",
|
||||
"tokio-stream",
|
||||
@@ -1408,6 +1436,16 @@ dependencies = [
|
||||
"winapi",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "dispatch2"
|
||||
version = "0.3.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "89a09f22a6c6069a18470eb92d2298acf25463f14256d24778e1230d789a2aec"
|
||||
dependencies = [
|
||||
"bitflags 2.9.1",
|
||||
"objc2",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "display_container"
|
||||
version = "0.9.0"
|
||||
@@ -1861,6 +1899,16 @@ dependencies = [
|
||||
"version_check",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "gethostname"
|
||||
version = "0.4.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "0176e0459c2e4a1fe232f984bca6890e681076abb9934f6cea7c326f3fc47818"
|
||||
dependencies = [
|
||||
"libc",
|
||||
"windows-targets 0.48.5",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "getopts"
|
||||
version = "0.2.23"
|
||||
@@ -3057,6 +3105,42 @@ dependencies = [
|
||||
"objc2-encode",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "objc2-app-kit"
|
||||
version = "0.3.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "e6f29f568bec459b0ddff777cec4fe3fd8666d82d5a40ebd0ff7e66134f89bcc"
|
||||
dependencies = [
|
||||
"bitflags 2.9.1",
|
||||
"objc2",
|
||||
"objc2-core-graphics",
|
||||
"objc2-foundation",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "objc2-core-foundation"
|
||||
version = "0.3.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "1c10c2894a6fed806ade6027bcd50662746363a9589d3ec9d9bef30a4e4bc166"
|
||||
dependencies = [
|
||||
"bitflags 2.9.1",
|
||||
"dispatch2",
|
||||
"objc2",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "objc2-core-graphics"
|
||||
version = "0.3.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "989c6c68c13021b5c2d6b71456ebb0f9dc78d752e86a98da7c716f4f9470f5a4"
|
||||
dependencies = [
|
||||
"bitflags 2.9.1",
|
||||
"dispatch2",
|
||||
"objc2",
|
||||
"objc2-core-foundation",
|
||||
"objc2-io-surface",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "objc2-encode"
|
||||
version = "4.1.0"
|
||||
@@ -3071,6 +3155,18 @@ checksum = "900831247d2fe1a09a683278e5384cfb8c80c79fe6b166f9d14bfdde0ea1b03c"
|
||||
dependencies = [
|
||||
"bitflags 2.9.1",
|
||||
"objc2",
|
||||
"objc2-core-foundation",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "objc2-io-surface"
|
||||
version = "0.3.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "7282e9ac92529fa3457ce90ebb15f4ecbc383e8338060960760fa2cf75420c3c"
|
||||
dependencies = [
|
||||
"bitflags 2.9.1",
|
||||
"objc2",
|
||||
"objc2-core-foundation",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -3792,9 +3888,9 @@ checksum = "ba39f3699c378cd8970968dcbff9c43159ea4cfbd88d43c00b22f2ef10a435d2"
|
||||
|
||||
[[package]]
|
||||
name = "reqwest"
|
||||
version = "0.12.22"
|
||||
version = "0.12.23"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "cbc931937e6ca3a06e3b6c0aa7841849b160a90351d6ab467a8b9b9959767531"
|
||||
checksum = "d429f34c8092b2d42c7c93cec323bb4adeb7c67698f70839adec842ec10c7ceb"
|
||||
dependencies = [
|
||||
"base64 0.22.1",
|
||||
"bytes",
|
||||
@@ -4186,9 +4282,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "serde_json"
|
||||
version = "1.0.142"
|
||||
version = "1.0.143"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "030fedb782600dcbd6f02d479bf0d817ac3bb40d644745b769d6a96bc3afc5a7"
|
||||
checksum = "d401abef1d108fbd9cbaebc3e46611f4b1021f714a0597a71f41ee463f5f4a5a"
|
||||
dependencies = [
|
||||
"indexmap 2.10.0",
|
||||
"itoa",
|
||||
@@ -5599,6 +5695,18 @@ version = "0.1.10"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "a751b3277700db47d3e574514de2eced5e54dc8a5436a3bf7a0b248b2cee16f3"
|
||||
|
||||
[[package]]
|
||||
name = "which"
|
||||
version = "6.0.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "b4ee928febd44d98f2f459a4a79bd4d928591333a494a10a868418ac1b39cf1f"
|
||||
dependencies = [
|
||||
"either",
|
||||
"home",
|
||||
"rustix 0.38.44",
|
||||
"winsafe",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "whoami"
|
||||
version = "1.6.0"
|
||||
@@ -5832,6 +5940,21 @@ dependencies = [
|
||||
"windows_x86_64_msvc 0.42.2",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "windows-targets"
|
||||
version = "0.48.5"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "9a2fa6e2155d7247be68c096456083145c183cbbbc2764150dda45a87197940c"
|
||||
dependencies = [
|
||||
"windows_aarch64_gnullvm 0.48.5",
|
||||
"windows_aarch64_msvc 0.48.5",
|
||||
"windows_i686_gnu 0.48.5",
|
||||
"windows_i686_msvc 0.48.5",
|
||||
"windows_x86_64_gnu 0.48.5",
|
||||
"windows_x86_64_gnullvm 0.48.5",
|
||||
"windows_x86_64_msvc 0.48.5",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "windows-targets"
|
||||
version = "0.52.6"
|
||||
@@ -5870,6 +5993,12 @@ version = "0.42.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "597a5118570b68bc08d8d59125332c54f1ba9d9adeedeef5b99b02ba2b0698f8"
|
||||
|
||||
[[package]]
|
||||
name = "windows_aarch64_gnullvm"
|
||||
version = "0.48.5"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8"
|
||||
|
||||
[[package]]
|
||||
name = "windows_aarch64_gnullvm"
|
||||
version = "0.52.6"
|
||||
@@ -5888,6 +6017,12 @@ version = "0.42.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "e08e8864a60f06ef0d0ff4ba04124db8b0fb3be5776a5cd47641e942e58c4d43"
|
||||
|
||||
[[package]]
|
||||
name = "windows_aarch64_msvc"
|
||||
version = "0.48.5"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc"
|
||||
|
||||
[[package]]
|
||||
name = "windows_aarch64_msvc"
|
||||
version = "0.52.6"
|
||||
@@ -5906,6 +6041,12 @@ version = "0.42.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "c61d927d8da41da96a81f029489353e68739737d3beca43145c8afec9a31a84f"
|
||||
|
||||
[[package]]
|
||||
name = "windows_i686_gnu"
|
||||
version = "0.48.5"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e"
|
||||
|
||||
[[package]]
|
||||
name = "windows_i686_gnu"
|
||||
version = "0.52.6"
|
||||
@@ -5936,6 +6077,12 @@ version = "0.42.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "44d840b6ec649f480a41c8d80f9c65108b92d89345dd94027bfe06ac444d1060"
|
||||
|
||||
[[package]]
|
||||
name = "windows_i686_msvc"
|
||||
version = "0.48.5"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406"
|
||||
|
||||
[[package]]
|
||||
name = "windows_i686_msvc"
|
||||
version = "0.52.6"
|
||||
@@ -5954,6 +6101,12 @@ version = "0.42.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "8de912b8b8feb55c064867cf047dda097f92d51efad5b491dfb98f6bbb70cb36"
|
||||
|
||||
[[package]]
|
||||
name = "windows_x86_64_gnu"
|
||||
version = "0.48.5"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e"
|
||||
|
||||
[[package]]
|
||||
name = "windows_x86_64_gnu"
|
||||
version = "0.52.6"
|
||||
@@ -5972,6 +6125,12 @@ version = "0.42.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "26d41b46a36d453748aedef1486d5c7a85db22e56aff34643984ea85514e94a3"
|
||||
|
||||
[[package]]
|
||||
name = "windows_x86_64_gnullvm"
|
||||
version = "0.48.5"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc"
|
||||
|
||||
[[package]]
|
||||
name = "windows_x86_64_gnullvm"
|
||||
version = "0.52.6"
|
||||
@@ -5990,6 +6149,12 @@ version = "0.42.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "9aec5da331524158c6d1a4ac0ab1541149c0b9505fde06423b02f5ef0106b9f0"
|
||||
|
||||
[[package]]
|
||||
name = "windows_x86_64_msvc"
|
||||
version = "0.48.5"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538"
|
||||
|
||||
[[package]]
|
||||
name = "windows_x86_64_msvc"
|
||||
version = "0.52.6"
|
||||
@@ -6011,6 +6176,12 @@ dependencies = [
|
||||
"memchr",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "winsafe"
|
||||
version = "0.0.19"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "d135d17ab770252ad95e9a872d365cf3090e3be864a34ab46f48555993efc904"
|
||||
|
||||
[[package]]
|
||||
name = "wiremock"
|
||||
version = "0.6.4"
|
||||
@@ -6050,6 +6221,23 @@ version = "0.6.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "ea2f10b9bb0928dfb1b42b65e1f9e36f7f54dbdf08457afefb38afcdec4fa2bb"
|
||||
|
||||
[[package]]
|
||||
name = "x11rb"
|
||||
version = "0.13.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "5d91ffca73ee7f68ce055750bf9f6eca0780b8c85eff9bc046a3b0da41755e12"
|
||||
dependencies = [
|
||||
"gethostname",
|
||||
"rustix 0.38.44",
|
||||
"x11rb-protocol",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "x11rb-protocol"
|
||||
version = "0.13.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "ec107c4503ea0b4a98ef47356329af139c0a4f7750e621cf2973cd3385ebcb3d"
|
||||
|
||||
[[package]]
|
||||
name = "yaml-rust"
|
||||
version = "0.4.5"
|
||||
|
||||
@@ -1,17 +1,23 @@
|
||||
To edit files, ALWAYS use the `shell` tool with `apply_patch` CLI. `apply_patch` effectively allows you to execute a diff/patch against a file, but the format of the diff specification is unique to this task, so pay careful attention to these instructions. To use the `apply_patch` CLI, you should call the shell tool with the following structure:
|
||||
## `apply_patch`
|
||||
|
||||
```bash
|
||||
{"cmd": ["apply_patch", "<<'EOF'\\n*** Begin Patch\\n[YOUR_PATCH]\\n*** End Patch\\nEOF\\n"], "workdir": "..."}
|
||||
```
|
||||
Use the `apply_patch` shell command to edit files.
|
||||
Your patch language is a stripped‑down, file‑oriented diff format designed to be easy to parse and safe to apply. You can think of it as a high‑level envelope:
|
||||
|
||||
Where [YOUR_PATCH] is the actual content of your patch, specified in the following V4A diff format.
|
||||
*** Begin Patch
|
||||
[ one or more file sections ]
|
||||
*** End Patch
|
||||
|
||||
*** [ACTION] File: [path/to/file] -> ACTION can be one of Add, Update, or Delete.
|
||||
For each snippet of code that needs to be changed, repeat the following:
|
||||
[context_before] -> See below for further instructions on context.
|
||||
- [old_code] -> Precede the old code with a minus sign.
|
||||
+ [new_code] -> Precede the new, replacement code with a plus sign.
|
||||
[context_after] -> See below for further instructions on context.
|
||||
Within that envelope, you get a sequence of file operations.
|
||||
You MUST include a header to specify the action you are taking.
|
||||
Each operation starts with one of three headers:
|
||||
|
||||
*** Add File: <path> - create a new file. Every following line is a + line (the initial contents).
|
||||
*** Delete File: <path> - remove an existing file. Nothing follows.
|
||||
*** Update File: <path> - patch an existing file in place (optionally with a rename).
|
||||
|
||||
May be immediately followed by *** Move to: <new path> if you want to rename the file.
|
||||
Then one or more “hunks”, each introduced by @@ (optionally followed by a hunk header).
|
||||
Within a hunk each line starts with:
|
||||
|
||||
For instructions on [context_before] and [context_after]:
|
||||
- By default, show 3 lines of code immediately above and 3 lines immediately below each change. If a change is within 3 lines of a previous change, do NOT duplicate the first change’s [context_after] lines in the second change’s [context_before] lines.
|
||||
@@ -25,16 +31,45 @@ For instructions on [context_before] and [context_after]:
|
||||
- If a code block is repeated so many times in a class or function such that even a single `@@` statement and 3 lines of context cannot uniquely identify the snippet of code, you can use multiple `@@` statements to jump to the right context. For instance:
|
||||
|
||||
@@ class BaseClass
|
||||
@@ def method():
|
||||
@@ def method():
|
||||
[3 lines of pre-context]
|
||||
- [old_code]
|
||||
+ [new_code]
|
||||
[3 lines of post-context]
|
||||
|
||||
Note, then, that we do not use line numbers in this diff format, as the context is enough to uniquely identify code. An example of a message that you might pass as "input" to this function, in order to apply a patch, is shown below.
|
||||
The full grammar definition is below:
|
||||
Patch := Begin { FileOp } End
|
||||
Begin := "*** Begin Patch" NEWLINE
|
||||
End := "*** End Patch" NEWLINE
|
||||
FileOp := AddFile | DeleteFile | UpdateFile
|
||||
AddFile := "*** Add File: " path NEWLINE { "+" line NEWLINE }
|
||||
DeleteFile := "*** Delete File: " path NEWLINE
|
||||
UpdateFile := "*** Update File: " path NEWLINE [ MoveTo ] { Hunk }
|
||||
MoveTo := "*** Move to: " newPath NEWLINE
|
||||
Hunk := "@@" [ header ] NEWLINE { HunkLine } [ "*** End of File" NEWLINE ]
|
||||
HunkLine := (" " | "-" | "+") text NEWLINE
|
||||
|
||||
A full patch can combine several operations:
|
||||
|
||||
*** Begin Patch
|
||||
*** Add File: hello.txt
|
||||
+Hello world
|
||||
*** Update File: src/app.py
|
||||
*** Move to: src/main.py
|
||||
@@ def greet():
|
||||
-print("Hi")
|
||||
+print("Hello, world!")
|
||||
*** Delete File: obsolete.txt
|
||||
*** End Patch
|
||||
|
||||
It is important to remember:
|
||||
|
||||
- You must include a header with your intended action (Add/Delete/Update)
|
||||
- You must prefix new lines with `+` even when creating a new file
|
||||
- File references can only be relative, NEVER ABSOLUTE.
|
||||
|
||||
You can invoke apply_patch like:
|
||||
|
||||
```bash
|
||||
{"cmd": ["apply_patch", "<<'EOF'\\n*** Begin Patch\\n*** Update File: pygorithm/searching/binary_search.py\\n@@ class BaseClass\\n@@ def search():\\n- pass\\n+ raise NotImplementedError()\\n@@ class Subclass\\n@@ def search():\\n- pass\\n+ raise NotImplementedError()\\n*** End Patch\\nEOF\\n"], "workdir": "..."}
|
||||
```
|
||||
|
||||
File references can only be relative, NEVER ABSOLUTE. After the apply_patch command is run, it will always say "Done!", regardless of whether the patch was successfully applied or not. However, you can determine if there are issue and errors by looking at any warnings or logging lines printed BEFORE the "Done!" is output.
|
||||
shell {"command":["apply_patch","*** Begin Patch\n*** Add File: hello.txt\n+Hello, world!\n*** End Patch\n"]}
|
||||
```
|
||||
|
||||
@@ -159,7 +159,7 @@ async fn cli_main(codex_linux_sandbox_exe: Option<PathBuf>) -> anyhow::Result<()
|
||||
codex_exec::run_main(exec_cli, codex_linux_sandbox_exe).await?;
|
||||
}
|
||||
Some(Subcommand::Mcp) => {
|
||||
codex_mcp_server::run_main(codex_linux_sandbox_exe).await?;
|
||||
codex_mcp_server::run_main(codex_linux_sandbox_exe, cli.config_overrides).await?;
|
||||
}
|
||||
Some(Subcommand::Login(mut login_cli)) => {
|
||||
prepend_config_flags(&mut login_cli.config_overrides, cli.config_overrides);
|
||||
|
||||
@@ -9,6 +9,7 @@ use codex_core::config::ConfigOverrides;
|
||||
use codex_core::protocol::Event;
|
||||
use codex_core::protocol::EventMsg;
|
||||
use codex_core::protocol::Submission;
|
||||
use codex_login::AuthManager;
|
||||
use tokio::io::AsyncBufReadExt;
|
||||
use tokio::io::BufReader;
|
||||
use tracing::error;
|
||||
@@ -36,7 +37,10 @@ pub async fn run_main(opts: ProtoCli) -> anyhow::Result<()> {
|
||||
|
||||
let config = Config::load_with_cli_overrides(overrides_vec, ConfigOverrides::default())?;
|
||||
// Use conversation_manager API to start a conversation
|
||||
let conversation_manager = ConversationManager::default();
|
||||
let conversation_manager = ConversationManager::new(AuthManager::shared(
|
||||
config.codex_home.clone(),
|
||||
config.preferred_auth_method,
|
||||
));
|
||||
let NewConversation {
|
||||
conversation_id: _,
|
||||
conversation,
|
||||
|
||||
@@ -243,6 +243,25 @@ To disable reasoning summaries, set `model_reasoning_summary` to `"none"` in you
|
||||
model_reasoning_summary = "none" # disable reasoning summaries
|
||||
```
|
||||
|
||||
## model_verbosity
|
||||
|
||||
Controls output length/detail on GPT‑5 family models when using the Responses API. Supported values:
|
||||
|
||||
- `"low"`
|
||||
- `"medium"` (default when omitted)
|
||||
- `"high"`
|
||||
|
||||
When set, Codex includes a `text` object in the request payload with the configured verbosity, for example: `"text": { "verbosity": "low" }`.
|
||||
|
||||
Example:
|
||||
|
||||
```toml
|
||||
model = "gpt-5"
|
||||
model_verbosity = "low"
|
||||
```
|
||||
|
||||
Note: This applies only to providers using the Responses API. Chat Completions providers are unaffected.
|
||||
|
||||
## model_supports_reasoning_summaries
|
||||
|
||||
By default, `reasoning` is only set on requests to OpenAI models that are known to support them. To force `reasoning` to set on requests to the current model, you can force this behavior by setting the following in `config.toml`:
|
||||
|
||||
@@ -71,6 +71,9 @@ openssl-sys = { version = "*", features = ["vendored"] }
|
||||
[target.aarch64-unknown-linux-musl.dependencies]
|
||||
openssl-sys = { version = "*", features = ["vendored"] }
|
||||
|
||||
[target.'cfg(target_os = "windows")'.dependencies]
|
||||
which = "6"
|
||||
|
||||
[dev-dependencies]
|
||||
assert_cmd = "2"
|
||||
core_test_support = { path = "tests/common" }
|
||||
|
||||
@@ -270,67 +270,6 @@ When using the shell, you must adhere to the following guidelines:
|
||||
- When searching for text or files, prefer using `rg` or `rg --files` respectively because `rg` is much faster than alternatives like `grep`. (If the `rg` command is not found, then use alternatives.)
|
||||
- Read files in chunks with a max chunk size of 250 lines. Do not use python scripts to attempt to output larger chunks of a file. Command line output will be truncated after 10 kilobytes or 256 lines of output, regardless of the command used.
|
||||
|
||||
## `apply_patch`
|
||||
|
||||
Your patch language is a stripped‑down, file‑oriented diff format designed to be easy to parse and safe to apply. You can think of it as a high‑level envelope:
|
||||
|
||||
**_ Begin Patch
|
||||
[ one or more file sections ]
|
||||
_** End Patch
|
||||
|
||||
Within that envelope, you get a sequence of file operations.
|
||||
You MUST include a header to specify the action you are taking.
|
||||
Each operation starts with one of three headers:
|
||||
|
||||
**_ Add File: <path> - create a new file. Every following line is a + line (the initial contents).
|
||||
_** Delete File: <path> - remove an existing file. Nothing follows.
|
||||
\*\*\* Update File: <path> - patch an existing file in place (optionally with a rename).
|
||||
|
||||
May be immediately followed by \*\*\* Move to: <new path> if you want to rename the file.
|
||||
Then one or more “hunks”, each introduced by @@ (optionally followed by a hunk header).
|
||||
Within a hunk each line starts with:
|
||||
|
||||
- for inserted text,
|
||||
|
||||
* for removed text, or
|
||||
space ( ) for context.
|
||||
At the end of a truncated hunk you can emit \*\*\* End of File.
|
||||
|
||||
Patch := Begin { FileOp } End
|
||||
Begin := "**_ Begin Patch" NEWLINE
|
||||
End := "_** End Patch" NEWLINE
|
||||
FileOp := AddFile | DeleteFile | UpdateFile
|
||||
AddFile := "**_ Add File: " path NEWLINE { "+" line NEWLINE }
|
||||
DeleteFile := "_** Delete File: " path NEWLINE
|
||||
UpdateFile := "**_ Update File: " path NEWLINE [ MoveTo ] { Hunk }
|
||||
MoveTo := "_** Move to: " newPath NEWLINE
|
||||
Hunk := "@@" [ header ] NEWLINE { HunkLine } [ "*** End of File" NEWLINE ]
|
||||
HunkLine := (" " | "-" | "+") text NEWLINE
|
||||
|
||||
A full patch can combine several operations:
|
||||
|
||||
**_ Begin Patch
|
||||
_** Add File: hello.txt
|
||||
+Hello world
|
||||
**_ Update File: src/app.py
|
||||
_** Move to: src/main.py
|
||||
@@ def greet():
|
||||
-print("Hi")
|
||||
+print("Hello, world!")
|
||||
**_ Delete File: obsolete.txt
|
||||
_** End Patch
|
||||
|
||||
It is important to remember:
|
||||
|
||||
- You must include a header with your intended action (Add/Delete/Update)
|
||||
- You must prefix new lines with `+` even when creating a new file
|
||||
|
||||
You can invoke apply_patch like:
|
||||
|
||||
```
|
||||
shell {"command":["apply_patch","*** Begin Patch\n*** Add File: hello.txt\n+Hello, world!\n*** End Patch\n"]}
|
||||
```
|
||||
|
||||
## `update_plan`
|
||||
|
||||
A tool named `update_plan` is available to you. You can use it to keep an up‑to‑date, step‑by‑step plan for the task.
|
||||
|
||||
@@ -1,13 +1,13 @@
|
||||
use crate::codex::Session;
|
||||
use crate::codex::TurnContext;
|
||||
use crate::models::FunctionCallOutputPayload;
|
||||
use crate::models::ResponseInputItem;
|
||||
use crate::protocol::FileChange;
|
||||
use crate::protocol::ReviewDecision;
|
||||
use crate::safety::SafetyCheck;
|
||||
use crate::safety::assess_patch_safety;
|
||||
use codex_apply_patch::ApplyPatchAction;
|
||||
use codex_apply_patch::ApplyPatchFileChange;
|
||||
use codex_protocol::models::FunctionCallOutputPayload;
|
||||
use codex_protocol::models::ResponseInputItem;
|
||||
use std::collections::HashMap;
|
||||
use std::path::PathBuf;
|
||||
|
||||
|
||||
@@ -22,11 +22,11 @@ use crate::client_common::ResponseStream;
|
||||
use crate::error::CodexErr;
|
||||
use crate::error::Result;
|
||||
use crate::model_family::ModelFamily;
|
||||
use crate::models::ContentItem;
|
||||
use crate::models::ReasoningItemContent;
|
||||
use crate::models::ResponseItem;
|
||||
use crate::openai_tools::create_tools_json_for_chat_completions_api;
|
||||
use crate::util::backoff;
|
||||
use codex_protocol::models::ContentItem;
|
||||
use codex_protocol::models::ReasoningItemContent;
|
||||
use codex_protocol::models::ResponseItem;
|
||||
|
||||
/// Implementation for the classic Chat Completions API.
|
||||
pub(crate) async fn stream_chat_completions(
|
||||
@@ -102,6 +102,33 @@ pub(crate) async fn stream_chat_completions(
|
||||
"content": output.content,
|
||||
}));
|
||||
}
|
||||
ResponseItem::CustomToolCall {
|
||||
id,
|
||||
call_id: _,
|
||||
name,
|
||||
input,
|
||||
status: _,
|
||||
} => {
|
||||
messages.push(json!({
|
||||
"role": "assistant",
|
||||
"content": null,
|
||||
"tool_calls": [{
|
||||
"id": id,
|
||||
"type": "custom",
|
||||
"custom": {
|
||||
"name": name,
|
||||
"input": input,
|
||||
}
|
||||
}]
|
||||
}));
|
||||
}
|
||||
ResponseItem::CustomToolCallOutput { call_id, output } => {
|
||||
messages.push(json!({
|
||||
"role": "tool",
|
||||
"tool_call_id": call_id,
|
||||
"content": output,
|
||||
}));
|
||||
}
|
||||
ResponseItem::Reasoning { .. } | ResponseItem::Other => {
|
||||
// Omit these items from the conversation history.
|
||||
continue;
|
||||
@@ -482,16 +509,19 @@ where
|
||||
// do NOT emit yet. Forward any other item (e.g. FunctionCall) right
|
||||
// away so downstream consumers see it.
|
||||
|
||||
let is_assistant_delta = matches!(&item, crate::models::ResponseItem::Message { role, .. } if role == "assistant");
|
||||
let is_assistant_delta = matches!(&item, codex_protocol::models::ResponseItem::Message { role, .. } if role == "assistant");
|
||||
|
||||
if is_assistant_delta {
|
||||
// Only use the final assistant message if we have not
|
||||
// seen any deltas; otherwise, deltas already built the
|
||||
// cumulative text and this would duplicate it.
|
||||
if this.cumulative.is_empty()
|
||||
&& let crate::models::ResponseItem::Message { content, .. } = &item
|
||||
&& let codex_protocol::models::ResponseItem::Message { content, .. } =
|
||||
&item
|
||||
&& let Some(text) = content.iter().find_map(|c| match c {
|
||||
crate::models::ContentItem::OutputText { text } => Some(text),
|
||||
codex_protocol::models::ContentItem::OutputText { text } => {
|
||||
Some(text)
|
||||
}
|
||||
_ => None,
|
||||
})
|
||||
{
|
||||
@@ -515,26 +545,27 @@ where
|
||||
if !this.cumulative_reasoning.is_empty()
|
||||
&& matches!(this.mode, AggregateMode::AggregatedOnly)
|
||||
{
|
||||
let aggregated_reasoning = crate::models::ResponseItem::Reasoning {
|
||||
id: String::new(),
|
||||
summary: Vec::new(),
|
||||
content: Some(vec![
|
||||
crate::models::ReasoningItemContent::ReasoningText {
|
||||
text: std::mem::take(&mut this.cumulative_reasoning),
|
||||
},
|
||||
]),
|
||||
encrypted_content: None,
|
||||
};
|
||||
let aggregated_reasoning =
|
||||
codex_protocol::models::ResponseItem::Reasoning {
|
||||
id: String::new(),
|
||||
summary: Vec::new(),
|
||||
content: Some(vec![
|
||||
codex_protocol::models::ReasoningItemContent::ReasoningText {
|
||||
text: std::mem::take(&mut this.cumulative_reasoning),
|
||||
},
|
||||
]),
|
||||
encrypted_content: None,
|
||||
};
|
||||
this.pending
|
||||
.push_back(ResponseEvent::OutputItemDone(aggregated_reasoning));
|
||||
emitted_any = true;
|
||||
}
|
||||
|
||||
if !this.cumulative.is_empty() {
|
||||
let aggregated_message = crate::models::ResponseItem::Message {
|
||||
let aggregated_message = codex_protocol::models::ResponseItem::Message {
|
||||
id: None,
|
||||
role: "assistant".to_string(),
|
||||
content: vec![crate::models::ContentItem::OutputText {
|
||||
content: vec![codex_protocol::models::ContentItem::OutputText {
|
||||
text: std::mem::take(&mut this.cumulative),
|
||||
}],
|
||||
};
|
||||
|
||||
@@ -4,8 +4,8 @@ use std::sync::OnceLock;
|
||||
use std::time::Duration;
|
||||
|
||||
use bytes::Bytes;
|
||||
use codex_login::AuthManager;
|
||||
use codex_login::AuthMode;
|
||||
use codex_login::CodexAuth;
|
||||
use eventsource_stream::Eventsource;
|
||||
use futures::prelude::*;
|
||||
use regex_lite::Regex;
|
||||
@@ -28,6 +28,7 @@ use crate::client_common::ResponseEvent;
|
||||
use crate::client_common::ResponseStream;
|
||||
use crate::client_common::ResponsesApiRequest;
|
||||
use crate::client_common::create_reasoning_param_for_request;
|
||||
use crate::client_common::create_text_param_for_request;
|
||||
use crate::config::Config;
|
||||
use crate::error::CodexErr;
|
||||
use crate::error::Result;
|
||||
@@ -36,13 +37,13 @@ use crate::flags::CODEX_RS_SSE_FIXTURE;
|
||||
use crate::model_family::ModelFamily;
|
||||
use crate::model_provider_info::ModelProviderInfo;
|
||||
use crate::model_provider_info::WireApi;
|
||||
use crate::models::ResponseItem;
|
||||
use crate::openai_tools::create_tools_json_for_responses_api;
|
||||
use crate::protocol::TokenUsage;
|
||||
use crate::user_agent::get_codex_user_agent;
|
||||
use crate::util::backoff;
|
||||
use codex_protocol::config_types::ReasoningEffort as ReasoningEffortConfig;
|
||||
use codex_protocol::config_types::ReasoningSummary as ReasoningSummaryConfig;
|
||||
use codex_protocol::models::ResponseItem;
|
||||
use std::sync::Arc;
|
||||
|
||||
#[derive(Debug, Deserialize)]
|
||||
@@ -60,7 +61,7 @@ struct Error {
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct ModelClient {
|
||||
config: Arc<Config>,
|
||||
auth: Option<CodexAuth>,
|
||||
auth_manager: Option<Arc<AuthManager>>,
|
||||
client: reqwest::Client,
|
||||
provider: ModelProviderInfo,
|
||||
session_id: Uuid,
|
||||
@@ -71,7 +72,7 @@ pub struct ModelClient {
|
||||
impl ModelClient {
|
||||
pub fn new(
|
||||
config: Arc<Config>,
|
||||
auth: Option<CodexAuth>,
|
||||
auth_manager: Option<Arc<AuthManager>>,
|
||||
provider: ModelProviderInfo,
|
||||
effort: ReasoningEffortConfig,
|
||||
summary: ReasoningSummaryConfig,
|
||||
@@ -79,7 +80,7 @@ impl ModelClient {
|
||||
) -> Self {
|
||||
Self {
|
||||
config,
|
||||
auth,
|
||||
auth_manager,
|
||||
client: reqwest::Client::new(),
|
||||
provider,
|
||||
session_id,
|
||||
@@ -140,7 +141,8 @@ impl ModelClient {
|
||||
return stream_from_fixture(path, self.provider.clone()).await;
|
||||
}
|
||||
|
||||
let auth = self.auth.clone();
|
||||
let auth_manager = self.auth_manager.clone();
|
||||
let auth = auth_manager.as_ref().and_then(|m| m.auth());
|
||||
|
||||
let auth_mode = auth.as_ref().map(|a| a.mode);
|
||||
|
||||
@@ -164,6 +166,19 @@ impl ModelClient {
|
||||
|
||||
let input_with_instructions = prompt.get_formatted_input();
|
||||
|
||||
// Only include `text.verbosity` for GPT-5 family models
|
||||
let text = if self.config.model_family.family == "gpt-5" {
|
||||
create_text_param_for_request(self.config.model_verbosity)
|
||||
} else {
|
||||
if self.config.model_verbosity.is_some() {
|
||||
warn!(
|
||||
"model_verbosity is set but ignored for non-gpt-5 model family: {}",
|
||||
self.config.model_family.family
|
||||
);
|
||||
}
|
||||
None
|
||||
};
|
||||
|
||||
let payload = ResponsesApiRequest {
|
||||
model: &self.config.model,
|
||||
instructions: &full_instructions,
|
||||
@@ -176,6 +191,7 @@ impl ModelClient {
|
||||
stream: true,
|
||||
include,
|
||||
prompt_cache_key: Some(self.session_id.to_string()),
|
||||
text,
|
||||
};
|
||||
|
||||
let mut attempt = 0;
|
||||
@@ -249,9 +265,10 @@ impl ModelClient {
|
||||
.and_then(|s| s.parse::<u64>().ok());
|
||||
|
||||
if status == StatusCode::UNAUTHORIZED
|
||||
&& let Some(a) = auth.as_ref()
|
||||
&& let Some(manager) = auth_manager.as_ref()
|
||||
&& manager.auth().is_some()
|
||||
{
|
||||
let _ = a.refresh_token().await;
|
||||
let _ = manager.refresh_token().await;
|
||||
}
|
||||
|
||||
// The OpenAI Responses endpoint returns structured JSON bodies even for 4xx/5xx
|
||||
@@ -338,8 +355,8 @@ impl ModelClient {
|
||||
self.summary
|
||||
}
|
||||
|
||||
pub fn get_auth(&self) -> Option<CodexAuth> {
|
||||
self.auth.clone()
|
||||
pub fn get_auth_manager(&self) -> Option<Arc<AuthManager>> {
|
||||
self.auth_manager.clone()
|
||||
}
|
||||
}
|
||||
|
||||
@@ -558,6 +575,8 @@ async fn process_sse<S>(
|
||||
}
|
||||
"response.content_part.done"
|
||||
| "response.function_call_arguments.delta"
|
||||
| "response.custom_tool_call_input.delta"
|
||||
| "response.custom_tool_call_input.done" // also emitted as response.output_item.done
|
||||
| "response.in_progress"
|
||||
| "response.output_item.added"
|
||||
| "response.output_text.done" => {
|
||||
|
||||
@@ -1,12 +1,13 @@
|
||||
use crate::config_types::Verbosity as VerbosityConfig;
|
||||
use crate::error::Result;
|
||||
use crate::model_family::ModelFamily;
|
||||
use crate::models::ContentItem;
|
||||
use crate::models::ResponseItem;
|
||||
use crate::openai_tools::OpenAiTool;
|
||||
use crate::protocol::TokenUsage;
|
||||
use codex_apply_patch::APPLY_PATCH_TOOL_INSTRUCTIONS;
|
||||
use codex_protocol::config_types::ReasoningEffort as ReasoningEffortConfig;
|
||||
use codex_protocol::config_types::ReasoningSummary as ReasoningSummaryConfig;
|
||||
use codex_protocol::models::ContentItem;
|
||||
use codex_protocol::models::ResponseItem;
|
||||
use futures::Stream;
|
||||
use serde::Serialize;
|
||||
use std::borrow::Cow;
|
||||
@@ -47,7 +48,18 @@ impl Prompt {
|
||||
.as_deref()
|
||||
.unwrap_or(BASE_INSTRUCTIONS);
|
||||
let mut sections: Vec<&str> = vec![base];
|
||||
if model.needs_special_apply_patch_instructions {
|
||||
|
||||
// When there are no custom instructions, add apply_patch_tool_instructions if either:
|
||||
// - the model needs special instructions (4.1), or
|
||||
// - there is no apply_patch tool present
|
||||
let is_apply_patch_tool_present = self.tools.iter().any(|tool| match tool {
|
||||
OpenAiTool::Function(f) => f.name == "apply_patch",
|
||||
OpenAiTool::Freeform(f) => f.name == "apply_patch",
|
||||
_ => false,
|
||||
});
|
||||
if self.base_instructions_override.is_none()
|
||||
&& (model.needs_special_apply_patch_instructions || !is_apply_patch_tool_present)
|
||||
{
|
||||
sections.push(APPLY_PATCH_TOOL_INSTRUCTIONS);
|
||||
}
|
||||
Cow::Owned(sections.join("\n"))
|
||||
@@ -89,6 +101,32 @@ pub(crate) struct Reasoning {
|
||||
pub(crate) summary: ReasoningSummaryConfig,
|
||||
}
|
||||
|
||||
/// Controls under the `text` field in the Responses API for GPT-5.
|
||||
#[derive(Debug, Serialize, Default, Clone, Copy)]
|
||||
pub(crate) struct TextControls {
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub(crate) verbosity: Option<OpenAiVerbosity>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Default, Clone, Copy)]
|
||||
#[serde(rename_all = "lowercase")]
|
||||
pub(crate) enum OpenAiVerbosity {
|
||||
Low,
|
||||
#[default]
|
||||
Medium,
|
||||
High,
|
||||
}
|
||||
|
||||
impl From<VerbosityConfig> for OpenAiVerbosity {
|
||||
fn from(v: VerbosityConfig) -> Self {
|
||||
match v {
|
||||
VerbosityConfig::Low => OpenAiVerbosity::Low,
|
||||
VerbosityConfig::Medium => OpenAiVerbosity::Medium,
|
||||
VerbosityConfig::High => OpenAiVerbosity::High,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Request object that is serialized as JSON and POST'ed when using the
|
||||
/// Responses API.
|
||||
#[derive(Debug, Serialize)]
|
||||
@@ -109,6 +147,8 @@ pub(crate) struct ResponsesApiRequest<'a> {
|
||||
pub(crate) include: Vec<String>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub(crate) prompt_cache_key: Option<String>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub(crate) text: Option<TextControls>,
|
||||
}
|
||||
|
||||
pub(crate) fn create_reasoning_param_for_request(
|
||||
@@ -123,6 +163,14 @@ pub(crate) fn create_reasoning_param_for_request(
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn create_text_param_for_request(
|
||||
verbosity: Option<VerbosityConfig>,
|
||||
) -> Option<TextControls> {
|
||||
verbosity.map(|v| TextControls {
|
||||
verbosity: Some(v.into()),
|
||||
})
|
||||
}
|
||||
|
||||
pub(crate) struct ResponseStream {
|
||||
pub(crate) rx_event: mpsc::Receiver<Result<ResponseEvent>>,
|
||||
}
|
||||
@@ -151,4 +199,57 @@ mod tests {
|
||||
let full = prompt.get_full_instructions(&model_family);
|
||||
assert_eq!(full, expected);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn serializes_text_verbosity_when_set() {
|
||||
let input: Vec<ResponseItem> = vec![];
|
||||
let tools: Vec<serde_json::Value> = vec![];
|
||||
let req = ResponsesApiRequest {
|
||||
model: "gpt-5",
|
||||
instructions: "i",
|
||||
input: &input,
|
||||
tools: &tools,
|
||||
tool_choice: "auto",
|
||||
parallel_tool_calls: false,
|
||||
reasoning: None,
|
||||
store: true,
|
||||
stream: true,
|
||||
include: vec![],
|
||||
prompt_cache_key: None,
|
||||
text: Some(TextControls {
|
||||
verbosity: Some(OpenAiVerbosity::Low),
|
||||
}),
|
||||
};
|
||||
|
||||
let v = serde_json::to_value(&req).expect("json");
|
||||
assert_eq!(
|
||||
v.get("text")
|
||||
.and_then(|t| t.get("verbosity"))
|
||||
.and_then(|s| s.as_str()),
|
||||
Some("low")
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn omits_text_when_not_set() {
|
||||
let input: Vec<ResponseItem> = vec![];
|
||||
let tools: Vec<serde_json::Value> = vec![];
|
||||
let req = ResponsesApiRequest {
|
||||
model: "gpt-5",
|
||||
instructions: "i",
|
||||
input: &input,
|
||||
tools: &tools,
|
||||
tool_choice: "auto",
|
||||
parallel_tool_calls: false,
|
||||
reasoning: None,
|
||||
store: true,
|
||||
stream: true,
|
||||
include: vec![],
|
||||
prompt_cache_key: None,
|
||||
text: None,
|
||||
};
|
||||
|
||||
let v = serde_json::to_value(&req).expect("json");
|
||||
assert!(v.get("text").is_none());
|
||||
}
|
||||
}
|
||||
|
||||
@@ -13,7 +13,7 @@ use async_channel::Sender;
|
||||
use codex_apply_patch::ApplyPatchAction;
|
||||
use codex_apply_patch::MaybeApplyPatchVerified;
|
||||
use codex_apply_patch::maybe_parse_apply_patch_verified;
|
||||
use codex_login::CodexAuth;
|
||||
use codex_login::AuthManager;
|
||||
use codex_protocol::protocol::TurnAbortReason;
|
||||
use codex_protocol::protocol::TurnAbortedEvent;
|
||||
use futures::prelude::*;
|
||||
@@ -56,14 +56,6 @@ use crate::exec_env::create_env;
|
||||
use crate::mcp_connection_manager::McpConnectionManager;
|
||||
use crate::mcp_tool_call::handle_mcp_tool_call;
|
||||
use crate::model_family::find_family_for_model;
|
||||
use crate::models::ContentItem;
|
||||
use crate::models::FunctionCallOutputPayload;
|
||||
use crate::models::LocalShellAction;
|
||||
use crate::models::ReasoningItemContent;
|
||||
use crate::models::ReasoningItemReasoningSummary;
|
||||
use crate::models::ResponseInputItem;
|
||||
use crate::models::ResponseItem;
|
||||
use crate::models::ShellToolCallParams;
|
||||
use crate::openai_tools::ApplyPatchToolArgs;
|
||||
use crate::openai_tools::ToolsConfig;
|
||||
use crate::openai_tools::get_openai_tools;
|
||||
@@ -94,6 +86,7 @@ use crate::protocol::PatchApplyEndEvent;
|
||||
use crate::protocol::ReviewDecision;
|
||||
use crate::protocol::SandboxPolicy;
|
||||
use crate::protocol::SessionConfiguredEvent;
|
||||
use crate::protocol::StreamErrorEvent;
|
||||
use crate::protocol::Submission;
|
||||
use crate::protocol::TaskCompleteEvent;
|
||||
use crate::protocol::TurnDiffEvent;
|
||||
@@ -107,6 +100,14 @@ use crate::user_notification::UserNotification;
|
||||
use crate::util::backoff;
|
||||
use codex_protocol::config_types::ReasoningEffort as ReasoningEffortConfig;
|
||||
use codex_protocol::config_types::ReasoningSummary as ReasoningSummaryConfig;
|
||||
use codex_protocol::models::ContentItem;
|
||||
use codex_protocol::models::FunctionCallOutputPayload;
|
||||
use codex_protocol::models::LocalShellAction;
|
||||
use codex_protocol::models::ReasoningItemContent;
|
||||
use codex_protocol::models::ReasoningItemReasoningSummary;
|
||||
use codex_protocol::models::ResponseInputItem;
|
||||
use codex_protocol::models::ResponseItem;
|
||||
use codex_protocol::models::ShellToolCallParams;
|
||||
|
||||
// A convenience extension trait for acquiring mutex locks where poisoning is
|
||||
// unrecoverable and should abort the program. This avoids scattered `.unwrap()`
|
||||
@@ -143,7 +144,10 @@ pub(crate) const INITIAL_SUBMIT_ID: &str = "";
|
||||
|
||||
impl Codex {
|
||||
/// Spawn a new [`Codex`] and initialize the session.
|
||||
pub async fn spawn(config: Config, auth: Option<CodexAuth>) -> CodexResult<CodexSpawnOk> {
|
||||
pub async fn spawn(
|
||||
config: Config,
|
||||
auth_manager: Arc<AuthManager>,
|
||||
) -> CodexResult<CodexSpawnOk> {
|
||||
let (tx_sub, rx_sub) = async_channel::bounded(64);
|
||||
let (tx_event, rx_event) = async_channel::unbounded();
|
||||
|
||||
@@ -168,13 +172,17 @@ impl Codex {
|
||||
};
|
||||
|
||||
// Generate a unique ID for the lifetime of this Codex session.
|
||||
let (session, turn_context) =
|
||||
Session::new(configure_session, config.clone(), auth, tx_event.clone())
|
||||
.await
|
||||
.map_err(|e| {
|
||||
error!("Failed to create session: {e:#}");
|
||||
CodexErr::InternalAgentDied
|
||||
})?;
|
||||
let (session, turn_context) = Session::new(
|
||||
configure_session,
|
||||
config.clone(),
|
||||
auth_manager.clone(),
|
||||
tx_event.clone(),
|
||||
)
|
||||
.await
|
||||
.map_err(|e| {
|
||||
error!("Failed to create session: {e:#}");
|
||||
CodexErr::InternalAgentDied
|
||||
})?;
|
||||
let session_id = session.session_id;
|
||||
|
||||
// This task will run until Op::Shutdown is received.
|
||||
@@ -322,7 +330,7 @@ impl Session {
|
||||
async fn new(
|
||||
configure_session: ConfigureSession,
|
||||
config: Arc<Config>,
|
||||
auth: Option<CodexAuth>,
|
||||
auth_manager: Arc<AuthManager>,
|
||||
tx_event: Sender<Event>,
|
||||
) -> anyhow::Result<(Arc<Self>, TurnContext)> {
|
||||
let ConfigureSession {
|
||||
@@ -466,7 +474,7 @@ impl Session {
|
||||
// construct the model client.
|
||||
let client = ModelClient::new(
|
||||
config.clone(),
|
||||
auth.clone(),
|
||||
Some(auth_manager.clone()),
|
||||
provider.clone(),
|
||||
model_reasoning_effort,
|
||||
model_reasoning_summary,
|
||||
@@ -508,9 +516,10 @@ impl Session {
|
||||
conversation_items.push(Prompt::format_user_instructions_message(user_instructions));
|
||||
}
|
||||
conversation_items.push(ResponseItem::from(EnvironmentContext::new(
|
||||
turn_context.cwd.to_path_buf(),
|
||||
turn_context.approval_policy,
|
||||
turn_context.sandbox_policy.clone(),
|
||||
Some(turn_context.cwd.clone()),
|
||||
Some(turn_context.approval_policy),
|
||||
Some(turn_context.sandbox_policy.clone()),
|
||||
Some(sess.user_shell.clone()),
|
||||
)));
|
||||
sess.record_conversation_items(&conversation_items).await;
|
||||
|
||||
@@ -814,6 +823,16 @@ impl Session {
|
||||
let _ = self.tx_event.send(event).await;
|
||||
}
|
||||
|
||||
async fn notify_stream_error(&self, sub_id: &str, message: impl Into<String>) {
|
||||
let event = Event {
|
||||
id: sub_id.to_string(),
|
||||
msg: EventMsg::StreamError(StreamErrorEvent {
|
||||
message: message.into(),
|
||||
}),
|
||||
};
|
||||
let _ = self.tx_event.send(event).await;
|
||||
}
|
||||
|
||||
/// Build the full turn input by concatenating the current conversation
|
||||
/// history with additional items for this turn.
|
||||
pub fn turn_input_with_history(&self, extra: Vec<ResponseItem>) -> Vec<ResponseItem> {
|
||||
@@ -1022,7 +1041,8 @@ async fn submission_loop(
|
||||
let effective_effort = effort.unwrap_or(prev.client.get_reasoning_effort());
|
||||
let effective_summary = summary.unwrap_or(prev.client.get_reasoning_summary());
|
||||
|
||||
let auth = prev.client.get_auth();
|
||||
let auth_manager = prev.client.get_auth_manager();
|
||||
|
||||
// Build updated config for the client
|
||||
let mut updated_config = (*config).clone();
|
||||
updated_config.model = effective_model.clone();
|
||||
@@ -1030,7 +1050,7 @@ async fn submission_loop(
|
||||
|
||||
let client = ModelClient::new(
|
||||
Arc::new(updated_config),
|
||||
auth,
|
||||
auth_manager,
|
||||
provider,
|
||||
effective_effort,
|
||||
effective_summary,
|
||||
@@ -1067,9 +1087,11 @@ async fn submission_loop(
|
||||
turn_context = Arc::new(new_turn_context);
|
||||
if cwd.is_some() || approval_policy.is_some() || sandbox_policy.is_some() {
|
||||
sess.record_conversation_items(&[ResponseItem::from(EnvironmentContext::new(
|
||||
new_cwd,
|
||||
new_approval_policy,
|
||||
new_sandbox_policy,
|
||||
cwd,
|
||||
approval_policy,
|
||||
sandbox_policy,
|
||||
// Shell is not configurable from turn to turn
|
||||
None,
|
||||
))])
|
||||
.await;
|
||||
}
|
||||
@@ -1384,29 +1406,38 @@ async fn run_task(
|
||||
},
|
||||
);
|
||||
}
|
||||
(
|
||||
ResponseItem::CustomToolCall { .. },
|
||||
Some(ResponseInputItem::CustomToolCallOutput { call_id, output }),
|
||||
) => {
|
||||
items_to_record_in_conversation_history.push(item);
|
||||
items_to_record_in_conversation_history.push(
|
||||
ResponseItem::CustomToolCallOutput {
|
||||
call_id: call_id.clone(),
|
||||
output: output.clone(),
|
||||
},
|
||||
);
|
||||
}
|
||||
(
|
||||
ResponseItem::FunctionCall { .. },
|
||||
Some(ResponseInputItem::McpToolCallOutput { call_id, result }),
|
||||
) => {
|
||||
items_to_record_in_conversation_history.push(item);
|
||||
let (content, success): (String, Option<bool>) = match result {
|
||||
Ok(CallToolResult {
|
||||
content,
|
||||
is_error,
|
||||
structured_content: _,
|
||||
}) => match serde_json::to_string(content) {
|
||||
Ok(content) => (content, *is_error),
|
||||
Err(e) => {
|
||||
warn!("Failed to serialize MCP tool call output: {e}");
|
||||
(e.to_string(), Some(true))
|
||||
}
|
||||
let output = match result {
|
||||
Ok(call_tool_result) => {
|
||||
convert_call_tool_result_to_function_call_output_payload(
|
||||
call_tool_result,
|
||||
)
|
||||
}
|
||||
Err(err) => FunctionCallOutputPayload {
|
||||
content: err.clone(),
|
||||
success: Some(false),
|
||||
},
|
||||
Err(e) => (e.clone(), Some(true)),
|
||||
};
|
||||
items_to_record_in_conversation_history.push(
|
||||
ResponseItem::FunctionCallOutput {
|
||||
call_id: call_id.clone(),
|
||||
output: FunctionCallOutputPayload { content, success },
|
||||
output,
|
||||
},
|
||||
);
|
||||
}
|
||||
@@ -1520,7 +1551,7 @@ async fn run_turn(
|
||||
// Surface retry information to any UI/front‑end so the
|
||||
// user understands what is happening instead of staring
|
||||
// at a seemingly frozen screen.
|
||||
sess.notify_background_event(
|
||||
sess.notify_stream_error(
|
||||
&sub_id,
|
||||
format!(
|
||||
"stream error: {e}; retrying {retries}/{max_retries} in {delay:?}…"
|
||||
@@ -1564,6 +1595,7 @@ async fn try_run_turn(
|
||||
call_id: Some(call_id),
|
||||
..
|
||||
} => Some(call_id),
|
||||
ResponseItem::CustomToolCallOutput { call_id, .. } => Some(call_id),
|
||||
_ => None,
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
@@ -1581,6 +1613,7 @@ async fn try_run_turn(
|
||||
call_id: Some(call_id),
|
||||
..
|
||||
} => Some(call_id),
|
||||
ResponseItem::CustomToolCall { call_id, .. } => Some(call_id),
|
||||
_ => None,
|
||||
})
|
||||
.filter_map(|call_id| {
|
||||
@@ -1590,12 +1623,9 @@ async fn try_run_turn(
|
||||
Some(call_id.clone())
|
||||
}
|
||||
})
|
||||
.map(|call_id| ResponseItem::FunctionCallOutput {
|
||||
.map(|call_id| ResponseItem::CustomToolCallOutput {
|
||||
call_id: call_id.clone(),
|
||||
output: FunctionCallOutputPayload {
|
||||
content: "aborted".to_string(),
|
||||
success: Some(false),
|
||||
},
|
||||
output: "aborted".to_string(),
|
||||
})
|
||||
.collect::<Vec<_>>()
|
||||
};
|
||||
@@ -1755,7 +1785,7 @@ async fn run_compact_task(
|
||||
if retries < max_retries {
|
||||
retries += 1;
|
||||
let delay = backoff(retries);
|
||||
sess.notify_background_event(
|
||||
sess.notify_stream_error(
|
||||
&sub_id,
|
||||
format!(
|
||||
"stream error: {e}; retrying {retries}/{max_retries} in {delay:?}…"
|
||||
@@ -1860,7 +1890,7 @@ async fn handle_response_item(
|
||||
call_id,
|
||||
..
|
||||
} => {
|
||||
info!("FunctionCall: {arguments}");
|
||||
info!("FunctionCall: {name}({arguments})");
|
||||
Some(
|
||||
handle_function_call(
|
||||
sess,
|
||||
@@ -1917,10 +1947,32 @@ async fn handle_response_item(
|
||||
.await,
|
||||
)
|
||||
}
|
||||
ResponseItem::CustomToolCall {
|
||||
id: _,
|
||||
call_id,
|
||||
name,
|
||||
input,
|
||||
status: _,
|
||||
} => Some(
|
||||
handle_custom_tool_call(
|
||||
sess,
|
||||
turn_context,
|
||||
turn_diff_tracker,
|
||||
sub_id.to_string(),
|
||||
name,
|
||||
input,
|
||||
call_id,
|
||||
)
|
||||
.await,
|
||||
),
|
||||
ResponseItem::FunctionCallOutput { .. } => {
|
||||
debug!("unexpected FunctionCallOutput from stream");
|
||||
None
|
||||
}
|
||||
ResponseItem::CustomToolCallOutput { .. } => {
|
||||
debug!("unexpected CustomToolCallOutput from stream");
|
||||
None
|
||||
}
|
||||
ResponseItem::Other => None,
|
||||
};
|
||||
Ok(output)
|
||||
@@ -2010,6 +2062,58 @@ async fn handle_function_call(
|
||||
}
|
||||
}
|
||||
|
||||
async fn handle_custom_tool_call(
|
||||
sess: &Session,
|
||||
turn_context: &TurnContext,
|
||||
turn_diff_tracker: &mut TurnDiffTracker,
|
||||
sub_id: String,
|
||||
name: String,
|
||||
input: String,
|
||||
call_id: String,
|
||||
) -> ResponseInputItem {
|
||||
info!("CustomToolCall: {name} {input}");
|
||||
match name.as_str() {
|
||||
"apply_patch" => {
|
||||
let exec_params = ExecParams {
|
||||
command: vec!["apply_patch".to_string(), input.clone()],
|
||||
cwd: turn_context.cwd.clone(),
|
||||
timeout_ms: None,
|
||||
env: HashMap::new(),
|
||||
with_escalated_permissions: None,
|
||||
justification: None,
|
||||
};
|
||||
let resp = handle_container_exec_with_params(
|
||||
exec_params,
|
||||
sess,
|
||||
turn_context,
|
||||
turn_diff_tracker,
|
||||
sub_id,
|
||||
call_id,
|
||||
)
|
||||
.await;
|
||||
|
||||
// Convert function-call style output into a custom tool call output
|
||||
match resp {
|
||||
ResponseInputItem::FunctionCallOutput { call_id, output } => {
|
||||
ResponseInputItem::CustomToolCallOutput {
|
||||
call_id,
|
||||
output: output.content,
|
||||
}
|
||||
}
|
||||
// Pass through if already a custom tool output or other variant
|
||||
other => other,
|
||||
}
|
||||
}
|
||||
_ => {
|
||||
debug!("unexpected CustomToolCall from stream");
|
||||
ResponseInputItem::CustomToolCallOutput {
|
||||
call_id,
|
||||
output: format!("unsupported custom tool call: {name}"),
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn to_exec_params(params: ShellToolCallParams, turn_context: &TurnContext) -> ExecParams {
|
||||
ExecParams {
|
||||
command: params.command,
|
||||
@@ -2051,18 +2155,20 @@ pub struct ExecInvokeArgs<'a> {
|
||||
pub stdout_stream: Option<StdoutStream>,
|
||||
}
|
||||
|
||||
fn maybe_run_with_user_profile(
|
||||
fn maybe_translate_shell_command(
|
||||
params: ExecParams,
|
||||
sess: &Session,
|
||||
turn_context: &TurnContext,
|
||||
) -> ExecParams {
|
||||
if turn_context.shell_environment_policy.use_profile {
|
||||
let command = sess
|
||||
let should_translate = matches!(sess.user_shell, crate::shell::Shell::PowerShell(_))
|
||||
|| turn_context.shell_environment_policy.use_profile;
|
||||
|
||||
if should_translate
|
||||
&& let Some(command) = sess
|
||||
.user_shell
|
||||
.format_default_shell_invocation(params.command.clone());
|
||||
if let Some(command) = command {
|
||||
return ExecParams { command, ..params };
|
||||
}
|
||||
.format_default_shell_invocation(params.command.clone())
|
||||
{
|
||||
return ExecParams { command, ..params };
|
||||
}
|
||||
params
|
||||
}
|
||||
@@ -2227,7 +2333,7 @@ async fn handle_container_exec_with_params(
|
||||
),
|
||||
};
|
||||
|
||||
let params = maybe_run_with_user_profile(params, sess, turn_context);
|
||||
let params = maybe_translate_shell_command(params, sess, turn_context);
|
||||
let output_result = sess
|
||||
.run_exec_with_events(
|
||||
turn_diff_tracker,
|
||||
@@ -2530,3 +2636,132 @@ async fn drain_to_completed(
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn convert_call_tool_result_to_function_call_output_payload(
|
||||
call_tool_result: &CallToolResult,
|
||||
) -> FunctionCallOutputPayload {
|
||||
let CallToolResult {
|
||||
content,
|
||||
is_error,
|
||||
structured_content,
|
||||
} = call_tool_result;
|
||||
|
||||
// In terms of what to send back to the model, we prefer structured_content,
|
||||
// if available, and fallback to content, otherwise.
|
||||
let mut is_success = is_error != &Some(true);
|
||||
let content = if let Some(structured_content) = structured_content
|
||||
&& structured_content != &serde_json::Value::Null
|
||||
&& let Ok(serialized_structured_content) = serde_json::to_string(&structured_content)
|
||||
{
|
||||
serialized_structured_content
|
||||
} else {
|
||||
match serde_json::to_string(&content) {
|
||||
Ok(serialized_content) => serialized_content,
|
||||
Err(err) => {
|
||||
// If we could not serialize either content or structured_content to
|
||||
// JSON, flag this as an error.
|
||||
is_success = false;
|
||||
err.to_string()
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
FunctionCallOutputPayload {
|
||||
content,
|
||||
success: Some(is_success),
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use mcp_types::ContentBlock;
|
||||
use mcp_types::TextContent;
|
||||
use pretty_assertions::assert_eq;
|
||||
use serde_json::json;
|
||||
|
||||
fn text_block(s: &str) -> ContentBlock {
|
||||
ContentBlock::TextContent(TextContent {
|
||||
annotations: None,
|
||||
text: s.to_string(),
|
||||
r#type: "text".to_string(),
|
||||
})
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn prefers_structured_content_when_present() {
|
||||
let ctr = CallToolResult {
|
||||
// Content present but should be ignored because structured_content is set.
|
||||
content: vec![text_block("ignored")],
|
||||
is_error: None,
|
||||
structured_content: Some(json!({
|
||||
"ok": true,
|
||||
"value": 42
|
||||
})),
|
||||
};
|
||||
|
||||
let got = convert_call_tool_result_to_function_call_output_payload(&ctr);
|
||||
let expected = FunctionCallOutputPayload {
|
||||
content: serde_json::to_string(&json!({
|
||||
"ok": true,
|
||||
"value": 42
|
||||
}))
|
||||
.unwrap(),
|
||||
success: Some(true),
|
||||
};
|
||||
|
||||
assert_eq!(expected, got);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn falls_back_to_content_when_structured_is_null() {
|
||||
let ctr = CallToolResult {
|
||||
content: vec![text_block("hello"), text_block("world")],
|
||||
is_error: None,
|
||||
structured_content: Some(serde_json::Value::Null),
|
||||
};
|
||||
|
||||
let got = convert_call_tool_result_to_function_call_output_payload(&ctr);
|
||||
let expected = FunctionCallOutputPayload {
|
||||
content: serde_json::to_string(&vec![text_block("hello"), text_block("world")])
|
||||
.unwrap(),
|
||||
success: Some(true),
|
||||
};
|
||||
|
||||
assert_eq!(expected, got);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn success_flag_reflects_is_error_true() {
|
||||
let ctr = CallToolResult {
|
||||
content: vec![text_block("unused")],
|
||||
is_error: Some(true),
|
||||
structured_content: Some(json!({ "message": "bad" })),
|
||||
};
|
||||
|
||||
let got = convert_call_tool_result_to_function_call_output_payload(&ctr);
|
||||
let expected = FunctionCallOutputPayload {
|
||||
content: serde_json::to_string(&json!({ "message": "bad" })).unwrap(),
|
||||
success: Some(false),
|
||||
};
|
||||
|
||||
assert_eq!(expected, got);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn success_flag_true_with_no_error_and_content_used() {
|
||||
let ctr = CallToolResult {
|
||||
content: vec![text_block("alpha")],
|
||||
is_error: Some(false),
|
||||
structured_content: None,
|
||||
};
|
||||
|
||||
let got = convert_call_tool_result_to_function_call_output_payload(&ctr);
|
||||
let expected = FunctionCallOutputPayload {
|
||||
content: serde_json::to_string(&vec![text_block("alpha")]).unwrap(),
|
||||
success: Some(true),
|
||||
};
|
||||
|
||||
assert_eq!(expected, got);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -6,6 +6,8 @@ use crate::config_types::ShellEnvironmentPolicy;
|
||||
use crate::config_types::ShellEnvironmentPolicyToml;
|
||||
use crate::config_types::Tui;
|
||||
use crate::config_types::UriBasedFileOpener;
|
||||
use crate::config_types::Verbosity;
|
||||
use crate::git_info::resolve_root_git_project_for_trust;
|
||||
use crate::model_family::ModelFamily;
|
||||
use crate::model_family::find_family_for_model;
|
||||
use crate::model_provider_info::ModelProviderInfo;
|
||||
@@ -150,6 +152,9 @@ pub struct Config {
|
||||
/// request using the Responses API.
|
||||
pub model_reasoning_summary: ReasoningSummary,
|
||||
|
||||
/// Optional verbosity control for GPT-5 models (Responses API `text.verbosity`).
|
||||
pub model_verbosity: Option<Verbosity>,
|
||||
|
||||
/// Base URL for requests to ChatGPT (as opposed to the OpenAI API).
|
||||
pub chatgpt_base_url: String,
|
||||
|
||||
@@ -259,10 +264,61 @@ pub fn set_project_trusted(codex_home: &Path, project_path: &Path) -> anyhow::Re
|
||||
Err(e) => return Err(e.into()),
|
||||
};
|
||||
|
||||
// Mark the project as trusted. toml_edit is very good at handling
|
||||
// missing properties
|
||||
// Ensure we render a human-friendly structure:
|
||||
//
|
||||
// [projects]
|
||||
// [projects."/path/to/project"]
|
||||
// trust_level = "trusted"
|
||||
//
|
||||
// rather than inline tables like:
|
||||
//
|
||||
// [projects]
|
||||
// "/path/to/project" = { trust_level = "trusted" }
|
||||
let project_key = project_path.to_string_lossy().to_string();
|
||||
doc["projects"][project_key.as_str()]["trust_level"] = toml_edit::value("trusted");
|
||||
|
||||
// Ensure top-level `projects` exists as a non-inline, explicit table. If it
|
||||
// exists but was previously represented as a non-table (e.g., inline),
|
||||
// replace it with an explicit table.
|
||||
let mut created_projects_table = false;
|
||||
{
|
||||
let root = doc.as_table_mut();
|
||||
let needs_table = !root.contains_key("projects")
|
||||
|| root.get("projects").and_then(|i| i.as_table()).is_none();
|
||||
if needs_table {
|
||||
root.insert("projects", toml_edit::table());
|
||||
created_projects_table = true;
|
||||
}
|
||||
}
|
||||
let Some(projects_tbl) = doc["projects"].as_table_mut() else {
|
||||
return Err(anyhow::anyhow!(
|
||||
"projects table missing after initialization"
|
||||
));
|
||||
};
|
||||
|
||||
// If we created the `projects` table ourselves, keep it implicit so we
|
||||
// don't render a standalone `[projects]` header.
|
||||
if created_projects_table {
|
||||
projects_tbl.set_implicit(true);
|
||||
}
|
||||
|
||||
// Ensure the per-project entry is its own explicit table. If it exists but
|
||||
// is not a table (e.g., an inline table), replace it with an explicit table.
|
||||
let needs_proj_table = !projects_tbl.contains_key(project_key.as_str())
|
||||
|| projects_tbl
|
||||
.get(project_key.as_str())
|
||||
.and_then(|i| i.as_table())
|
||||
.is_none();
|
||||
if needs_proj_table {
|
||||
projects_tbl.insert(project_key.as_str(), toml_edit::table());
|
||||
}
|
||||
let Some(proj_tbl) = projects_tbl
|
||||
.get_mut(project_key.as_str())
|
||||
.and_then(|i| i.as_table_mut())
|
||||
else {
|
||||
return Err(anyhow::anyhow!("project table missing for {}", project_key));
|
||||
};
|
||||
proj_tbl.set_implicit(false);
|
||||
proj_tbl["trust_level"] = toml_edit::value("trusted");
|
||||
|
||||
// ensure codex_home exists
|
||||
std::fs::create_dir_all(codex_home)?;
|
||||
@@ -398,6 +454,8 @@ pub struct ConfigToml {
|
||||
|
||||
pub model_reasoning_effort: Option<ReasoningEffort>,
|
||||
pub model_reasoning_summary: Option<ReasoningSummary>,
|
||||
/// Optional verbosity control for GPT-5 models (Responses API `text.verbosity`).
|
||||
pub model_verbosity: Option<Verbosity>,
|
||||
|
||||
/// Override to force-enable reasoning summaries for the configured model.
|
||||
pub model_supports_reasoning_summaries: Option<bool>,
|
||||
@@ -454,10 +512,27 @@ impl ConfigToml {
    pub fn is_cwd_trusted(&self, resolved_cwd: &Path) -> bool {
        let projects = self.projects.clone().unwrap_or_default();

        projects
            .get(&resolved_cwd.to_string_lossy().to_string())
            .map(|p| p.trust_level.clone().unwrap_or("".to_string()) == "trusted")
            .unwrap_or(false)
        let is_path_trusted = |path: &Path| {
            let path_str = path.to_string_lossy().to_string();
            projects
                .get(&path_str)
                .map(|p| p.trust_level.as_deref() == Some("trusted"))
                .unwrap_or(false)
        };

        // Fast path: exact cwd match
        if is_path_trusted(resolved_cwd) {
            return true;
        }

        // If cwd lives inside a git worktree, check whether the root git project
        // (the primary repository working directory) is trusted. This lets
        // worktrees inherit trust from the main project.
        if let Some(root_project) = resolve_root_git_project_for_trust(resolved_cwd) {
            return is_path_trusted(&root_project);
        }

        false
    }

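// Illustrative sketch, not part of the diff above: a config.toml entry that makes
// both the main checkout and any linked worktrees trusted (paths hypothetical).
//
//   [projects."/home/user/main-repo"]
//   trust_level = "trusted"
//
// A worktree created with `git worktree add ../main-repo-wt` resolves back to
// /home/user/main-repo via resolve_root_git_project_for_trust, so is_cwd_trusted
// returns true there as well, without its own [projects] entry.
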
pub fn get_config_profile(
|
||||
@@ -597,7 +672,7 @@ impl Config {
|
||||
needs_special_apply_patch_instructions: false,
|
||||
supports_reasoning_summaries,
|
||||
uses_local_shell_tool: false,
|
||||
uses_apply_patch_tool: false,
|
||||
apply_patch_tool_type: None,
|
||||
}
|
||||
});
|
||||
|
||||
@@ -624,9 +699,6 @@ impl Config {
|
||||
Self::get_base_instructions(experimental_instructions_path, &resolved_cwd)?;
|
||||
let base_instructions = base_instructions.or(file_base_instructions);
|
||||
|
||||
let include_apply_patch_tool_val =
|
||||
include_apply_patch_tool.unwrap_or(model_family.uses_apply_patch_tool);
|
||||
|
||||
let responses_originator_header: String = cfg
|
||||
.responses_originator_header_internal_override
|
||||
.unwrap_or(DEFAULT_RESPONSES_ORIGINATOR_HEADER.to_owned());
|
||||
@@ -675,7 +747,7 @@ impl Config {
|
||||
.model_reasoning_summary
|
||||
.or(cfg.model_reasoning_summary)
|
||||
.unwrap_or_default(),
|
||||
|
||||
model_verbosity: config_profile.model_verbosity.or(cfg.model_verbosity),
|
||||
chatgpt_base_url: config_profile
|
||||
.chatgpt_base_url
|
||||
.or(cfg.chatgpt_base_url)
|
||||
@@ -683,7 +755,7 @@ impl Config {
|
||||
|
||||
experimental_resume,
|
||||
include_plan_tool: include_plan_tool.unwrap_or(false),
|
||||
include_apply_patch_tool: include_apply_patch_tool_val,
|
||||
include_apply_patch_tool: include_apply_patch_tool.unwrap_or(false),
|
||||
responses_originator_header,
|
||||
preferred_auth_method: cfg.preferred_auth_method.unwrap_or(AuthMode::ChatGPT),
|
||||
};
|
||||
@@ -1044,6 +1116,7 @@ disable_response_storage = true
|
||||
show_raw_agent_reasoning: false,
|
||||
model_reasoning_effort: ReasoningEffort::High,
|
||||
model_reasoning_summary: ReasoningSummary::Detailed,
|
||||
model_verbosity: None,
|
||||
chatgpt_base_url: "https://chatgpt.com/backend-api/".to_string(),
|
||||
experimental_resume: None,
|
||||
base_instructions: None,
|
||||
@@ -1097,6 +1170,7 @@ disable_response_storage = true
|
||||
show_raw_agent_reasoning: false,
|
||||
model_reasoning_effort: ReasoningEffort::default(),
|
||||
model_reasoning_summary: ReasoningSummary::default(),
|
||||
model_verbosity: None,
|
||||
chatgpt_base_url: "https://chatgpt.com/backend-api/".to_string(),
|
||||
experimental_resume: None,
|
||||
base_instructions: None,
|
||||
@@ -1165,6 +1239,7 @@ disable_response_storage = true
|
||||
show_raw_agent_reasoning: false,
|
||||
model_reasoning_effort: ReasoningEffort::default(),
|
||||
model_reasoning_summary: ReasoningSummary::default(),
|
||||
model_verbosity: None,
|
||||
chatgpt_base_url: "https://chatgpt.com/backend-api/".to_string(),
|
||||
experimental_resume: None,
|
||||
base_instructions: None,
|
||||
@@ -1178,4 +1253,74 @@ disable_response_storage = true
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_set_project_trusted_writes_explicit_tables() -> anyhow::Result<()> {
|
||||
let codex_home = TempDir::new().unwrap();
|
||||
let project_dir = TempDir::new().unwrap();
|
||||
|
||||
// Call the function under test
|
||||
set_project_trusted(codex_home.path(), project_dir.path())?;
|
||||
|
||||
// Read back the generated config.toml and assert exact contents
|
||||
let config_path = codex_home.path().join(CONFIG_TOML_FILE);
|
||||
let contents = std::fs::read_to_string(&config_path)?;
|
||||
|
||||
let raw_path = project_dir.path().to_string_lossy();
|
||||
let path_str = if raw_path.contains('\\') {
|
||||
format!("'{}'", raw_path)
|
||||
} else {
|
||||
format!("\"{}\"", raw_path)
|
||||
};
|
||||
let expected = format!(
|
||||
r#"[projects.{path_str}]
|
||||
trust_level = "trusted"
|
||||
"#
|
||||
);
|
||||
assert_eq!(contents, expected);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_set_project_trusted_converts_inline_to_explicit() -> anyhow::Result<()> {
|
||||
let codex_home = TempDir::new().unwrap();
|
||||
let project_dir = TempDir::new().unwrap();
|
||||
|
||||
// Seed config.toml with an inline project entry under [projects]
|
||||
let config_path = codex_home.path().join(CONFIG_TOML_FILE);
|
||||
let raw_path = project_dir.path().to_string_lossy();
|
||||
let path_str = if raw_path.contains('\\') {
|
||||
format!("'{}'", raw_path)
|
||||
} else {
|
||||
format!("\"{}\"", raw_path)
|
||||
};
|
||||
// Use a quoted key so backslashes don't require escaping on Windows
|
||||
let initial = format!(
|
||||
r#"[projects]
|
||||
{path_str} = {{ trust_level = "untrusted" }}
|
||||
"#
|
||||
);
|
||||
std::fs::create_dir_all(codex_home.path())?;
|
||||
std::fs::write(&config_path, initial)?;
|
||||
|
||||
// Run the function; it should convert to explicit tables and set trusted
|
||||
set_project_trusted(codex_home.path(), project_dir.path())?;
|
||||
|
||||
let contents = std::fs::read_to_string(&config_path)?;
|
||||
|
||||
// Assert exact output after conversion to explicit table
|
||||
let expected = format!(
|
||||
r#"[projects]
|
||||
|
||||
[projects.{path_str}]
|
||||
trust_level = "trusted"
|
||||
"#
|
||||
);
|
||||
assert_eq!(contents, expected);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// No test enforcing the presence of a standalone [projects] header.
|
||||
}
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
use serde::Deserialize;
|
||||
use std::path::PathBuf;
|
||||
|
||||
use crate::config_types::Verbosity;
|
||||
use crate::protocol::AskForApproval;
|
||||
use codex_protocol::config_types::ReasoningEffort;
|
||||
use codex_protocol::config_types::ReasoningSummary;
|
||||
@@ -17,6 +18,7 @@ pub struct ConfigProfile {
|
||||
pub disable_response_storage: Option<bool>,
|
||||
pub model_reasoning_effort: Option<ReasoningEffort>,
|
||||
pub model_reasoning_summary: Option<ReasoningSummary>,
|
||||
pub model_verbosity: Option<Verbosity>,
|
||||
pub chatgpt_base_url: Option<String>,
|
||||
pub experimental_instructions_file: Option<PathBuf>,
|
||||
}
|
||||
|
||||
@@ -8,6 +8,8 @@ use std::path::PathBuf;
|
||||
use wildmatch::WildMatchPattern;
|
||||
|
||||
use serde::Deserialize;
|
||||
use serde::Serialize;
|
||||
use strum_macros::Display;
|
||||
|
||||
#[derive(Deserialize, Debug, Clone, PartialEq)]
|
||||
pub struct McpServerConfig {
|
||||
@@ -183,3 +185,43 @@ impl From<ShellEnvironmentPolicyToml> for ShellEnvironmentPolicy {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// See https://platform.openai.com/docs/guides/reasoning?api-mode=responses#get-started-with-reasoning
#[derive(Debug, Serialize, Deserialize, Default, Clone, Copy, PartialEq, Eq, Display)]
#[serde(rename_all = "lowercase")]
#[strum(serialize_all = "lowercase")]
pub enum ReasoningEffort {
    Low,
    #[default]
    Medium,
    High,
    /// Option to disable reasoning.
    None,
}

/// A summary of the reasoning performed by the model. This can be useful for
/// debugging and understanding the model's reasoning process.
/// See https://platform.openai.com/docs/guides/reasoning?api-mode=responses#reasoning-summaries
#[derive(Debug, Serialize, Deserialize, Default, Clone, Copy, PartialEq, Eq, Display)]
#[serde(rename_all = "lowercase")]
#[strum(serialize_all = "lowercase")]
pub enum ReasoningSummary {
    #[default]
    Auto,
    Concise,
    Detailed,
    /// Option to disable reasoning summaries.
    None,
}

/// Controls output length/detail on GPT-5 models via the Responses API.
/// Serialized with lowercase values to match the OpenAI API.
#[derive(Debug, Serialize, Deserialize, Default, Clone, Copy, PartialEq, Eq, Display)]
#[serde(rename_all = "lowercase")]
#[strum(serialize_all = "lowercase")]
pub enum Verbosity {
    Low,
    #[default]
    Medium,
    High,
}

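// Illustrative sketch, not part of the diff above: serde and strum both use the
// lowercase forms, so config values, API payloads, and Display output line up.
//
//   assert_eq!(serde_json::to_string(&Verbosity::Medium).unwrap(), "\"medium\"");
//   assert_eq!(ReasoningEffort::High.to_string(), "high");
//
// which is the shape expected by `model_verbosity = "medium"` or
// `model_reasoning_effort = "high"` in config.toml.
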
@@ -1,4 +1,4 @@
|
||||
use crate::models::ResponseItem;
|
||||
use codex_protocol::models::ResponseItem;
|
||||
|
||||
/// Transcript of conversation history
|
||||
#[derive(Debug, Clone, Default)]
|
||||
@@ -66,7 +66,7 @@ impl ConversationHistory {
|
||||
self.items.push(ResponseItem::Message {
|
||||
id: None,
|
||||
role: "assistant".to_string(),
|
||||
content: vec![crate::models::ContentItem::OutputText {
|
||||
content: vec![codex_protocol::models::ContentItem::OutputText {
|
||||
text: delta.to_string(),
|
||||
}],
|
||||
});
|
||||
@@ -110,6 +110,8 @@ fn is_api_message(message: &ResponseItem) -> bool {
|
||||
ResponseItem::Message { role, .. } => role.as_str() != "system",
|
||||
ResponseItem::FunctionCallOutput { .. }
|
||||
| ResponseItem::FunctionCall { .. }
|
||||
| ResponseItem::CustomToolCall { .. }
|
||||
| ResponseItem::CustomToolCallOutput { .. }
|
||||
| ResponseItem::LocalShellCall { .. }
|
||||
| ResponseItem::Reasoning { .. } => true,
|
||||
ResponseItem::Other => false,
|
||||
@@ -118,11 +120,11 @@ fn is_api_message(message: &ResponseItem) -> bool {
|
||||
|
||||
/// Helper to append the textual content from `src` into `dst` in place.
|
||||
fn append_text_content(
|
||||
dst: &mut Vec<crate::models::ContentItem>,
|
||||
src: &Vec<crate::models::ContentItem>,
|
||||
dst: &mut Vec<codex_protocol::models::ContentItem>,
|
||||
src: &Vec<codex_protocol::models::ContentItem>,
|
||||
) {
|
||||
for c in src {
|
||||
if let crate::models::ContentItem::OutputText { text } = c {
|
||||
if let codex_protocol::models::ContentItem::OutputText { text } = c {
|
||||
append_text_delta(dst, text);
|
||||
}
|
||||
}
|
||||
@@ -130,15 +132,15 @@ fn append_text_content(
|
||||
|
||||
/// Append a single text delta to the last OutputText item in `content`, or
|
||||
/// push a new OutputText item if none exists.
|
||||
fn append_text_delta(content: &mut Vec<crate::models::ContentItem>, delta: &str) {
|
||||
if let Some(crate::models::ContentItem::OutputText { text }) = content
|
||||
fn append_text_delta(content: &mut Vec<codex_protocol::models::ContentItem>, delta: &str) {
|
||||
if let Some(codex_protocol::models::ContentItem::OutputText { text }) = content
|
||||
.iter_mut()
|
||||
.rev()
|
||||
.find(|c| matches!(c, crate::models::ContentItem::OutputText { .. }))
|
||||
.find(|c| matches!(c, codex_protocol::models::ContentItem::OutputText { .. }))
|
||||
{
|
||||
text.push_str(delta);
|
||||
} else {
|
||||
content.push(crate::models::ContentItem::OutputText {
|
||||
content.push(codex_protocol::models::ContentItem::OutputText {
|
||||
text: delta.to_string(),
|
||||
});
|
||||
}
|
||||
@@ -147,7 +149,7 @@ fn append_text_delta(content: &mut Vec<crate::models::ContentItem>, delta: &str)
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::models::ContentItem;
|
||||
use codex_protocol::models::ContentItem;
|
||||
|
||||
fn assistant_msg(text: &str) -> ResponseItem {
|
||||
ResponseItem::Message {
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
use std::collections::HashMap;
|
||||
use std::sync::Arc;
|
||||
|
||||
use codex_login::AuthManager;
|
||||
use codex_login::CodexAuth;
|
||||
use tokio::sync::RwLock;
|
||||
use uuid::Uuid;
|
||||
@@ -28,33 +29,37 @@ pub struct NewConversation {
|
||||
/// maintaining them in memory.
|
||||
pub struct ConversationManager {
|
||||
conversations: Arc<RwLock<HashMap<Uuid, Arc<CodexConversation>>>>,
|
||||
}
|
||||
|
||||
impl Default for ConversationManager {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
conversations: Arc::new(RwLock::new(HashMap::new())),
|
||||
}
|
||||
}
|
||||
auth_manager: Arc<AuthManager>,
|
||||
}
|
||||
|
||||
impl ConversationManager {
|
||||
pub async fn new_conversation(&self, config: Config) -> CodexResult<NewConversation> {
|
||||
let auth = CodexAuth::from_codex_home(&config.codex_home, config.preferred_auth_method)?;
|
||||
self.new_conversation_with_auth(config, auth).await
|
||||
pub fn new(auth_manager: Arc<AuthManager>) -> Self {
|
||||
Self {
|
||||
conversations: Arc::new(RwLock::new(HashMap::new())),
|
||||
auth_manager,
|
||||
}
|
||||
}
|
||||
|
||||
/// Used for integration tests: should not be used by ordinary business
|
||||
/// logic.
|
||||
pub async fn new_conversation_with_auth(
|
||||
/// Construct with a dummy AuthManager containing the provided CodexAuth.
|
||||
/// Used for integration tests: should not be used by ordinary business logic.
|
||||
pub fn with_auth(auth: CodexAuth) -> Self {
|
||||
Self::new(codex_login::AuthManager::from_auth_for_testing(auth))
|
||||
}
|
||||
|
||||
pub async fn new_conversation(&self, config: Config) -> CodexResult<NewConversation> {
|
||||
self.spawn_conversation(config, self.auth_manager.clone())
|
||||
.await
|
||||
}
|
||||
|
||||
async fn spawn_conversation(
|
||||
&self,
|
||||
config: Config,
|
||||
auth: Option<CodexAuth>,
|
||||
auth_manager: Arc<AuthManager>,
|
||||
) -> CodexResult<NewConversation> {
|
||||
let CodexSpawnOk {
|
||||
codex,
|
||||
session_id: conversation_id,
|
||||
} = Codex::spawn(config, auth).await?;
|
||||
} = Codex::spawn(config, auth_manager).await?;
|
||||
|
||||
// The first event must be `SessionInitialized`. Validate and forward it
|
||||
// to the caller so that they can display it in the conversation
|
||||
|
||||
@@ -2,16 +2,16 @@ use serde::Deserialize;
|
||||
use serde::Serialize;
|
||||
use strum_macros::Display as DeriveDisplay;
|
||||
|
||||
use crate::models::ContentItem;
|
||||
use crate::models::ResponseItem;
|
||||
use crate::protocol::AskForApproval;
|
||||
use crate::protocol::SandboxPolicy;
|
||||
use crate::shell::Shell;
|
||||
use codex_protocol::config_types::SandboxMode;
|
||||
use std::fmt::Display;
|
||||
use codex_protocol::models::ContentItem;
|
||||
use codex_protocol::models::ResponseItem;
|
||||
use std::path::PathBuf;
|
||||
|
||||
/// wraps environment context message in a tag for the model to parse more easily.
|
||||
pub(crate) const ENVIRONMENT_CONTEXT_START: &str = "<environment_context>\n";
|
||||
pub(crate) const ENVIRONMENT_CONTEXT_START: &str = "<environment_context>";
|
||||
pub(crate) const ENVIRONMENT_CONTEXT_END: &str = "</environment_context>";
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, DeriveDisplay)]
|
||||
@@ -24,52 +24,87 @@ pub enum NetworkAccess {
|
||||
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
|
||||
#[serde(rename = "environment_context", rename_all = "snake_case")]
|
||||
pub(crate) struct EnvironmentContext {
|
||||
pub cwd: PathBuf,
|
||||
pub approval_policy: AskForApproval,
|
||||
pub sandbox_mode: SandboxMode,
|
||||
pub network_access: NetworkAccess,
|
||||
pub cwd: Option<PathBuf>,
|
||||
pub approval_policy: Option<AskForApproval>,
|
||||
pub sandbox_mode: Option<SandboxMode>,
|
||||
pub network_access: Option<NetworkAccess>,
|
||||
pub shell: Option<Shell>,
|
||||
}
|
||||
|
||||
impl EnvironmentContext {
|
||||
pub fn new(
|
||||
cwd: PathBuf,
|
||||
approval_policy: AskForApproval,
|
||||
sandbox_policy: SandboxPolicy,
|
||||
cwd: Option<PathBuf>,
|
||||
approval_policy: Option<AskForApproval>,
|
||||
sandbox_policy: Option<SandboxPolicy>,
|
||||
shell: Option<Shell>,
|
||||
) -> Self {
|
||||
Self {
|
||||
cwd,
|
||||
approval_policy,
|
||||
sandbox_mode: match sandbox_policy {
|
||||
SandboxPolicy::DangerFullAccess => SandboxMode::DangerFullAccess,
|
||||
SandboxPolicy::ReadOnly => SandboxMode::ReadOnly,
|
||||
SandboxPolicy::WorkspaceWrite { .. } => SandboxMode::WorkspaceWrite,
|
||||
Some(SandboxPolicy::DangerFullAccess) => Some(SandboxMode::DangerFullAccess),
|
||||
Some(SandboxPolicy::ReadOnly) => Some(SandboxMode::ReadOnly),
|
||||
Some(SandboxPolicy::WorkspaceWrite { .. }) => Some(SandboxMode::WorkspaceWrite),
|
||||
None => None,
|
||||
},
|
||||
network_access: match sandbox_policy {
|
||||
SandboxPolicy::DangerFullAccess => NetworkAccess::Enabled,
|
||||
SandboxPolicy::ReadOnly => NetworkAccess::Restricted,
|
||||
SandboxPolicy::WorkspaceWrite { network_access, .. } => {
|
||||
Some(SandboxPolicy::DangerFullAccess) => Some(NetworkAccess::Enabled),
|
||||
Some(SandboxPolicy::ReadOnly) => Some(NetworkAccess::Restricted),
|
||||
Some(SandboxPolicy::WorkspaceWrite { network_access, .. }) => {
|
||||
if network_access {
|
||||
NetworkAccess::Enabled
|
||||
Some(NetworkAccess::Enabled)
|
||||
} else {
|
||||
NetworkAccess::Restricted
|
||||
Some(NetworkAccess::Restricted)
|
||||
}
|
||||
}
|
||||
None => None,
|
||||
},
|
||||
shell,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Display for EnvironmentContext {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
writeln!(
|
||||
f,
|
||||
"Current working directory: {}",
|
||||
self.cwd.to_string_lossy()
|
||||
)?;
|
||||
writeln!(f, "Approval policy: {}", self.approval_policy)?;
|
||||
writeln!(f, "Sandbox mode: {}", self.sandbox_mode)?;
|
||||
writeln!(f, "Network access: {}", self.network_access)?;
|
||||
Ok(())
|
||||
impl EnvironmentContext {
|
||||
/// Serializes the environment context to XML. Libraries like `quick-xml`
|
||||
/// require custom macros to handle Enums with newtypes, so we just do it
|
||||
/// manually, to keep things simple. Output looks like:
|
||||
///
|
||||
/// ```xml
|
||||
/// <environment_context>
|
||||
/// <cwd>...</cwd>
|
||||
/// <approval_policy>...</approval_policy>
|
||||
/// <sandbox_mode>...</sandbox_mode>
|
||||
/// <network_access>...</network_access>
|
||||
/// <shell>...</shell>
|
||||
/// </environment_context>
|
||||
/// ```
|
||||
pub fn serialize_to_xml(self) -> String {
|
||||
let mut lines = vec![ENVIRONMENT_CONTEXT_START.to_string()];
|
||||
if let Some(cwd) = self.cwd {
|
||||
lines.push(format!(" <cwd>{}</cwd>", cwd.to_string_lossy()));
|
||||
}
|
||||
if let Some(approval_policy) = self.approval_policy {
|
||||
lines.push(format!(
|
||||
" <approval_policy>{}</approval_policy>",
|
||||
approval_policy
|
||||
));
|
||||
}
|
||||
if let Some(sandbox_mode) = self.sandbox_mode {
|
||||
lines.push(format!(" <sandbox_mode>{}</sandbox_mode>", sandbox_mode));
|
||||
}
|
||||
if let Some(network_access) = self.network_access {
|
||||
lines.push(format!(
|
||||
" <network_access>{}</network_access>",
|
||||
network_access
|
||||
));
|
||||
}
|
||||
if let Some(shell) = self.shell
|
||||
&& let Some(shell_name) = shell.name()
|
||||
{
|
||||
lines.push(format!(" <shell>{}</shell>", shell_name));
|
||||
}
|
||||
lines.push(ENVIRONMENT_CONTEXT_END.to_string());
|
||||
lines.join("\n")
|
||||
}
|
||||
}
|
||||
|
||||
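// Illustrative sketch, not part of the diff above: every field is optional, so a
// context can describe just the pieces that are known or that changed. The path is
// hypothetical.
//
//   let ctx = EnvironmentContext::new(
//       Some(PathBuf::from("/work/repo")),
//       Some(AskForApproval::Never),
//       Some(SandboxPolicy::ReadOnly),
//       None, // no shell tag
//   );
//   let xml = ctx.serialize_to_xml();
//
// Here `xml` contains <cwd>, <approval_policy>, <sandbox_mode>, and
// <network_access> elements (network access is derived from the sandbox policy),
// while the <shell> element is omitted.
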
@@ -79,7 +114,7 @@ impl From<EnvironmentContext> for ResponseItem {
|
||||
id: None,
|
||||
role: "user".to_string(),
|
||||
content: vec![ContentItem::InputText {
|
||||
text: format!("{ENVIRONMENT_CONTEXT_START}{ec}{ENVIRONMENT_CONTEXT_END}"),
|
||||
text: ec.serialize_to_xml(),
|
||||
}],
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
use std::collections::HashSet;
|
||||
use std::path::Path;
|
||||
use std::path::PathBuf;
|
||||
|
||||
use codex_protocol::mcp_protocol::GitSha;
|
||||
use futures::future::join_all;
|
||||
@@ -425,6 +426,38 @@ async fn diff_against_sha(cwd: &Path, sha: &GitSha) -> Option<String> {
|
||||
Some(diff)
|
||||
}
|
||||
|
||||
/// Resolve the path that should be used for trust checks. Similar to
/// `[utils::is_inside_git_repo]`, but resolves to the root of the main
/// repository. Handles worktrees.
pub fn resolve_root_git_project_for_trust(cwd: &Path) -> Option<PathBuf> {
    let base = if cwd.is_dir() { cwd } else { cwd.parent()? };

    // TODO: we should make this async, but it's primarily used deep in
    // callstacks of sync code, and should almost always be fast
    let git_dir_out = std::process::Command::new("git")
        .args(["rev-parse", "--git-common-dir"])
        .current_dir(base)
        .output()
        .ok()?;
    if !git_dir_out.status.success() {
        return None;
    }
    let git_dir_s = String::from_utf8(git_dir_out.stdout)
        .ok()?
        .trim()
        .to_string();

    let git_dir_path_raw = if Path::new(&git_dir_s).is_absolute() {
        PathBuf::from(&git_dir_s)
    } else {
        base.join(&git_dir_s)
    };

    // Normalize to handle macOS /var vs /private/var and resolve ".." segments.
    let git_dir_path = std::fs::canonicalize(&git_dir_path_raw).unwrap_or(git_dir_path_raw);
    git_dir_path.parent().map(Path::to_path_buf)
}

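// Illustrative sketch, not part of the diff above: what the git call returns inside
// a linked worktree (paths hypothetical).
//
//   $ cd /home/user/main-repo-wt
//   $ git rev-parse --git-common-dir
//   /home/user/main-repo/.git
//
// Taking the parent of that path yields /home/user/main-repo, the directory whose
// [projects] trust entry the worktree inherits. In a regular checkout the command
// points at the repository's own .git directory (possibly as the relative path
// ".git"), so the same logic resolves to the repository root itself.
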
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
@@ -732,6 +765,80 @@ mod tests {
|
||||
assert_eq!(state.sha, GitSha::new(&remote_sha));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn resolve_root_git_project_for_trust_returns_none_outside_repo() {
|
||||
let tmp = TempDir::new().expect("tempdir");
|
||||
assert!(resolve_root_git_project_for_trust(tmp.path()).is_none());
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn resolve_root_git_project_for_trust_regular_repo_returns_repo_root() {
|
||||
let temp_dir = TempDir::new().expect("Failed to create temp dir");
|
||||
let repo_path = create_test_git_repo(&temp_dir).await;
|
||||
let expected = std::fs::canonicalize(&repo_path).unwrap().to_path_buf();
|
||||
|
||||
assert_eq!(
|
||||
resolve_root_git_project_for_trust(&repo_path),
|
||||
Some(expected.clone())
|
||||
);
|
||||
let nested = repo_path.join("sub/dir");
|
||||
std::fs::create_dir_all(&nested).unwrap();
|
||||
assert_eq!(
|
||||
resolve_root_git_project_for_trust(&nested),
|
||||
Some(expected.clone())
|
||||
);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn resolve_root_git_project_for_trust_detects_worktree_and_returns_main_root() {
|
||||
let temp_dir = TempDir::new().expect("Failed to create temp dir");
|
||||
let repo_path = create_test_git_repo(&temp_dir).await;
|
||||
|
||||
// Create a linked worktree
|
||||
let wt_root = temp_dir.path().join("wt");
|
||||
let _ = std::process::Command::new("git")
|
||||
.args([
|
||||
"worktree",
|
||||
"add",
|
||||
wt_root.to_str().unwrap(),
|
||||
"-b",
|
||||
"feature/x",
|
||||
])
|
||||
.current_dir(&repo_path)
|
||||
.output()
|
||||
.expect("git worktree add");
|
||||
|
||||
let expected = std::fs::canonicalize(&repo_path).ok();
|
||||
let got = resolve_root_git_project_for_trust(&wt_root)
|
||||
.and_then(|p| std::fs::canonicalize(p).ok());
|
||||
assert_eq!(got, expected);
|
||||
let nested = wt_root.join("nested/sub");
|
||||
std::fs::create_dir_all(&nested).unwrap();
|
||||
let got_nested =
|
||||
resolve_root_git_project_for_trust(&nested).and_then(|p| std::fs::canonicalize(p).ok());
|
||||
assert_eq!(got_nested, expected);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn resolve_root_git_project_for_trust_non_worktrees_gitdir_returns_none() {
|
||||
let tmp = TempDir::new().expect("tempdir");
|
||||
let proj = tmp.path().join("proj");
|
||||
std::fs::create_dir_all(proj.join("nested")).unwrap();
|
||||
|
||||
// `.git` is a file but does not point to a worktrees path
|
||||
std::fs::write(
|
||||
proj.join(".git"),
|
||||
format!(
|
||||
"gitdir: {}\n",
|
||||
tmp.path().join("some/other/location").display()
|
||||
),
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
assert!(resolve_root_git_project_for_trust(&proj).is_none());
|
||||
assert!(resolve_root_git_project_for_trust(&proj.join("nested")).is_none());
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_get_git_working_tree_state_unpushed_commit() {
|
||||
let temp_dir = TempDir::new().expect("Failed to create temp dir");
|
||||
|
||||
@@ -39,17 +39,17 @@ mod conversation_manager;
|
||||
pub use conversation_manager::ConversationManager;
|
||||
pub use conversation_manager::NewConversation;
|
||||
pub mod model_family;
|
||||
mod models;
|
||||
mod openai_model_info;
|
||||
mod openai_tools;
|
||||
pub mod plan_tool;
|
||||
mod project_doc;
|
||||
pub mod project_doc;
|
||||
mod rollout;
|
||||
pub(crate) mod safety;
|
||||
pub mod seatbelt;
|
||||
pub mod shell;
|
||||
pub mod spawn;
|
||||
pub mod terminal;
|
||||
mod tool_apply_patch;
|
||||
pub mod turn_diff_tracker;
|
||||
pub mod user_agent;
|
||||
mod user_notification;
|
||||
|
||||
@@ -4,13 +4,13 @@ use std::time::Instant;
|
||||
use tracing::error;
|
||||
|
||||
use crate::codex::Session;
|
||||
use crate::models::FunctionCallOutputPayload;
|
||||
use crate::models::ResponseInputItem;
|
||||
use crate::protocol::Event;
|
||||
use crate::protocol::EventMsg;
|
||||
use crate::protocol::McpInvocation;
|
||||
use crate::protocol::McpToolCallBeginEvent;
|
||||
use crate::protocol::McpToolCallEndEvent;
|
||||
use codex_protocol::models::FunctionCallOutputPayload;
|
||||
use codex_protocol::models::ResponseInputItem;
|
||||
|
||||
/// Handles the specified tool call and dispatches the appropriate
/// `McpToolCallBegin` and `McpToolCallEnd` events to the `Session`.
|
||||
|
||||
@@ -1,3 +1,5 @@
|
||||
use crate::tool_apply_patch::ApplyPatchToolType;
|
||||
|
||||
/// A model family is a group of models that share certain characteristics.
|
||||
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
|
||||
pub struct ModelFamily {
|
||||
@@ -24,9 +26,9 @@ pub struct ModelFamily {
|
||||
// See https://platform.openai.com/docs/guides/tools-local-shell
|
||||
pub uses_local_shell_tool: bool,
|
||||
|
||||
/// True if the model performs better when `apply_patch` is provided as
|
||||
/// a tool call instead of just a bash command.
|
||||
pub uses_apply_patch_tool: bool,
|
||||
/// Present if the model performs better when `apply_patch` is provided as
|
||||
/// a tool call instead of just a bash command
|
||||
pub apply_patch_tool_type: Option<ApplyPatchToolType>,
|
||||
}
|
||||
|
||||
macro_rules! model_family {
|
||||
@@ -40,7 +42,7 @@ macro_rules! model_family {
|
||||
needs_special_apply_patch_instructions: false,
|
||||
supports_reasoning_summaries: false,
|
||||
uses_local_shell_tool: false,
|
||||
uses_apply_patch_tool: false,
|
||||
apply_patch_tool_type: None,
|
||||
};
|
||||
// apply overrides
|
||||
$(
|
||||
@@ -60,7 +62,7 @@ macro_rules! simple_model_family {
|
||||
needs_special_apply_patch_instructions: false,
|
||||
supports_reasoning_summaries: false,
|
||||
uses_local_shell_tool: false,
|
||||
uses_apply_patch_tool: false,
|
||||
apply_patch_tool_type: None,
|
||||
})
|
||||
}};
|
||||
}
|
||||
@@ -88,6 +90,7 @@ pub fn find_family_for_model(slug: &str) -> Option<ModelFamily> {
|
||||
model_family!(
|
||||
slug, slug,
|
||||
supports_reasoning_summaries: true,
|
||||
apply_patch_tool_type: Some(ApplyPatchToolType::Freeform),
|
||||
)
|
||||
} else if slug.starts_with("gpt-4.1") {
|
||||
model_family!(
|
||||
@@ -95,7 +98,7 @@ pub fn find_family_for_model(slug: &str) -> Option<ModelFamily> {
|
||||
needs_special_apply_patch_instructions: true,
|
||||
)
|
||||
} else if slug.starts_with("gpt-oss") {
|
||||
model_family!(slug, "gpt-oss", uses_apply_patch_tool: true)
|
||||
model_family!(slug, "gpt-oss", apply_patch_tool_type: Some(ApplyPatchToolType::Function))
|
||||
} else if slug.starts_with("gpt-4o") {
|
||||
simple_model_family!(slug, "gpt-4o")
|
||||
} else if slug.starts_with("gpt-3.5") {
|
||||
@@ -104,6 +107,7 @@ pub fn find_family_for_model(slug: &str) -> Option<ModelFamily> {
|
||||
model_family!(
|
||||
slug, "gpt-5",
|
||||
supports_reasoning_summaries: true,
|
||||
apply_patch_tool_type: Some(ApplyPatchToolType::Freeform),
|
||||
)
|
||||
} else {
|
||||
None
|
||||
|
||||
@@ -9,6 +9,9 @@ use crate::model_family::ModelFamily;
|
||||
use crate::plan_tool::PLAN_TOOL;
|
||||
use crate::protocol::AskForApproval;
|
||||
use crate::protocol::SandboxPolicy;
|
||||
use crate::tool_apply_patch::ApplyPatchToolType;
|
||||
use crate::tool_apply_patch::create_apply_patch_freeform_tool;
|
||||
use crate::tool_apply_patch::create_apply_patch_json_tool;
|
||||
|
||||
#[derive(Debug, Clone, Serialize, PartialEq)]
|
||||
pub struct ResponsesApiTool {
|
||||
@@ -21,6 +24,20 @@ pub struct ResponsesApiTool {
|
||||
pub(crate) parameters: JsonSchema,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
|
||||
pub struct FreeformTool {
|
||||
pub(crate) name: String,
|
||||
pub(crate) description: String,
|
||||
pub(crate) format: FreeformToolFormat,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
|
||||
pub struct FreeformToolFormat {
|
||||
pub(crate) r#type: String,
|
||||
pub(crate) syntax: String,
|
||||
pub(crate) definition: String,
|
||||
}
|
||||
|
||||
/// When serialized as JSON, this produces a valid "Tool" in the OpenAI
|
||||
/// Responses API.
|
||||
#[derive(Debug, Clone, Serialize, PartialEq)]
|
||||
@@ -30,6 +47,8 @@ pub(crate) enum OpenAiTool {
|
||||
Function(ResponsesApiTool),
|
||||
#[serde(rename = "local_shell")]
|
||||
LocalShell {},
|
||||
#[serde(rename = "custom")]
|
||||
Freeform(FreeformTool),
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
@@ -43,7 +62,7 @@ pub enum ConfigShellToolType {
|
||||
pub struct ToolsConfig {
|
||||
pub shell_type: ConfigShellToolType,
|
||||
pub plan_tool: bool,
|
||||
pub apply_patch_tool: bool,
|
||||
pub apply_patch_tool_type: Option<ApplyPatchToolType>,
|
||||
}
|
||||
|
||||
impl ToolsConfig {
|
||||
@@ -65,10 +84,22 @@ impl ToolsConfig {
|
||||
}
|
||||
}
|
||||
|
||||
let apply_patch_tool_type = match model_family.apply_patch_tool_type {
|
||||
Some(ApplyPatchToolType::Freeform) => Some(ApplyPatchToolType::Freeform),
|
||||
Some(ApplyPatchToolType::Function) => Some(ApplyPatchToolType::Function),
|
||||
None => {
|
||||
if include_apply_patch_tool {
|
||||
Some(ApplyPatchToolType::Freeform)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
Self {
|
||||
shell_type,
|
||||
plan_tool: include_plan_tool,
|
||||
apply_patch_tool: include_apply_patch_tool || model_family.uses_apply_patch_tool,
|
||||
apply_patch_tool_type,
|
||||
}
|
||||
}
|
||||
}
|
||||
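// Illustrative summary, not part of the diff above: how the effective apply_patch
// tool is chosen from the inputs shown in ToolsConfig::new.
//
//   model_family.apply_patch_tool_type   include_apply_patch_tool   tool emitted
//   Some(Freeform)                       (ignored)                  freeform "custom" tool
//   Some(Function)                       (ignored)                  JSON function tool
//   None                                 true                       freeform "custom" tool
//   None                                 false                      none
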
@@ -115,16 +146,20 @@ fn create_shell_tool() -> OpenAiTool {
|
||||
"command".to_string(),
|
||||
JsonSchema::Array {
|
||||
items: Box::new(JsonSchema::String { description: None }),
|
||||
description: None,
|
||||
description: Some("The command to execute".to_string()),
|
||||
},
|
||||
);
|
||||
properties.insert(
|
||||
"workdir".to_string(),
|
||||
JsonSchema::String { description: None },
|
||||
JsonSchema::String {
|
||||
description: Some("The working directory to execute the command in".to_string()),
|
||||
},
|
||||
);
|
||||
properties.insert(
|
||||
"timeout".to_string(),
|
||||
JsonSchema::Number { description: None },
|
||||
"timeout_ms".to_string(),
|
||||
JsonSchema::Number {
|
||||
description: Some("The timeout for the command in milliseconds".to_string()),
|
||||
},
|
||||
);
|
||||
|
||||
OpenAiTool::Function(ResponsesApiTool {
|
||||
@@ -155,7 +190,7 @@ fn create_shell_tool_for_sandbox(sandbox_policy: &SandboxPolicy) -> OpenAiTool {
|
||||
},
|
||||
);
|
||||
properties.insert(
|
||||
"timeout".to_string(),
|
||||
"timeout_ms".to_string(),
|
||||
JsonSchema::Number {
|
||||
description: Some("The timeout for the command in milliseconds".to_string()),
|
||||
},
|
||||
@@ -171,7 +206,7 @@ fn create_shell_tool_for_sandbox(sandbox_policy: &SandboxPolicy) -> OpenAiTool {
|
||||
properties.insert(
|
||||
"justification".to_string(),
|
||||
JsonSchema::String {
|
||||
description: Some("Only set if ask_for_escalated_permissions is true. 1-sentence explanation of why we want to run this command.".to_string()),
|
||||
description: Some("Only set if with_escalated_permissions is true. 1-sentence explanation of why we want to run this command.".to_string()),
|
||||
},
|
||||
);
|
||||
}
|
||||
@@ -237,92 +272,16 @@ The shell tool is used to execute shell commands.
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
/// TODO(dylan): deprecate once we get rid of json tool
|
||||
#[derive(Serialize, Deserialize)]
|
||||
pub(crate) struct ApplyPatchToolArgs {
|
||||
pub(crate) input: String,
|
||||
}
|
||||
|
||||
fn create_apply_patch_tool() -> OpenAiTool {
|
||||
// Minimal schema: one required string argument containing the patch body
|
||||
let mut properties = BTreeMap::new();
|
||||
properties.insert(
|
||||
"input".to_string(),
|
||||
JsonSchema::String {
|
||||
description: Some(r#"The entire contents of the apply_patch command"#.to_string()),
|
||||
},
|
||||
);
|
||||
|
||||
OpenAiTool::Function(ResponsesApiTool {
|
||||
name: "apply_patch".to_string(),
|
||||
description: r#"Use this tool to edit files.
|
||||
Your patch language is a stripped‑down, file‑oriented diff format designed to be easy to parse and safe to apply. You can think of it as a high‑level envelope:
|
||||
|
||||
**_ Begin Patch
|
||||
[ one or more file sections ]
|
||||
_** End Patch
|
||||
|
||||
Within that envelope, you get a sequence of file operations.
|
||||
You MUST include a header to specify the action you are taking.
|
||||
Each operation starts with one of three headers:
|
||||
|
||||
**_ Add File: <path> - create a new file. Every following line is a + line (the initial contents).
|
||||
_** Delete File: <path> - remove an existing file. Nothing follows.
|
||||
\*\*\* Update File: <path> - patch an existing file in place (optionally with a rename).
|
||||
|
||||
May be immediately followed by \*\*\* Move to: <new path> if you want to rename the file.
|
||||
Then one or more “hunks”, each introduced by @@ (optionally followed by a hunk header).
|
||||
Within a hunk each line starts with:
|
||||
|
||||
- for inserted text,
|
||||
|
||||
* for removed text, or
|
||||
space ( ) for context.
|
||||
At the end of a truncated hunk you can emit \*\*\* End of File.
|
||||
|
||||
Patch := Begin { FileOp } End
|
||||
Begin := "**_ Begin Patch" NEWLINE
|
||||
End := "_** End Patch" NEWLINE
|
||||
FileOp := AddFile | DeleteFile | UpdateFile
|
||||
AddFile := "**_ Add File: " path NEWLINE { "+" line NEWLINE }
|
||||
DeleteFile := "_** Delete File: " path NEWLINE
|
||||
UpdateFile := "**_ Update File: " path NEWLINE [ MoveTo ] { Hunk }
|
||||
MoveTo := "_** Move to: " newPath NEWLINE
|
||||
Hunk := "@@" [ header ] NEWLINE { HunkLine } [ "*** End of File" NEWLINE ]
|
||||
HunkLine := (" " | "-" | "+") text NEWLINE
|
||||
|
||||
A full patch can combine several operations:
|
||||
|
||||
**_ Begin Patch
|
||||
_** Add File: hello.txt
|
||||
+Hello world
|
||||
**_ Update File: src/app.py
|
||||
_** Move to: src/main.py
|
||||
@@ def greet():
|
||||
-print("Hi")
|
||||
+print("Hello, world!")
|
||||
**_ Delete File: obsolete.txt
|
||||
_** End Patch
|
||||
|
||||
It is important to remember:
|
||||
|
||||
- You must include a header with your intended action (Add/Delete/Update)
|
||||
- You must prefix new lines with `+` even when creating a new file
|
||||
"#
|
||||
.to_string(),
|
||||
strict: false,
|
||||
parameters: JsonSchema::Object {
|
||||
properties,
|
||||
required: Some(vec!["input".to_string()]),
|
||||
additional_properties: Some(false),
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
/// Returns JSON values that are compatible with Function Calling in the
|
||||
/// Responses API:
|
||||
/// https://platform.openai.com/docs/guides/function-calling?api-mode=responses
|
||||
pub(crate) fn create_tools_json_for_responses_api(
|
||||
pub fn create_tools_json_for_responses_api(
|
||||
tools: &Vec<OpenAiTool>,
|
||||
) -> crate::error::Result<Vec<serde_json::Value>> {
|
||||
let mut tools_json = Vec::new();
|
||||
@@ -539,8 +498,15 @@ pub(crate) fn get_openai_tools(
|
||||
tools.push(PLAN_TOOL.clone());
|
||||
}
|
||||
|
||||
if config.apply_patch_tool {
|
||||
tools.push(create_apply_patch_tool());
|
||||
if let Some(apply_patch_tool_type) = &config.apply_patch_tool_type {
|
||||
match apply_patch_tool_type {
|
||||
ApplyPatchToolType::Freeform => {
|
||||
tools.push(create_apply_patch_freeform_tool());
|
||||
}
|
||||
ApplyPatchToolType::Function => {
|
||||
tools.push(create_apply_patch_json_tool());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if let Some(mcp_tools) = mcp_tools {
|
||||
@@ -571,6 +537,7 @@ mod tests {
|
||||
.map(|tool| match tool {
|
||||
OpenAiTool::Function(ResponsesApiTool { name, .. }) => name,
|
||||
OpenAiTool::LocalShell {} => "local_shell",
|
||||
OpenAiTool::Freeform(FreeformTool { name, .. }) => name,
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
@@ -596,7 +563,7 @@ mod tests {
|
||||
AskForApproval::Never,
|
||||
SandboxPolicy::ReadOnly,
|
||||
true,
|
||||
model_family.uses_apply_patch_tool,
|
||||
false,
|
||||
);
|
||||
let tools = get_openai_tools(&config, Some(HashMap::new()));
|
||||
|
||||
@@ -611,7 +578,7 @@ mod tests {
|
||||
AskForApproval::Never,
|
||||
SandboxPolicy::ReadOnly,
|
||||
true,
|
||||
model_family.uses_apply_patch_tool,
|
||||
false,
|
||||
);
|
||||
let tools = get_openai_tools(&config, Some(HashMap::new()));
|
||||
|
||||
@@ -626,7 +593,7 @@ mod tests {
|
||||
AskForApproval::Never,
|
||||
SandboxPolicy::ReadOnly,
|
||||
false,
|
||||
model_family.uses_apply_patch_tool,
|
||||
false,
|
||||
);
|
||||
let tools = get_openai_tools(
|
||||
&config,
|
||||
@@ -720,7 +687,7 @@ mod tests {
|
||||
AskForApproval::Never,
|
||||
SandboxPolicy::ReadOnly,
|
||||
false,
|
||||
model_family.uses_apply_patch_tool,
|
||||
false,
|
||||
);
|
||||
|
||||
let tools = get_openai_tools(
|
||||
@@ -776,7 +743,7 @@ mod tests {
|
||||
AskForApproval::Never,
|
||||
SandboxPolicy::ReadOnly,
|
||||
false,
|
||||
model_family.uses_apply_patch_tool,
|
||||
false,
|
||||
);
|
||||
|
||||
let tools = get_openai_tools(
|
||||
@@ -827,7 +794,7 @@ mod tests {
|
||||
AskForApproval::Never,
|
||||
SandboxPolicy::ReadOnly,
|
||||
false,
|
||||
model_family.uses_apply_patch_tool,
|
||||
false,
|
||||
);
|
||||
|
||||
let tools = get_openai_tools(
|
||||
@@ -881,7 +848,7 @@ mod tests {
|
||||
AskForApproval::Never,
|
||||
SandboxPolicy::ReadOnly,
|
||||
false,
|
||||
model_family.uses_apply_patch_tool,
|
||||
false,
|
||||
);
|
||||
|
||||
let tools = get_openai_tools(
|
||||
|
||||
@@ -2,13 +2,13 @@ use std::collections::BTreeMap;
|
||||
use std::sync::LazyLock;
|
||||
|
||||
use crate::codex::Session;
|
||||
use crate::models::FunctionCallOutputPayload;
|
||||
use crate::models::ResponseInputItem;
|
||||
use crate::openai_tools::JsonSchema;
|
||||
use crate::openai_tools::OpenAiTool;
|
||||
use crate::openai_tools::ResponsesApiTool;
|
||||
use crate::protocol::Event;
|
||||
use crate::protocol::EventMsg;
|
||||
use codex_protocol::models::FunctionCallOutputPayload;
|
||||
use codex_protocol::models::ResponseInputItem;
|
||||
|
||||
// Use the canonical plan tool types from the protocol crate to ensure
|
||||
// type-identity matches events transported via `codex_protocol`.
|
||||
|
||||
@@ -1,18 +1,19 @@
|
||||
//! Project-level documentation discovery.
|
||||
//!
|
||||
//! Project-level documentation can be stored in a file named `AGENTS.md`.
|
||||
//! Currently, we include only the contents of the first file found as follows:
|
||||
//! Project-level documentation can be stored in files named `AGENTS.md`.
|
||||
//! We include the concatenation of all files found along the path from the
|
||||
//! repository root to the current working directory as follows:
|
||||
//!
|
||||
//! 1. Look for the doc file in the current working directory (as determined
|
||||
//! by the `Config`).
|
||||
//! 2. If not found, walk *upwards* until the Git repository root is reached
|
||||
//! (detected by the presence of a `.git` directory/file), or failing that,
|
||||
//! the filesystem root.
|
||||
//! 3. If the Git root is encountered, look for the doc file there. If it
|
||||
//! exists, the search stops – we do **not** walk past the Git root.
|
||||
//! 1. Determine the Git repository root by walking upwards from the current
|
||||
//! working directory until a `.git` directory or file is found. If no Git
|
||||
//! root is found, only the current working directory is considered.
|
||||
//! 2. Collect every `AGENTS.md` found from the repository root down to the
|
||||
//! current working directory (inclusive) and concatenate their contents in
|
||||
//! that order.
|
||||
//! 3. We do **not** walk past the Git root.
|
||||
|
||||
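// Illustrative sketch, not part of the diff above: with this layout (names
// hypothetical) and cwd = repo/workspace/crate_a,
//
//   repo/.git
//   repo/AGENTS.md                      -> "root doc"
//   repo/workspace/crate_a/AGENTS.md    -> "crate doc"
//
// read_project_docs returns Some("root doc\n\ncrate doc"): files are concatenated
// from the repository root down to cwd, and the combined size is capped by
// `project_doc_max_bytes`.
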
use crate::config::Config;
|
||||
use std::path::Path;
|
||||
use std::path::PathBuf;
|
||||
use tokio::io::AsyncReadExt;
|
||||
use tracing::error;
|
||||
|
||||
@@ -26,7 +27,7 @@ const PROJECT_DOC_SEPARATOR: &str = "\n\n--- project-doc ---\n\n";
|
||||
/// Combines `Config::instructions` and `AGENTS.md` (if present) into a single
|
||||
/// string of instructions.
|
||||
pub(crate) async fn get_user_instructions(config: &Config) -> Option<String> {
|
||||
match find_project_doc(config).await {
|
||||
match read_project_docs(config).await {
|
||||
Ok(Some(project_doc)) => match &config.user_instructions {
|
||||
Some(original_instructions) => Some(format!(
|
||||
"{original_instructions}{PROJECT_DOC_SEPARATOR}{project_doc}"
|
||||
@@ -41,95 +42,135 @@ pub(crate) async fn get_user_instructions(config: &Config) -> Option<String> {
|
||||
}
|
||||
}
|
||||
|
||||
/// Attempt to locate and load the project documentation. Currently, the search
|
||||
/// starts from `Config::cwd`, but if we may want to consider other directories
|
||||
/// in the future, e.g., additional writable directories in the `SandboxPolicy`.
|
||||
/// Attempt to locate and load the project documentation.
|
||||
///
|
||||
/// On success returns `Ok(Some(contents))`. If no documentation file is found
|
||||
/// the function returns `Ok(None)`. Unexpected I/O failures bubble up as
|
||||
/// `Err` so callers can decide how to handle them.
|
||||
async fn find_project_doc(config: &Config) -> std::io::Result<Option<String>> {
|
||||
let max_bytes = config.project_doc_max_bytes;
|
||||
/// On success returns `Ok(Some(contents))` where `contents` is the
|
||||
/// concatenation of all discovered docs. If no documentation file is found the
|
||||
/// function returns `Ok(None)`. Unexpected I/O failures bubble up as `Err` so
|
||||
/// callers can decide how to handle them.
|
||||
pub async fn read_project_docs(config: &Config) -> std::io::Result<Option<String>> {
|
||||
let max_total = config.project_doc_max_bytes;
|
||||
|
||||
// Attempt to load from the working directory first.
|
||||
if let Some(doc) = load_first_candidate(&config.cwd, CANDIDATE_FILENAMES, max_bytes).await? {
|
||||
return Ok(Some(doc));
|
||||
if max_total == 0 {
|
||||
return Ok(None);
|
||||
}
|
||||
|
||||
// Walk up towards the filesystem root, stopping once we encounter the Git
|
||||
// repository root. The presence of **either** a `.git` *file* or
|
||||
// *directory* counts.
|
||||
let mut dir = config.cwd.clone();
|
||||
let paths = discover_project_doc_paths(config)?;
|
||||
if paths.is_empty() {
|
||||
return Ok(None);
|
||||
}
|
||||
|
||||
// Canonicalize the path so that we do not end up in an infinite loop when
|
||||
// `cwd` contains `..` components.
|
||||
let mut remaining: u64 = max_total as u64;
|
||||
let mut parts: Vec<String> = Vec::new();
|
||||
|
||||
for p in paths {
|
||||
if remaining == 0 {
|
||||
break;
|
||||
}
|
||||
|
||||
let file = match tokio::fs::File::open(&p).await {
|
||||
Ok(f) => f,
|
||||
Err(e) if e.kind() == std::io::ErrorKind::NotFound => continue,
|
||||
Err(e) => return Err(e),
|
||||
};
|
||||
|
||||
let size = file.metadata().await?.len();
|
||||
let mut reader = tokio::io::BufReader::new(file).take(remaining);
|
||||
let mut data: Vec<u8> = Vec::new();
|
||||
reader.read_to_end(&mut data).await?;
|
||||
|
||||
if size > remaining {
|
||||
tracing::warn!(
|
||||
"Project doc `{}` exceeds remaining budget ({} bytes) - truncating.",
|
||||
p.display(),
|
||||
remaining,
|
||||
);
|
||||
}
|
||||
|
||||
let text = String::from_utf8_lossy(&data).to_string();
|
||||
if !text.trim().is_empty() {
|
||||
parts.push(text);
|
||||
remaining = remaining.saturating_sub(data.len() as u64);
|
||||
}
|
||||
}
|
||||
|
||||
if parts.is_empty() {
|
||||
Ok(None)
|
||||
} else {
|
||||
Ok(Some(parts.join("\n\n")))
|
||||
}
|
||||
}
|
||||
|
||||
/// Discover the list of AGENTS.md files using the same search rules as
|
||||
/// `read_project_docs`, but return the file paths instead of concatenated
|
||||
/// contents. The list is ordered from repository root to the current working
|
||||
/// directory (inclusive). Symlinks are allowed. When `project_doc_max_bytes`
|
||||
/// is zero, returns an empty list.
|
||||
pub fn discover_project_doc_paths(config: &Config) -> std::io::Result<Vec<PathBuf>> {
|
||||
let mut dir = config.cwd.clone();
|
||||
if let Ok(canon) = dir.canonicalize() {
|
||||
dir = canon;
|
||||
}
|
||||
|
||||
while let Some(parent) = dir.parent() {
|
||||
// `.git` can be a *file* (for worktrees or submodules) or a *dir*.
|
||||
let git_marker = dir.join(".git");
|
||||
let git_exists = match tokio::fs::metadata(&git_marker).await {
|
||||
// Build chain from cwd upwards and detect git root.
|
||||
let mut chain: Vec<PathBuf> = vec![dir.clone()];
|
||||
let mut git_root: Option<PathBuf> = None;
|
||||
let mut cursor = dir.clone();
|
||||
while let Some(parent) = cursor.parent() {
|
||||
let git_marker = cursor.join(".git");
|
||||
let git_exists = match std::fs::metadata(&git_marker) {
|
||||
Ok(_) => true,
|
||||
Err(e) if e.kind() == std::io::ErrorKind::NotFound => false,
|
||||
Err(e) => return Err(e),
|
||||
};
|
||||
|
||||
if git_exists {
|
||||
// We are at the repo root – attempt one final load.
|
||||
if let Some(doc) = load_first_candidate(&dir, CANDIDATE_FILENAMES, max_bytes).await? {
|
||||
return Ok(Some(doc));
|
||||
}
|
||||
git_root = Some(cursor.clone());
|
||||
break;
|
||||
}
|
||||
|
||||
dir = parent.to_path_buf();
|
||||
chain.push(parent.to_path_buf());
|
||||
cursor = parent.to_path_buf();
|
||||
}
|
||||
|
||||
Ok(None)
|
||||
}
|
||||
|
||||
/// Attempt to load the first candidate file found in `dir`. Returns the file
|
||||
/// contents (truncated if it exceeds `max_bytes`) when successful.
|
||||
async fn load_first_candidate(
|
||||
dir: &Path,
|
||||
names: &[&str],
|
||||
max_bytes: usize,
|
||||
) -> std::io::Result<Option<String>> {
|
||||
for name in names {
|
||||
let candidate = dir.join(name);
|
||||
|
||||
let file = match tokio::fs::File::open(&candidate).await {
|
||||
Err(e) if e.kind() == std::io::ErrorKind::NotFound => continue,
|
||||
Err(e) => return Err(e),
|
||||
Ok(f) => f,
|
||||
};
|
||||
|
||||
let size = file.metadata().await?.len();
|
||||
|
||||
let reader = tokio::io::BufReader::new(file);
|
||||
let mut data = Vec::with_capacity(std::cmp::min(size as usize, max_bytes));
|
||||
let mut limited = reader.take(max_bytes as u64);
|
||||
limited.read_to_end(&mut data).await?;
|
||||
|
||||
if size as usize > max_bytes {
|
||||
tracing::warn!(
|
||||
"Project doc `{}` exceeds {max_bytes} bytes - truncating.",
|
||||
candidate.display(),
|
||||
);
|
||||
let search_dirs: Vec<PathBuf> = if let Some(root) = git_root {
|
||||
let mut dirs: Vec<PathBuf> = Vec::new();
|
||||
let mut saw_root = false;
|
||||
for p in chain.iter().rev() {
|
||||
if !saw_root {
|
||||
if p == &root {
|
||||
saw_root = true;
|
||||
} else {
|
||||
continue;
|
||||
}
|
||||
}
|
||||
dirs.push(p.clone());
|
||||
}
|
||||
dirs
|
||||
} else {
|
||||
vec![config.cwd.clone()]
|
||||
};
|
||||
|
||||
let contents = String::from_utf8_lossy(&data).to_string();
|
||||
if contents.trim().is_empty() {
|
||||
// Empty file – treat as not found.
|
||||
continue;
|
||||
let mut found: Vec<PathBuf> = Vec::new();
|
||||
for d in search_dirs {
|
||||
for name in CANDIDATE_FILENAMES {
|
||||
let candidate = d.join(name);
|
||||
match std::fs::symlink_metadata(&candidate) {
|
||||
Ok(md) => {
|
||||
let ft = md.file_type();
|
||||
// Allow regular files and symlinks; opening will later fail for dangling links.
|
||||
if ft.is_file() || ft.is_symlink() {
|
||||
found.push(candidate);
|
||||
break;
|
||||
}
|
||||
}
|
||||
Err(e) if e.kind() == std::io::ErrorKind::NotFound => continue,
|
||||
Err(e) => return Err(e),
|
||||
}
|
||||
}
|
||||
|
||||
return Ok(Some(contents));
|
||||
}
|
||||
|
||||
Ok(None)
|
||||
Ok(found)
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
@@ -278,4 +319,32 @@ mod tests {
|
||||
|
||||
assert_eq!(res, Some(INSTRUCTIONS.to_string()));
|
||||
}
|
||||
|
||||
/// When both the repository root and the working directory contain
|
||||
/// AGENTS.md files, their contents are concatenated from root to cwd.
|
||||
#[tokio::test]
|
||||
async fn concatenates_root_and_cwd_docs() {
|
||||
let repo = tempfile::tempdir().expect("tempdir");
|
||||
|
||||
// Simulate a git repository.
|
||||
std::fs::write(
|
||||
repo.path().join(".git"),
|
||||
"gitdir: /path/to/actual/git/dir\n",
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
// Repo root doc.
|
||||
fs::write(repo.path().join("AGENTS.md"), "root doc").unwrap();
|
||||
|
||||
// Nested working directory with its own doc.
|
||||
let nested = repo.path().join("workspace/crate_a");
|
||||
std::fs::create_dir_all(&nested).unwrap();
|
||||
fs::write(nested.join("AGENTS.md"), "crate doc").unwrap();
|
||||
|
||||
let mut cfg = make_config(&repo, 4096, None);
|
||||
cfg.cwd = nested;
|
||||
|
||||
let res = get_user_instructions(&cfg).await.expect("doc expected");
|
||||
assert_eq!(res, "root doc\n\ncrate doc");
|
||||
}
|
||||
}
|
||||
|
||||
@@ -22,7 +22,7 @@ use uuid::Uuid;
use crate::config::Config;
use crate::git_info::GitInfo;
use crate::git_info::collect_git_info;
use crate::models::ResponseItem;
use codex_protocol::models::ResponseItem;

const SESSIONS_SUBDIR: &str = "sessions";

@@ -132,6 +132,8 @@ impl RolloutRecorder {
            | ResponseItem::LocalShellCall { .. }
            | ResponseItem::FunctionCall { .. }
            | ResponseItem::FunctionCallOutput { .. }
            | ResponseItem::CustomToolCall { .. }
            | ResponseItem::CustomToolCallOutput { .. }
            | ResponseItem::Reasoning { .. } => filtered.push(item.clone()),
            ResponseItem::Other => {
                // These should never be serialized.
@@ -194,6 +196,8 @@ impl RolloutRecorder {
            | ResponseItem::LocalShellCall { .. }
            | ResponseItem::FunctionCall { .. }
            | ResponseItem::FunctionCallOutput { .. }
            | ResponseItem::CustomToolCall { .. }
            | ResponseItem::CustomToolCallOutput { .. }
            | ResponseItem::Reasoning { .. } => items.push(item),
            ResponseItem::Other => {}
        },
@@ -317,6 +321,8 @@ async fn rollout_writer(
            | ResponseItem::LocalShellCall { .. }
            | ResponseItem::FunctionCall { .. }
            | ResponseItem::FunctionCallOutput { .. }
            | ResponseItem::CustomToolCall { .. }
            | ResponseItem::CustomToolCallOutput { .. }
            | ResponseItem::Reasoning { .. } => {
                writer.write_line(&item).await?;
            }
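A compact restatement (not in the diff) of the filtering rule applied at the three match sites above: only these ResponseItem variants are persisted to the rollout file, while ResponseItem::Other is dropped. The helper name is hypothetical, and the leading Message arm is an assumption since the hunks show only the trailing arms:

// Hypothetical predicate mirroring the three match arms above.
fn is_persisted(item: &ResponseItem) -> bool {
    matches!(
        item,
        ResponseItem::Message { .. } // assumed leading arm, not visible in the hunks
            | ResponseItem::LocalShellCall { .. }
            | ResponseItem::FunctionCall { .. }
            | ResponseItem::FunctionCallOutput { .. }
            | ResponseItem::CustomToolCall { .. }
            | ResponseItem::CustomToolCallOutput { .. }
            | ResponseItem::Reasoning { .. }
    )
}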
@@ -1,14 +1,24 @@
|
||||
use serde::Deserialize;
|
||||
use serde::Serialize;
|
||||
use shlex;
|
||||
use std::path::PathBuf;
|
||||
|
||||
#[derive(Debug, PartialEq, Eq)]
|
||||
#[derive(Debug, PartialEq, Eq, Clone, Serialize, Deserialize)]
|
||||
pub struct ZshShell {
|
||||
shell_path: String,
|
||||
zshrc_path: String,
|
||||
}
|
||||
|
||||
#[derive(Debug, PartialEq, Eq)]
|
||||
#[derive(Debug, PartialEq, Eq, Clone, Serialize, Deserialize)]
|
||||
pub struct PowerShellConfig {
|
||||
exe: String, // Executable name or path, e.g. "pwsh" or "powershell.exe".
|
||||
bash_exe_fallback: Option<PathBuf>, // In case the model generates a bash command.
|
||||
}
|
||||
|
||||
#[derive(Debug, PartialEq, Eq, Clone, Serialize, Deserialize)]
|
||||
pub enum Shell {
|
||||
Zsh(ZshShell),
|
||||
PowerShell(PowerShellConfig),
|
||||
Unknown,
|
||||
}
|
||||
|
||||
@@ -33,6 +43,61 @@ impl Shell {
|
||||
}
|
||||
Some(result)
|
||||
}
|
||||
Shell::PowerShell(ps) => {
|
||||
// If model generated a bash command, prefer a detected bash fallback
|
||||
if let Some(script) = strip_bash_lc(&command) {
|
||||
return match &ps.bash_exe_fallback {
|
||||
Some(bash) => Some(vec![
|
||||
bash.to_string_lossy().to_string(),
|
||||
"-lc".to_string(),
|
||||
script,
|
||||
]),
|
||||
|
||||
// No bash fallback → run the script under PowerShell.
|
||||
// It will likely fail (except for some simple commands), but the error
|
||||
// should give a clue to the model to fix upon retry that it's running under PowerShell.
|
||||
None => Some(vec![
|
||||
ps.exe.clone(),
|
||||
"-NoProfile".to_string(),
|
||||
"-Command".to_string(),
|
||||
script,
|
||||
]),
|
||||
};
|
||||
}
|
||||
|
||||
// Not a bash command. If model did not generate a PowerShell command,
|
||||
// turn it into a PowerShell command.
|
||||
let first = command.first().map(String::as_str);
|
||||
if first != Some(ps.exe.as_str()) {
|
||||
// TODO (CODEX_2900): Handle escaping newlines.
|
||||
if command.iter().any(|a| a.contains('\n') || a.contains('\r')) {
|
||||
return Some(command);
|
||||
}
|
||||
|
||||
let joined = shlex::try_join(command.iter().map(|s| s.as_str())).ok();
|
||||
return joined.map(|arg| {
|
||||
vec![
|
||||
ps.exe.clone(),
|
||||
"-NoProfile".to_string(),
|
||||
"-Command".to_string(),
|
||||
arg,
|
||||
]
|
||||
});
|
||||
}
|
||||
|
||||
// Model generated a PowerShell command. Run it.
|
||||
Some(command)
|
||||
}
|
||||
Shell::Unknown => None,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn name(&self) -> Option<String> {
|
||||
match self {
|
||||
Shell::Zsh(zsh) => std::path::Path::new(&zsh.shell_path)
|
||||
.file_name()
|
||||
.map(|s| s.to_string_lossy().to_string()),
|
||||
Shell::PowerShell(ps) => Some(ps.exe.clone()),
|
||||
Shell::Unknown => None,
|
||||
}
|
||||
}
|
||||
@@ -86,11 +151,51 @@ pub async fn default_user_shell() -> Shell {
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(not(target_os = "macos"))]
|
||||
#[cfg(all(not(target_os = "macos"), not(target_os = "windows")))]
|
||||
pub async fn default_user_shell() -> Shell {
|
||||
Shell::Unknown
|
||||
}
|
||||
|
||||
#[cfg(target_os = "windows")]
|
||||
pub async fn default_user_shell() -> Shell {
|
||||
use tokio::process::Command;
|
||||
|
||||
// Prefer PowerShell 7+ (`pwsh`) if available, otherwise fall back to Windows PowerShell.
|
||||
let has_pwsh = Command::new("pwsh")
|
||||
.arg("-NoLogo")
|
||||
.arg("-NoProfile")
|
||||
.arg("-Command")
|
||||
.arg("$PSVersionTable.PSVersion.Major")
|
||||
.output()
|
||||
.await
|
||||
.map(|o| o.status.success())
|
||||
.unwrap_or(false);
|
||||
let bash_exe = if Command::new("bash.exe")
|
||||
.arg("--version")
|
||||
.output()
|
||||
.await
|
||||
.ok()
|
||||
.map(|o| o.status.success())
|
||||
.unwrap_or(false)
|
||||
{
|
||||
which::which("bash.exe").ok()
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
if has_pwsh {
|
||||
Shell::PowerShell(PowerShellConfig {
|
||||
exe: "pwsh.exe".to_string(),
|
||||
bash_exe_fallback: bash_exe,
|
||||
})
|
||||
} else {
|
||||
Shell::PowerShell(PowerShellConfig {
|
||||
exe: "powershell.exe".to_string(),
|
||||
bash_exe_fallback: bash_exe,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
#[cfg(target_os = "macos")]
|
||||
mod tests {
|
||||
@@ -231,3 +336,97 @@ mod tests {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
#[cfg(target_os = "windows")]
|
||||
mod tests_windows {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_format_default_shell_invocation_powershell() {
|
||||
let cases = vec![
|
||||
(
|
||||
Shell::PowerShell(PowerShellConfig {
|
||||
exe: "pwsh.exe".to_string(),
|
||||
bash_exe_fallback: None,
|
||||
}),
|
||||
vec!["bash", "-lc", "echo hello"],
|
||||
vec!["pwsh.exe", "-NoProfile", "-Command", "echo hello"],
|
||||
),
|
||||
(
|
||||
Shell::PowerShell(PowerShellConfig {
|
||||
exe: "powershell.exe".to_string(),
|
||||
bash_exe_fallback: None,
|
||||
}),
|
||||
vec!["bash", "-lc", "echo hello"],
|
||||
vec!["powershell.exe", "-NoProfile", "-Command", "echo hello"],
|
||||
),
|
||||
(
|
||||
Shell::PowerShell(PowerShellConfig {
|
||||
exe: "pwsh.exe".to_string(),
|
||||
bash_exe_fallback: Some(PathBuf::from("bash.exe")),
|
||||
}),
|
||||
vec!["bash", "-lc", "echo hello"],
|
||||
vec!["bash.exe", "-lc", "echo hello"],
|
||||
),
|
||||
(
|
||||
Shell::PowerShell(PowerShellConfig {
|
||||
exe: "pwsh.exe".to_string(),
|
||||
bash_exe_fallback: Some(PathBuf::from("bash.exe")),
|
||||
}),
|
||||
vec![
|
||||
"bash",
|
||||
"-lc",
|
||||
"apply_patch <<'EOF'\n*** Begin Patch\n*** Update File: destination_file.txt\n-original content\n+modified content\n*** End Patch\nEOF",
|
||||
],
|
||||
vec![
|
||||
"bash.exe",
|
||||
"-lc",
|
||||
"apply_patch <<'EOF'\n*** Begin Patch\n*** Update File: destination_file.txt\n-original content\n+modified content\n*** End Patch\nEOF",
|
||||
],
|
||||
),
|
||||
(
|
||||
Shell::PowerShell(PowerShellConfig {
|
||||
exe: "pwsh.exe".to_string(),
|
||||
bash_exe_fallback: Some(PathBuf::from("bash.exe")),
|
||||
}),
|
||||
vec!["echo", "hello"],
|
||||
vec!["pwsh.exe", "-NoProfile", "-Command", "echo hello"],
|
||||
),
|
||||
(
|
||||
Shell::PowerShell(PowerShellConfig {
|
||||
exe: "pwsh.exe".to_string(),
|
||||
bash_exe_fallback: Some(PathBuf::from("bash.exe")),
|
||||
}),
|
||||
vec!["pwsh.exe", "-NoProfile", "-Command", "echo hello"],
|
||||
vec!["pwsh.exe", "-NoProfile", "-Command", "echo hello"],
|
||||
),
|
||||
(
|
||||
// TODO (CODEX_2900): Handle escaping newlines for powershell invocation.
|
||||
Shell::PowerShell(PowerShellConfig {
|
||||
exe: "powershell.exe".to_string(),
|
||||
bash_exe_fallback: Some(PathBuf::from("bash.exe")),
|
||||
}),
|
||||
vec![
|
||||
"codex-mcp-server.exe",
|
||||
"--codex-run-as-apply-patch",
|
||||
"*** Begin Patch\n*** Update File: C:\\Users\\person\\destination_file.txt\n-original content\n+modified content\n*** End Patch",
|
||||
],
|
||||
vec![
|
||||
"codex-mcp-server.exe",
|
||||
"--codex-run-as-apply-patch",
|
||||
"*** Begin Patch\n*** Update File: C:\\Users\\person\\destination_file.txt\n-original content\n+modified content\n*** End Patch",
|
||||
],
|
||||
),
|
||||
];
|
||||
|
||||
for (shell, input, expected_cmd) in cases {
|
||||
let actual_cmd = shell
|
||||
.format_default_shell_invocation(input.iter().map(|s| s.to_string()).collect());
|
||||
assert_eq!(
|
||||
actual_cmd,
|
||||
Some(expected_cmd.iter().map(|s| s.to_string()).collect())
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
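A hedged usage sketch of the PowerShell path above, constructed the same way the in-file tests do: a `bash -lc` command is rewritten to the detected bash fallback, while other commands would be wrapped in `-NoProfile -Command`. The concrete values are illustrative only:

// Assumes the Shell / PowerShellConfig types defined in this file.
let shell = Shell::PowerShell(PowerShellConfig {
    exe: "pwsh.exe".to_string(),
    bash_exe_fallback: Some(PathBuf::from("bash.exe")),
});

let cmd = vec!["bash".to_string(), "-lc".to_string(), "echo hello".to_string()];
let rewritten = shell.format_default_shell_invocation(cmd);
assert_eq!(
    rewritten,
    Some(vec!["bash.exe".into(), "-lc".into(), "echo hello".into()])
);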
|
||||
145 codex-rs/core/src/tool_apply_patch.rs Normal file
@@ -0,0 +1,145 @@
|
||||
use serde::Deserialize;
|
||||
use serde::Serialize;
|
||||
use std::collections::BTreeMap;
|
||||
|
||||
use crate::openai_tools::FreeformTool;
|
||||
use crate::openai_tools::FreeformToolFormat;
|
||||
use crate::openai_tools::JsonSchema;
|
||||
use crate::openai_tools::OpenAiTool;
|
||||
use crate::openai_tools::ResponsesApiTool;
|
||||
|
||||
#[derive(Serialize, Deserialize)]
|
||||
pub(crate) struct ApplyPatchToolArgs {
|
||||
pub(crate) input: String,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Hash)]
|
||||
#[serde(rename_all = "snake_case")]
|
||||
pub enum ApplyPatchToolType {
|
||||
Freeform,
|
||||
Function,
|
||||
}
|
||||
|
||||
/// Returns a custom tool that can be used to edit files. Well-suited for GPT-5 models
|
||||
/// https://platform.openai.com/docs/guides/function-calling#custom-tools
|
||||
pub(crate) fn create_apply_patch_freeform_tool() -> OpenAiTool {
|
||||
OpenAiTool::Freeform(FreeformTool {
|
||||
name: "apply_patch".to_string(),
|
||||
description: "Use the `apply_patch` tool to edit files".to_string(),
|
||||
format: FreeformToolFormat {
|
||||
r#type: "grammar".to_string(),
|
||||
syntax: "lark".to_string(),
|
||||
definition: r#"start: begin_patch hunk+ end_patch
|
||||
begin_patch: "*** Begin Patch" LF
|
||||
end_patch: "*** End Patch" LF?
|
||||
|
||||
hunk: add_hunk | delete_hunk | update_hunk
|
||||
add_hunk: "*** Add File: " filename LF add_line+
|
||||
delete_hunk: "*** Delete File: " filename LF
|
||||
update_hunk: "*** Update File: " filename LF change_move? change?
|
||||
|
||||
filename: /(.+)/
|
||||
add_line: "+" /(.+)/ LF -> line
|
||||
|
||||
change_move: "*** Move to: " filename LF
|
||||
change: (change_context | change_line)+ eof_line?
|
||||
change_context: ("@@" | "@@ " /(.+)/) LF
|
||||
change_line: ("+" | "-" | " ") /(.+)/ LF
|
||||
eof_line: "*** End of File" LF
|
||||
|
||||
%import common.LF
|
||||
"#
|
||||
.to_string(),
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
/// Returns a json tool that can be used to edit files. Should only be used with gpt-oss models
|
||||
pub(crate) fn create_apply_patch_json_tool() -> OpenAiTool {
|
||||
let mut properties = BTreeMap::new();
|
||||
properties.insert(
|
||||
"input".to_string(),
|
||||
JsonSchema::String {
|
||||
description: Some(r#"The entire contents of the apply_patch command"#.to_string()),
|
||||
},
|
||||
);
|
||||
|
||||
OpenAiTool::Function(ResponsesApiTool {
|
||||
name: "apply_patch".to_string(),
|
||||
description: r#"Use the `apply_patch` tool to edit files.
|
||||
Your patch language is a stripped‑down, file‑oriented diff format designed to be easy to parse and safe to apply. You can think of it as a high‑level envelope:
|
||||
|
||||
*** Begin Patch
|
||||
[ one or more file sections ]
|
||||
*** End Patch
|
||||
|
||||
Within that envelope, you get a sequence of file operations.
|
||||
You MUST include a header to specify the action you are taking.
|
||||
Each operation starts with one of three headers:
|
||||
|
||||
*** Add File: <path> - create a new file. Every following line is a + line (the initial contents).
|
||||
*** Delete File: <path> - remove an existing file. Nothing follows.
|
||||
*** Update File: <path> - patch an existing file in place (optionally with a rename).
|
||||
|
||||
May be immediately followed by *** Move to: <new path> if you want to rename the file.
|
||||
Then one or more “hunks”, each introduced by @@ (optionally followed by a hunk header).
|
||||
Within a hunk each line starts with:
|
||||
|
||||
For instructions on [context_before] and [context_after]:
|
||||
- By default, show 3 lines of code immediately above and 3 lines immediately below each change. If a change is within 3 lines of a previous change, do NOT duplicate the first change’s [context_after] lines in the second change’s [context_before] lines.
|
||||
- If 3 lines of context is insufficient to uniquely identify the snippet of code within the file, use the @@ operator to indicate the class or function to which the snippet belongs. For instance, we might have:
|
||||
@@ class BaseClass
|
||||
[3 lines of pre-context]
|
||||
- [old_code]
|
||||
+ [new_code]
|
||||
[3 lines of post-context]
|
||||
|
||||
- If a code block is repeated so many times in a class or function such that even a single `@@` statement and 3 lines of context cannot uniquely identify the snippet of code, you can use multiple `@@` statements to jump to the right context. For instance:
|
||||
|
||||
@@ class BaseClass
|
||||
@@ def method():
|
||||
[3 lines of pre-context]
|
||||
- [old_code]
|
||||
+ [new_code]
|
||||
[3 lines of post-context]
|
||||
|
||||
The full grammar definition is below:
|
||||
Patch := Begin { FileOp } End
|
||||
Begin := "*** Begin Patch" NEWLINE
|
||||
End := "*** End Patch" NEWLINE
|
||||
FileOp := AddFile | DeleteFile | UpdateFile
|
||||
AddFile := "*** Add File: " path NEWLINE { "+" line NEWLINE }
|
||||
DeleteFile := "*** Delete File: " path NEWLINE
|
||||
UpdateFile := "*** Update File: " path NEWLINE [ MoveTo ] { Hunk }
|
||||
MoveTo := "*** Move to: " newPath NEWLINE
|
||||
Hunk := "@@" [ header ] NEWLINE { HunkLine } [ "*** End of File" NEWLINE ]
|
||||
HunkLine := (" " | "-" | "+") text NEWLINE
|
||||
|
||||
A full patch can combine several operations:
|
||||
|
||||
*** Begin Patch
|
||||
*** Add File: hello.txt
|
||||
+Hello world
|
||||
*** Update File: src/app.py
|
||||
*** Move to: src/main.py
|
||||
@@ def greet():
|
||||
-print("Hi")
|
||||
+print("Hello, world!")
|
||||
*** Delete File: obsolete.txt
|
||||
*** End Patch
|
||||
|
||||
It is important to remember:
|
||||
|
||||
- You must include a header with your intended action (Add/Delete/Update)
|
||||
- You must prefix new lines with `+` even when creating a new file
|
||||
- File references can only be relative, NEVER ABSOLUTE.
|
||||
"#
|
||||
.to_string(),
|
||||
strict: false,
|
||||
parameters: JsonSchema::Object {
|
||||
properties,
|
||||
required: Some(vec!["input".to_string()]),
|
||||
additional_properties: Some(false),
|
||||
},
|
||||
})
|
||||
}
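A short sketch (an assumption, not part of the diff) of the argument payload the JSON variant expects when the model invokes the function tool; it relies only on the ApplyPatchToolArgs struct defined above:

// The model supplies the whole patch as the single `input` string.
let args = ApplyPatchToolArgs {
    input: "*** Begin Patch\n*** Add File: hello.txt\n+Hello world\n*** End Patch".to_string(),
};
// Serialized with serde_json this becomes: {"input":"*** Begin Patch\n..."}
let json = serde_json::to_string(&args).expect("serialize apply_patch args");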
|
||||
@@ -142,13 +142,14 @@ async fn includes_session_id_and_model_headers_in_request() {
|
||||
let mut config = load_default_config_for_test(&codex_home);
|
||||
config.model_provider = model_provider;
|
||||
|
||||
let conversation_manager = ConversationManager::default();
|
||||
let conversation_manager =
|
||||
ConversationManager::with_auth(CodexAuth::from_api_key("Test API Key"));
|
||||
let NewConversation {
|
||||
conversation: codex,
|
||||
conversation_id,
|
||||
session_configured: _,
|
||||
} = conversation_manager
|
||||
.new_conversation_with_auth(config, Some(CodexAuth::from_api_key("Test API Key")))
|
||||
.new_conversation(config)
|
||||
.await
|
||||
.expect("create new conversation");
|
||||
|
||||
@@ -207,9 +208,10 @@ async fn includes_base_instructions_override_in_request() {
|
||||
config.base_instructions = Some("test instructions".to_string());
|
||||
config.model_provider = model_provider;
|
||||
|
||||
let conversation_manager = ConversationManager::default();
|
||||
let conversation_manager =
|
||||
ConversationManager::with_auth(CodexAuth::from_api_key("Test API Key"));
|
||||
let codex = conversation_manager
|
||||
.new_conversation_with_auth(config, Some(CodexAuth::from_api_key("Test API Key")))
|
||||
.new_conversation(config)
|
||||
.await
|
||||
.expect("create new conversation")
|
||||
.conversation;
|
||||
@@ -262,9 +264,10 @@ async fn originator_config_override_is_used() {
|
||||
config.model_provider = model_provider;
|
||||
config.responses_originator_header = "my_override".to_owned();
|
||||
|
||||
let conversation_manager = ConversationManager::default();
|
||||
let conversation_manager =
|
||||
ConversationManager::with_auth(CodexAuth::from_api_key("Test API Key"));
|
||||
let codex = conversation_manager
|
||||
.new_conversation_with_auth(config, Some(CodexAuth::from_api_key("Test API Key")))
|
||||
.new_conversation(config)
|
||||
.await
|
||||
.expect("create new conversation")
|
||||
.conversation;
|
||||
@@ -318,13 +321,13 @@ async fn chatgpt_auth_sends_correct_request() {
|
||||
let codex_home = TempDir::new().unwrap();
|
||||
let mut config = load_default_config_for_test(&codex_home);
|
||||
config.model_provider = model_provider;
|
||||
let conversation_manager = ConversationManager::default();
|
||||
let conversation_manager = ConversationManager::with_auth(create_dummy_codex_auth());
|
||||
let NewConversation {
|
||||
conversation: codex,
|
||||
conversation_id,
|
||||
session_configured: _,
|
||||
} = conversation_manager
|
||||
.new_conversation_with_auth(config, Some(create_dummy_codex_auth()))
|
||||
.new_conversation(config)
|
||||
.await
|
||||
.expect("create new conversation");
|
||||
|
||||
@@ -411,7 +414,13 @@ async fn prefers_chatgpt_token_when_config_prefers_chatgpt() {
|
||||
config.model_provider = model_provider;
|
||||
config.preferred_auth_method = AuthMode::ChatGPT;
|
||||
|
||||
let conversation_manager = ConversationManager::default();
|
||||
let auth_manager =
|
||||
match CodexAuth::from_codex_home(codex_home.path(), config.preferred_auth_method) {
|
||||
Ok(Some(auth)) => codex_login::AuthManager::from_auth_for_testing(auth),
|
||||
Ok(None) => panic!("No CodexAuth found in codex_home"),
|
||||
Err(e) => panic!("Failed to load CodexAuth: {}", e),
|
||||
};
|
||||
let conversation_manager = ConversationManager::new(auth_manager);
|
||||
let NewConversation {
|
||||
conversation: codex,
|
||||
..
|
||||
@@ -486,7 +495,13 @@ async fn prefers_apikey_when_config_prefers_apikey_even_with_chatgpt_tokens() {
|
||||
config.model_provider = model_provider;
|
||||
config.preferred_auth_method = AuthMode::ApiKey;
|
||||
|
||||
let conversation_manager = ConversationManager::default();
|
||||
let auth_manager =
|
||||
match CodexAuth::from_codex_home(codex_home.path(), config.preferred_auth_method) {
|
||||
Ok(Some(auth)) => codex_login::AuthManager::from_auth_for_testing(auth),
|
||||
Ok(None) => panic!("No CodexAuth found in codex_home"),
|
||||
Err(e) => panic!("Failed to load CodexAuth: {}", e),
|
||||
};
|
||||
let conversation_manager = ConversationManager::new(auth_manager);
|
||||
let NewConversation {
|
||||
conversation: codex,
|
||||
..
|
||||
@@ -540,9 +555,10 @@ async fn includes_user_instructions_message_in_request() {
|
||||
config.model_provider = model_provider;
|
||||
config.user_instructions = Some("be nice".to_string());
|
||||
|
||||
let conversation_manager = ConversationManager::default();
|
||||
let conversation_manager =
|
||||
ConversationManager::with_auth(CodexAuth::from_api_key("Test API Key"));
|
||||
let codex = conversation_manager
|
||||
.new_conversation_with_auth(config, Some(CodexAuth::from_api_key("Test API Key")))
|
||||
.new_conversation(config)
|
||||
.await
|
||||
.expect("create new conversation")
|
||||
.conversation;
|
||||
@@ -632,9 +648,9 @@ async fn azure_overrides_assign_properties_used_for_responses_url() {
|
||||
let mut config = load_default_config_for_test(&codex_home);
|
||||
config.model_provider = provider;
|
||||
|
||||
let conversation_manager = ConversationManager::default();
|
||||
let conversation_manager = ConversationManager::with_auth(create_dummy_codex_auth());
|
||||
let codex = conversation_manager
|
||||
.new_conversation_with_auth(config, None)
|
||||
.new_conversation(config)
|
||||
.await
|
||||
.expect("create new conversation")
|
||||
.conversation;
|
||||
@@ -708,9 +724,9 @@ async fn env_var_overrides_loaded_auth() {
|
||||
let mut config = load_default_config_for_test(&codex_home);
|
||||
config.model_provider = provider;
|
||||
|
||||
let conversation_manager = ConversationManager::default();
|
||||
let conversation_manager = ConversationManager::with_auth(create_dummy_codex_auth());
|
||||
let codex = conversation_manager
|
||||
.new_conversation_with_auth(config, Some(create_dummy_codex_auth()))
|
||||
.new_conversation(config)
|
||||
.await
|
||||
.expect("create new conversation")
|
||||
.conversation;
|
||||
|
||||
@@ -141,9 +141,9 @@ async fn summarize_context_three_requests_and_instructions() {
|
||||
let home = TempDir::new().unwrap();
|
||||
let mut config = load_default_config_for_test(&home);
|
||||
config.model_provider = model_provider;
|
||||
let conversation_manager = ConversationManager::default();
|
||||
let conversation_manager = ConversationManager::with_auth(CodexAuth::from_api_key("dummy"));
|
||||
let codex = conversation_manager
|
||||
.new_conversation_with_auth(config, Some(CodexAuth::from_api_key("dummy")))
|
||||
.new_conversation(config)
|
||||
.await
|
||||
.unwrap()
|
||||
.conversation;
|
||||
|
||||
@@ -1,6 +1,9 @@
|
||||
#![allow(clippy::unwrap_used)]
|
||||
|
||||
use codex_core::ConversationManager;
|
||||
use codex_core::ModelProviderInfo;
|
||||
use codex_core::built_in_model_providers;
|
||||
use codex_core::model_family::find_family_for_model;
|
||||
use codex_core::protocol::AskForApproval;
|
||||
use codex_core::protocol::EventMsg;
|
||||
use codex_core::protocol::InputItem;
|
||||
@@ -8,6 +11,7 @@ use codex_core::protocol::Op;
|
||||
use codex_core::protocol::SandboxPolicy;
|
||||
use codex_core::protocol_config_types::ReasoningEffort;
|
||||
use codex_core::protocol_config_types::ReasoningSummary;
|
||||
use codex_core::shell::default_user_shell;
|
||||
use codex_login::CodexAuth;
|
||||
use core_test_support::load_default_config_for_test;
|
||||
use core_test_support::load_sse_fixture_with_id;
|
||||
@@ -24,6 +28,185 @@ fn sse_completed(id: &str) -> String {
|
||||
load_sse_fixture_with_id("tests/fixtures/completed_template.json", id)
|
||||
}
|
||||
|
||||
fn assert_tool_names(body: &serde_json::Value, expected_names: &[&str]) {
|
||||
assert_eq!(
|
||||
body["tools"]
|
||||
.as_array()
|
||||
.unwrap()
|
||||
.iter()
|
||||
.map(|t| t["name"].as_str().unwrap().to_string())
|
||||
.collect::<Vec<_>>(),
|
||||
expected_names
|
||||
);
|
||||
}
|
||||
|
||||
#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
|
||||
async fn codex_mini_latest_tools() {
|
||||
use pretty_assertions::assert_eq;
|
||||
|
||||
let server = MockServer::start().await;
|
||||
|
||||
let sse = sse_completed("resp");
|
||||
let template = ResponseTemplate::new(200)
|
||||
.insert_header("content-type", "text/event-stream")
|
||||
.set_body_raw(sse, "text/event-stream");
|
||||
|
||||
// Expect two POSTs to /v1/responses
|
||||
Mock::given(method("POST"))
|
||||
.and(path("/v1/responses"))
|
||||
.respond_with(template)
|
||||
.expect(2)
|
||||
.mount(&server)
|
||||
.await;
|
||||
|
||||
let model_provider = ModelProviderInfo {
|
||||
base_url: Some(format!("{}/v1", server.uri())),
|
||||
..built_in_model_providers()["openai"].clone()
|
||||
};
|
||||
|
||||
let cwd = TempDir::new().unwrap();
|
||||
let codex_home = TempDir::new().unwrap();
|
||||
let mut config = load_default_config_for_test(&codex_home);
|
||||
config.cwd = cwd.path().to_path_buf();
|
||||
config.model_provider = model_provider;
|
||||
config.user_instructions = Some("be consistent and helpful".to_string());
|
||||
|
||||
let conversation_manager =
|
||||
ConversationManager::with_auth(CodexAuth::from_api_key("Test API Key"));
|
||||
config.include_apply_patch_tool = false;
|
||||
config.model = "codex-mini-latest".to_string();
|
||||
config.model_family = find_family_for_model("codex-mini-latest").unwrap();
|
||||
|
||||
let codex = conversation_manager
|
||||
.new_conversation(config)
|
||||
.await
|
||||
.expect("create new conversation")
|
||||
.conversation;
|
||||
|
||||
codex
|
||||
.submit(Op::UserInput {
|
||||
items: vec![InputItem::Text {
|
||||
text: "hello 1".into(),
|
||||
}],
|
||||
})
|
||||
.await
|
||||
.unwrap();
|
||||
wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await;
|
||||
|
||||
codex
|
||||
.submit(Op::UserInput {
|
||||
items: vec![InputItem::Text {
|
||||
text: "hello 2".into(),
|
||||
}],
|
||||
})
|
||||
.await
|
||||
.unwrap();
|
||||
wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await;
|
||||
|
||||
let requests = server.received_requests().await.unwrap();
|
||||
assert_eq!(requests.len(), 2, "expected two POST requests");
|
||||
|
||||
let expected_instructions = [
|
||||
include_str!("../prompt.md"),
|
||||
include_str!("../../apply-patch/apply_patch_tool_instructions.md"),
|
||||
]
|
||||
.join("\n");
|
||||
|
||||
let body0 = requests[0].body_json::<serde_json::Value>().unwrap();
|
||||
assert_eq!(
|
||||
body0["instructions"],
|
||||
serde_json::json!(expected_instructions),
|
||||
);
|
||||
let body1 = requests[1].body_json::<serde_json::Value>().unwrap();
|
||||
assert_eq!(
|
||||
body1["instructions"],
|
||||
serde_json::json!(expected_instructions),
|
||||
);
|
||||
}
|
||||
|
||||
#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
|
||||
async fn prompt_tools_are_consistent_across_requests() {
|
||||
use pretty_assertions::assert_eq;
|
||||
|
||||
let server = MockServer::start().await;
|
||||
|
||||
let sse = sse_completed("resp");
|
||||
let template = ResponseTemplate::new(200)
|
||||
.insert_header("content-type", "text/event-stream")
|
||||
.set_body_raw(sse, "text/event-stream");
|
||||
|
||||
// Expect two POSTs to /v1/responses
|
||||
Mock::given(method("POST"))
|
||||
.and(path("/v1/responses"))
|
||||
.respond_with(template)
|
||||
.expect(2)
|
||||
.mount(&server)
|
||||
.await;
|
||||
|
||||
let model_provider = ModelProviderInfo {
|
||||
base_url: Some(format!("{}/v1", server.uri())),
|
||||
..built_in_model_providers()["openai"].clone()
|
||||
};
|
||||
|
||||
let cwd = TempDir::new().unwrap();
|
||||
let codex_home = TempDir::new().unwrap();
|
||||
let mut config = load_default_config_for_test(&codex_home);
|
||||
config.cwd = cwd.path().to_path_buf();
|
||||
config.model_provider = model_provider;
|
||||
config.user_instructions = Some("be consistent and helpful".to_string());
|
||||
config.include_apply_patch_tool = true;
|
||||
config.include_plan_tool = true;
|
||||
|
||||
let conversation_manager =
|
||||
ConversationManager::with_auth(CodexAuth::from_api_key("Test API Key"));
|
||||
let codex = conversation_manager
|
||||
.new_conversation(config)
|
||||
.await
|
||||
.expect("create new conversation")
|
||||
.conversation;
|
||||
|
||||
codex
|
||||
.submit(Op::UserInput {
|
||||
items: vec![InputItem::Text {
|
||||
text: "hello 1".into(),
|
||||
}],
|
||||
})
|
||||
.await
|
||||
.unwrap();
|
||||
wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await;
|
||||
|
||||
codex
|
||||
.submit(Op::UserInput {
|
||||
items: vec![InputItem::Text {
|
||||
text: "hello 2".into(),
|
||||
}],
|
||||
})
|
||||
.await
|
||||
.unwrap();
|
||||
wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await;
|
||||
|
||||
let requests = server.received_requests().await.unwrap();
|
||||
assert_eq!(requests.len(), 2, "expected two POST requests");
|
||||
|
||||
let expected_instructions: &str = include_str!("../prompt.md");
|
||||
// our internal implementation is responsible for keeping tools in sync
|
||||
// with the OpenAI schema, so we just verify the tool presence here
|
||||
let expected_tools_names: &[&str] = &["shell", "update_plan", "apply_patch"];
|
||||
let body0 = requests[0].body_json::<serde_json::Value>().unwrap();
|
||||
assert_eq!(
|
||||
body0["instructions"],
|
||||
serde_json::json!(expected_instructions),
|
||||
);
|
||||
assert_tool_names(&body0, expected_tools_names);
|
||||
|
||||
let body1 = requests[1].body_json::<serde_json::Value>().unwrap();
|
||||
assert_eq!(
|
||||
body1["instructions"],
|
||||
serde_json::json!(expected_instructions),
|
||||
);
|
||||
assert_tool_names(&body1, expected_tools_names);
|
||||
}
|
||||
|
||||
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
|
||||
async fn prefixes_context_and_instructions_once_and_consistently_across_requests() {
|
||||
use pretty_assertions::assert_eq;
|
||||
@@ -55,9 +238,10 @@ async fn prefixes_context_and_instructions_once_and_consistently_across_requests
|
||||
config.model_provider = model_provider;
|
||||
config.user_instructions = Some("be consistent and helpful".to_string());
|
||||
|
||||
let conversation_manager = ConversationManager::default();
|
||||
let conversation_manager =
|
||||
ConversationManager::with_auth(CodexAuth::from_api_key("Test API Key"));
|
||||
let codex = conversation_manager
|
||||
.new_conversation_with_auth(config, Some(CodexAuth::from_api_key("Test API Key")))
|
||||
.new_conversation(config)
|
||||
.await
|
||||
.expect("create new conversation")
|
||||
.conversation;
|
||||
@@ -85,9 +269,20 @@ async fn prefixes_context_and_instructions_once_and_consistently_across_requests
|
||||
let requests = server.received_requests().await.unwrap();
|
||||
assert_eq!(requests.len(), 2, "expected two POST requests");
|
||||
|
||||
let shell = default_user_shell().await;
|
||||
|
||||
let expected_env_text = format!(
|
||||
"<environment_context>\nCurrent working directory: {}\nApproval policy: on-request\nSandbox mode: read-only\nNetwork access: restricted\n</environment_context>",
|
||||
cwd.path().to_string_lossy()
|
||||
r#"<environment_context>
|
||||
<cwd>{}</cwd>
|
||||
<approval_policy>on-request</approval_policy>
|
||||
<sandbox_mode>read-only</sandbox_mode>
|
||||
<network_access>restricted</network_access>
|
||||
{}</environment_context>"#,
|
||||
cwd.path().to_string_lossy(),
|
||||
match shell.name() {
|
||||
Some(name) => format!(" <shell>{}</shell>\n", name),
|
||||
None => String::new(),
|
||||
}
|
||||
);
|
||||
let expected_ui_text =
|
||||
"<user_instructions>\n\nbe consistent and helpful\n\n</user_instructions>";
|
||||
@@ -165,9 +360,10 @@ async fn overrides_turn_context_but_keeps_cached_prefix_and_key_constant() {
|
||||
config.model_provider = model_provider;
|
||||
config.user_instructions = Some("be consistent and helpful".to_string());
|
||||
|
||||
let conversation_manager = ConversationManager::default();
|
||||
let conversation_manager =
|
||||
ConversationManager::with_auth(CodexAuth::from_api_key("Test API Key"));
|
||||
let codex = conversation_manager
|
||||
.new_conversation_with_auth(config, Some(CodexAuth::from_api_key("Test API Key")))
|
||||
.new_conversation(config)
|
||||
.await
|
||||
.expect("create new conversation")
|
||||
.conversation;
|
||||
@@ -183,12 +379,10 @@ async fn overrides_turn_context_but_keeps_cached_prefix_and_key_constant() {
|
||||
.unwrap();
|
||||
wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await;
|
||||
|
||||
// Change everything about the turn context.
|
||||
let new_cwd = TempDir::new().unwrap();
|
||||
let writable = TempDir::new().unwrap();
|
||||
codex
|
||||
.submit(Op::OverrideTurnContext {
|
||||
cwd: Some(new_cwd.path().to_path_buf()),
|
||||
cwd: None,
|
||||
approval_policy: Some(AskForApproval::Never),
|
||||
sandbox_policy: Some(SandboxPolicy::WorkspaceWrite {
|
||||
writable_roots: vec![writable.path().to_path_buf()],
|
||||
@@ -220,7 +414,6 @@ async fn overrides_turn_context_but_keeps_cached_prefix_and_key_constant() {
|
||||
|
||||
let body1 = requests[0].body_json::<serde_json::Value>().unwrap();
|
||||
let body2 = requests[1].body_json::<serde_json::Value>().unwrap();
|
||||
|
||||
// prompt_cache_key should remain constant across overrides
|
||||
assert_eq!(
|
||||
body1["prompt_cache_key"], body2["prompt_cache_key"],
|
||||
@@ -236,11 +429,13 @@ async fn overrides_turn_context_but_keeps_cached_prefix_and_key_constant() {
|
||||
"content": [ { "type": "input_text", "text": "hello 2" } ]
|
||||
});
|
||||
// After overriding the turn context, the environment context should be emitted again
|
||||
// reflecting the new cwd, approval policy and sandbox settings.
|
||||
let expected_env_text_2 = format!(
|
||||
"<environment_context>\nCurrent working directory: {}\nApproval policy: never\nSandbox mode: workspace-write\nNetwork access: enabled\n</environment_context>",
|
||||
new_cwd.path().to_string_lossy()
|
||||
);
|
||||
// reflecting the new approval policy and sandbox settings. Omit cwd because it did
|
||||
// not change.
|
||||
let expected_env_text_2 = r#"<environment_context>
|
||||
<approval_policy>never</approval_policy>
|
||||
<sandbox_mode>workspace-write</sandbox_mode>
|
||||
<network_access>enabled</network_access>
|
||||
</environment_context>"#;
|
||||
let expected_env_msg_2 = serde_json::json!({
|
||||
"type": "message",
|
||||
"id": serde_json::Value::Null,
|
||||
@@ -288,9 +483,10 @@ async fn per_turn_overrides_keep_cached_prefix_and_key_constant() {
|
||||
config.model_provider = model_provider;
|
||||
config.user_instructions = Some("be consistent and helpful".to_string());
|
||||
|
||||
let conversation_manager = ConversationManager::default();
|
||||
let conversation_manager =
|
||||
ConversationManager::with_auth(CodexAuth::from_api_key("Test API Key"));
|
||||
let codex = conversation_manager
|
||||
.new_conversation_with_auth(config, Some(CodexAuth::from_api_key("Test API Key")))
|
||||
.new_conversation(config)
|
||||
.await
|
||||
.expect("create new conversation")
|
||||
.conversation;
|
||||
|
||||
@@ -88,9 +88,10 @@ async fn continue_after_stream_error() {
    config.base_instructions = Some("You are a helpful assistant".to_string());
    config.model_provider = provider;

    let conversation_manager = ConversationManager::default();
    let conversation_manager =
        ConversationManager::with_auth(CodexAuth::from_api_key("Test API Key"));
    let codex = conversation_manager
        .new_conversation_with_auth(config, Some(CodexAuth::from_api_key("Test API Key")))
        .new_conversation(config)
        .await
        .unwrap()
        .conversation;

@@ -93,9 +93,10 @@ async fn retries_on_early_close() {
    let codex_home = TempDir::new().unwrap();
    let mut config = load_default_config_for_test(&codex_home);
    config.model_provider = model_provider;
    let conversation_manager = ConversationManager::default();
    let conversation_manager =
        ConversationManager::with_auth(CodexAuth::from_api_key("Test API Key"));
    let codex = conversation_manager
        .new_conversation_with_auth(config, Some(CodexAuth::from_api_key("Test API Key")))
        .new_conversation(config)
        .await
        .unwrap()
        .conversation;
@@ -25,6 +25,7 @@ codex-common = { path = "../common", features = [
|
||||
"sandbox_summary",
|
||||
] }
|
||||
codex-core = { path = "../core" }
|
||||
codex-login = { path = "../login" }
|
||||
codex-ollama = { path = "../ollama" }
|
||||
codex-protocol = { path = "../protocol" }
|
||||
owo-colors = "4.2.0"
|
||||
|
||||
@@ -20,6 +20,7 @@ use codex_core::protocol::McpToolCallEndEvent;
|
||||
use codex_core::protocol::PatchApplyBeginEvent;
|
||||
use codex_core::protocol::PatchApplyEndEvent;
|
||||
use codex_core::protocol::SessionConfiguredEvent;
|
||||
use codex_core::protocol::StreamErrorEvent;
|
||||
use codex_core::protocol::TaskCompleteEvent;
|
||||
use codex_core::protocol::TurnAbortReason;
|
||||
use codex_core::protocol::TurnDiffEvent;
|
||||
@@ -174,6 +175,9 @@ impl EventProcessor for EventProcessorWithHumanOutput {
|
||||
EventMsg::BackgroundEvent(BackgroundEventEvent { message }) => {
|
||||
ts_println!(self, "{}", message.style(self.dimmed));
|
||||
}
|
||||
EventMsg::StreamError(StreamErrorEvent { message }) => {
|
||||
ts_println!(self, "{}", message.style(self.dimmed));
|
||||
}
|
||||
EventMsg::TaskStarted => {
|
||||
// Ignore.
|
||||
}
|
||||
|
||||
@@ -20,6 +20,7 @@ use codex_core::protocol::InputItem;
|
||||
use codex_core::protocol::Op;
|
||||
use codex_core::protocol::TaskCompleteEvent;
|
||||
use codex_core::util::is_inside_git_repo;
|
||||
use codex_login::AuthManager;
|
||||
use codex_ollama::DEFAULT_OSS_MODEL;
|
||||
use codex_protocol::config_types::SandboxMode;
|
||||
use event_processor_with_human_output::EventProcessorWithHumanOutput;
|
||||
@@ -185,7 +186,10 @@ pub async fn run_main(cli: Cli, codex_linux_sandbox_exe: Option<PathBuf>) -> any
|
||||
std::process::exit(1);
|
||||
}
|
||||
|
||||
let conversation_manager = ConversationManager::default();
|
||||
let conversation_manager = ConversationManager::new(AuthManager::shared(
|
||||
config.codex_home.clone(),
|
||||
config.preferred_auth_method,
|
||||
));
|
||||
let NewConversation {
|
||||
conversation_id: _,
|
||||
conversation,
|
||||
|
||||
@@ -123,6 +123,155 @@ async fn test_apply_patch_tool() -> anyhow::Result<()> {
|
||||
// Start a mock model server
|
||||
let server = MockServer::start().await;
|
||||
|
||||
// First response: model calls apply_patch to create test.md
|
||||
let first = ResponseTemplate::new(200)
|
||||
.insert_header("content-type", "text/event-stream")
|
||||
.set_body_raw(
|
||||
load_sse_fixture_with_id_from_str(SSE_TOOL_CALL_ADD, "call1"),
|
||||
"text/event-stream",
|
||||
);
|
||||
|
||||
Mock::given(method("POST"))
|
||||
// .and(path("/v1/responses"))
|
||||
.respond_with(first)
|
||||
.up_to_n_times(1)
|
||||
.mount(&server)
|
||||
.await;
|
||||
|
||||
// Second response: model calls apply_patch to update test.md
|
||||
let second = ResponseTemplate::new(200)
|
||||
.insert_header("content-type", "text/event-stream")
|
||||
.set_body_raw(
|
||||
load_sse_fixture_with_id_from_str(SSE_TOOL_CALL_UPDATE, "call2"),
|
||||
"text/event-stream",
|
||||
);
|
||||
|
||||
Mock::given(method("POST"))
|
||||
.and(path("/v1/responses"))
|
||||
.respond_with(second)
|
||||
.up_to_n_times(1)
|
||||
.mount(&server)
|
||||
.await;
|
||||
|
||||
let final_completed = ResponseTemplate::new(200)
|
||||
.insert_header("content-type", "text/event-stream")
|
||||
.set_body_raw(
|
||||
load_sse_fixture_with_id_from_str(SSE_TOOL_CALL_COMPLETED, "resp3"),
|
||||
"text/event-stream",
|
||||
);
|
||||
|
||||
Mock::given(method("POST"))
|
||||
.and(path("/v1/responses"))
|
||||
.respond_with(final_completed)
|
||||
.expect(1)
|
||||
.mount(&server)
|
||||
.await;
|
||||
|
||||
let tmp_cwd = TempDir::new().unwrap();
|
||||
Command::cargo_bin("codex-exec")
|
||||
.context("should find binary for codex-exec")?
|
||||
.current_dir(tmp_cwd.path())
|
||||
.env("CODEX_HOME", tmp_cwd.path())
|
||||
.env("OPENAI_API_KEY", "dummy")
|
||||
.env("OPENAI_BASE_URL", format!("{}/v1", server.uri()))
|
||||
.arg("--skip-git-repo-check")
|
||||
.arg("-s")
|
||||
.arg("workspace-write")
|
||||
.arg("foo")
|
||||
.assert()
|
||||
.success();
|
||||
|
||||
// Verify final file contents
|
||||
let final_path = tmp_cwd.path().join("test.md");
|
||||
let contents = std::fs::read_to_string(&final_path)
|
||||
.unwrap_or_else(|e| panic!("failed reading {}: {e}", final_path.display()));
|
||||
assert_eq!(contents, "Final text\n");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[cfg(not(target_os = "windows"))]
|
||||
#[tokio::test]
|
||||
async fn test_apply_patch_freeform_tool() -> anyhow::Result<()> {
|
||||
use core_test_support::load_sse_fixture_with_id_from_str;
|
||||
use tempfile::TempDir;
|
||||
use wiremock::Mock;
|
||||
use wiremock::MockServer;
|
||||
use wiremock::ResponseTemplate;
|
||||
use wiremock::matchers::method;
|
||||
use wiremock::matchers::path;
|
||||
|
||||
const SSE_TOOL_CALL_ADD: &str = r#"[
|
||||
{
|
||||
"type": "response.output_item.done",
|
||||
"item": {
|
||||
"type": "custom_tool_call",
|
||||
"name": "apply_patch",
|
||||
"input": "*** Begin Patch\n*** Add File: test.md\n+Hello world\n*** End Patch",
|
||||
"call_id": "__ID__"
|
||||
}
|
||||
},
|
||||
{
|
||||
"type": "response.completed",
|
||||
"response": {
|
||||
"id": "__ID__",
|
||||
"usage": {
|
||||
"input_tokens": 0,
|
||||
"input_tokens_details": null,
|
||||
"output_tokens": 0,
|
||||
"output_tokens_details": null,
|
||||
"total_tokens": 0
|
||||
},
|
||||
"output": []
|
||||
}
|
||||
}
|
||||
]"#;
|
||||
|
||||
const SSE_TOOL_CALL_UPDATE: &str = r#"[
|
||||
{
|
||||
"type": "response.output_item.done",
|
||||
"item": {
|
||||
"type": "custom_tool_call",
|
||||
"name": "apply_patch",
|
||||
"input": "*** Begin Patch\n*** Update File: test.md\n@@\n-Hello world\n+Final text\n*** End Patch",
|
||||
"call_id": "__ID__"
|
||||
}
|
||||
},
|
||||
{
|
||||
"type": "response.completed",
|
||||
"response": {
|
||||
"id": "__ID__",
|
||||
"usage": {
|
||||
"input_tokens": 0,
|
||||
"input_tokens_details": null,
|
||||
"output_tokens": 0,
|
||||
"output_tokens_details": null,
|
||||
"total_tokens": 0
|
||||
},
|
||||
"output": []
|
||||
}
|
||||
}
|
||||
]"#;
|
||||
|
||||
const SSE_TOOL_CALL_COMPLETED: &str = r#"[
|
||||
{
|
||||
"type": "response.completed",
|
||||
"response": {
|
||||
"id": "__ID__",
|
||||
"usage": {
|
||||
"input_tokens": 0,
|
||||
"input_tokens_details": null,
|
||||
"output_tokens": 0,
|
||||
"output_tokens_details": null,
|
||||
"total_tokens": 0
|
||||
},
|
||||
"output": []
|
||||
}
|
||||
}
|
||||
]"#;
|
||||
|
||||
// Start a mock model server
|
||||
let server = MockServer::start().await;
|
||||
|
||||
// First response: model calls apply_patch to create test.md
|
||||
let first = ResponseTemplate::new(200)
|
||||
.insert_header("content-type", "text/event-stream")
|
||||
|
||||
@@ -26,7 +26,7 @@ multimap = "0.10.0"
|
||||
path-absolutize = "3.1.1"
|
||||
regex-lite = "0.1"
|
||||
serde = { version = "1.0.194", features = ["derive"] }
|
||||
serde_json = "1.0.142"
|
||||
serde_json = "1.0.143"
|
||||
serde_with = { version = "3", features = ["macros"] }
|
||||
|
||||
[dev-dependencies]
|
||||
|
||||
@@ -17,5 +17,5 @@ clap = { version = "4", features = ["derive"] }
|
||||
ignore = "0.4.23"
|
||||
nucleo-matcher = "0.3.1"
|
||||
serde = { version = "1", features = ["derive"] }
|
||||
serde_json = "1.0.142"
|
||||
serde_json = "1.0.143"
|
||||
tokio = { version = "1", features = ["full"] }
|
||||
|
||||
@@ -9,6 +9,7 @@ workspace = true
|
||||
[dependencies]
|
||||
base64 = "0.22"
|
||||
chrono = { version = "0.4", features = ["serde"] }
|
||||
codex-protocol = { path = "../protocol" }
|
||||
rand = "0.8"
|
||||
reqwest = { version = "0.12", features = ["json", "blocking"] }
|
||||
serde = { version = "1", features = ["derive"] }
|
||||
|
||||
129 codex-rs/login/src/auth_manager.rs Normal file
@@ -0,0 +1,129 @@
|
||||
use std::path::PathBuf;
|
||||
use std::sync::Arc;
|
||||
use std::sync::RwLock;
|
||||
|
||||
use crate::AuthMode;
|
||||
use crate::CodexAuth;
|
||||
|
||||
/// Internal cached auth state.
|
||||
#[derive(Clone, Debug)]
|
||||
struct CachedAuth {
|
||||
preferred_auth_mode: AuthMode,
|
||||
auth: Option<CodexAuth>,
|
||||
}
|
||||
|
||||
/// Central manager providing a single source of truth for auth.json derived
|
||||
/// authentication data. It loads once (or on preference change) and then
|
||||
/// hands out cloned `CodexAuth` values so the rest of the program has a
|
||||
/// consistent snapshot.
|
||||
///
|
||||
/// External modifications to `auth.json` will NOT be observed until
|
||||
/// `reload()` is called explicitly. This matches the design goal of avoiding
|
||||
/// different parts of the program seeing inconsistent auth data mid‑run.
|
||||
#[derive(Debug)]
|
||||
pub struct AuthManager {
|
||||
codex_home: PathBuf,
|
||||
inner: RwLock<CachedAuth>,
|
||||
}
|
||||
|
||||
impl AuthManager {
|
||||
/// Create a new manager loading the initial auth using the provided
|
||||
/// preferred auth method. Errors loading auth are swallowed; `auth()` will
|
||||
/// simply return `None` in that case so callers can treat it as an
|
||||
/// unauthenticated state.
|
||||
pub fn new(codex_home: PathBuf, preferred_auth_mode: AuthMode) -> Self {
|
||||
let auth = crate::CodexAuth::from_codex_home(&codex_home, preferred_auth_mode)
|
||||
.ok()
|
||||
.flatten();
|
||||
Self {
|
||||
codex_home,
|
||||
inner: RwLock::new(CachedAuth {
|
||||
preferred_auth_mode,
|
||||
auth,
|
||||
}),
|
||||
}
|
||||
}
|
||||
|
||||
/// Create an AuthManager with a specific CodexAuth, for testing only.
|
||||
pub fn from_auth_for_testing(auth: CodexAuth) -> Arc<Self> {
|
||||
let preferred_auth_mode = auth.mode;
|
||||
let cached = CachedAuth {
|
||||
preferred_auth_mode,
|
||||
auth: Some(auth),
|
||||
};
|
||||
Arc::new(Self {
|
||||
codex_home: PathBuf::new(),
|
||||
inner: RwLock::new(cached),
|
||||
})
|
||||
}
|
||||
|
||||
/// Current cached auth (clone). May be `None` if not logged in or load failed.
|
||||
pub fn auth(&self) -> Option<CodexAuth> {
|
||||
self.inner.read().ok().and_then(|c| c.auth.clone())
|
||||
}
|
||||
|
||||
/// Preferred auth method used when (re)loading.
|
||||
pub fn preferred_auth_method(&self) -> AuthMode {
|
||||
self.inner
|
||||
.read()
|
||||
.map(|c| c.preferred_auth_mode)
|
||||
.unwrap_or(AuthMode::ApiKey)
|
||||
}
|
||||
|
||||
/// Force a reload using the existing preferred auth method. Returns
|
||||
/// whether the auth value changed.
|
||||
pub fn reload(&self) -> bool {
|
||||
let preferred = self.preferred_auth_method();
|
||||
let new_auth = crate::CodexAuth::from_codex_home(&self.codex_home, preferred)
|
||||
.ok()
|
||||
.flatten();
|
||||
if let Ok(mut guard) = self.inner.write() {
|
||||
let changed = !AuthManager::auths_equal(&guard.auth, &new_auth);
|
||||
guard.auth = new_auth;
|
||||
changed
|
||||
} else {
|
||||
false
|
||||
}
|
||||
}
|
||||
|
||||
fn auths_equal(a: &Option<CodexAuth>, b: &Option<CodexAuth>) -> bool {
|
||||
match (a, b) {
|
||||
(None, None) => true,
|
||||
(Some(a), Some(b)) => a == b,
|
||||
_ => false,
|
||||
}
|
||||
}
|
||||
|
||||
/// Convenience constructor returning an `Arc` wrapper.
|
||||
pub fn shared(codex_home: PathBuf, preferred_auth_mode: AuthMode) -> Arc<Self> {
|
||||
Arc::new(Self::new(codex_home, preferred_auth_mode))
|
||||
}
|
||||
|
||||
/// Attempt to refresh the current auth token (if any). On success, reload
|
||||
/// the auth state from disk so other components observe refreshed token.
|
||||
pub async fn refresh_token(&self) -> std::io::Result<Option<String>> {
|
||||
let auth = match self.auth() {
|
||||
Some(a) => a,
|
||||
None => return Ok(None),
|
||||
};
|
||||
match auth.refresh_token().await {
|
||||
Ok(token) => {
|
||||
// Reload to pick up persisted changes.
|
||||
self.reload();
|
||||
Ok(Some(token))
|
||||
}
|
||||
Err(e) => Err(e),
|
||||
}
|
||||
}
|
||||
|
||||
/// Log out by deleting the on‑disk auth.json (if present). Returns Ok(true)
|
||||
/// if a file was removed, Ok(false) if no auth file existed. On success,
|
||||
/// reloads the in‑memory auth cache so callers immediately observe the
|
||||
/// unauthenticated state.
|
||||
pub fn logout(&self) -> std::io::Result<bool> {
|
||||
let removed = crate::logout(&self.codex_home)?;
|
||||
// Always reload to clear any cached auth (even if file absent).
|
||||
self.reload();
|
||||
Ok(removed)
|
||||
}
|
||||
}
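A brief usage sketch (not from the diff) of the flow the AuthManager above is designed for: construct one shared manager, hand out cloned snapshots via auth(), and call reload() after auth.json changes on disk. The codex_home path is hypothetical, and AuthMode is assumed to implement Debug:

// Assumes the AuthManager API defined above and AuthMode from codex_protocol.
let manager = AuthManager::shared(PathBuf::from("/home/user/.codex"), AuthMode::ChatGPT);

// Every caller sees the same cached snapshot until reload() is invoked.
if let Some(auth) = manager.auth() {
    println!("current auth mode: {:?}", auth.mode);
}

// After auth.json is rewritten on disk (e.g. by a login flow), refresh the cache.
let changed = manager.reload();
let _ = changed;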
|
||||
@@ -23,19 +23,15 @@ pub use crate::server::run_login_server;
pub use crate::token_data::TokenData;
use crate::token_data::parse_id_token;

mod auth_manager;
mod pkce;
mod server;
mod token_data;

pub const CLIENT_ID: &str = "app_EMoamEEZ73f0CkXaXp7hrann";
pub const OPENAI_API_KEY_ENV_VAR: &str = "OPENAI_API_KEY";

#[derive(Clone, Debug, PartialEq, Copy, Eq, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum AuthMode {
    ApiKey,
    ChatGPT,
}
pub use auth_manager::AuthManager;
pub use codex_protocol::mcp_protocol::AuthMode;

#[derive(Debug, Clone)]
pub struct CodexAuth {
@@ -17,7 +17,7 @@ workspace = true
|
||||
[dependencies]
|
||||
anyhow = "1"
|
||||
codex-arg0 = { path = "../arg0" }
|
||||
codex-common = { path = "../common" }
|
||||
codex-common = { path = "../common", features = ["cli"] }
|
||||
codex-core = { path = "../core" }
|
||||
codex-login = { path = "../login" }
|
||||
codex-protocol = { path = "../protocol" }
|
||||
|
||||
@@ -14,7 +14,10 @@ use codex_core::protocol::Event;
|
||||
use codex_core::protocol::EventMsg;
|
||||
use codex_core::protocol::ExecApprovalRequestEvent;
|
||||
use codex_core::protocol::ReviewDecision;
|
||||
use codex_login::AuthManager;
|
||||
use codex_protocol::mcp_protocol::AuthMode;
|
||||
use codex_protocol::mcp_protocol::GitDiffToRemoteResponse;
|
||||
use codex_protocol::mcp_protocol::RunSubagentParams;
|
||||
use mcp_types::JSONRPCErrorError;
|
||||
use mcp_types::RequestId;
|
||||
use tokio::sync::Mutex;
|
||||
@@ -25,6 +28,7 @@ use uuid::Uuid;
|
||||
use crate::error_code::INTERNAL_ERROR_CODE;
|
||||
use crate::error_code::INVALID_REQUEST_ERROR_CODE;
|
||||
use crate::json_to_toml::json_to_toml;
|
||||
use crate::mock_data::subagent_mock_response_from_diff;
|
||||
use crate::outgoing_message::OutgoingMessageSender;
|
||||
use crate::outgoing_message::OutgoingNotification;
|
||||
use codex_core::protocol::InputItem as CoreInputItem;
|
||||
@@ -38,6 +42,7 @@ use codex_protocol::mcp_protocol::AddConversationListenerParams;
|
||||
use codex_protocol::mcp_protocol::AddConversationSubscriptionResponse;
|
||||
use codex_protocol::mcp_protocol::ApplyPatchApprovalParams;
|
||||
use codex_protocol::mcp_protocol::ApplyPatchApprovalResponse;
|
||||
use codex_protocol::mcp_protocol::AuthStatusChangeNotification;
|
||||
use codex_protocol::mcp_protocol::ClientRequest;
|
||||
use codex_protocol::mcp_protocol::ConversationId;
|
||||
use codex_protocol::mcp_protocol::EXEC_COMMAND_APPROVAL_METHOD;
|
||||
@@ -46,7 +51,6 @@ use codex_protocol::mcp_protocol::ExecCommandApprovalResponse;
|
||||
use codex_protocol::mcp_protocol::InputItem as WireInputItem;
|
||||
use codex_protocol::mcp_protocol::InterruptConversationParams;
|
||||
use codex_protocol::mcp_protocol::InterruptConversationResponse;
|
||||
use codex_protocol::mcp_protocol::LOGIN_CHATGPT_COMPLETE_EVENT;
|
||||
use codex_protocol::mcp_protocol::LoginChatGptCompleteNotification;
|
||||
use codex_protocol::mcp_protocol::LoginChatGptResponse;
|
||||
use codex_protocol::mcp_protocol::NewConversationParams;
|
||||
@@ -57,6 +61,7 @@ use codex_protocol::mcp_protocol::SendUserMessageParams;
|
||||
use codex_protocol::mcp_protocol::SendUserMessageResponse;
|
||||
use codex_protocol::mcp_protocol::SendUserTurnParams;
|
||||
use codex_protocol::mcp_protocol::SendUserTurnResponse;
|
||||
use codex_protocol::mcp_protocol::ServerNotification;
|
||||
|
||||
// Duration before a ChatGPT login attempt is abandoned.
|
||||
const LOGIN_CHATGPT_TIMEOUT: Duration = Duration::from_secs(10 * 60);
|
||||
@@ -74,9 +79,11 @@ impl ActiveLogin {
|
||||
|
||||
/// Handles JSON-RPC messages for Codex conversations.
|
||||
pub(crate) struct CodexMessageProcessor {
|
||||
auth_manager: Arc<AuthManager>,
|
||||
conversation_manager: Arc<ConversationManager>,
|
||||
outgoing: Arc<OutgoingMessageSender>,
|
||||
codex_linux_sandbox_exe: Option<PathBuf>,
|
||||
config: Arc<Config>,
|
||||
conversation_listeners: HashMap<Uuid, oneshot::Sender<()>>,
|
||||
active_login: Arc<Mutex<Option<ActiveLogin>>>,
|
||||
// Queue of pending interrupt requests per conversation. We reply when TurnAborted arrives.
|
||||
@@ -85,14 +92,18 @@ pub(crate) struct CodexMessageProcessor {
|
||||
|
||||
impl CodexMessageProcessor {
|
||||
pub fn new(
|
||||
auth_manager: Arc<AuthManager>,
|
||||
conversation_manager: Arc<ConversationManager>,
|
||||
outgoing: Arc<OutgoingMessageSender>,
|
||||
codex_linux_sandbox_exe: Option<PathBuf>,
|
||||
config: Arc<Config>,
|
||||
) -> Self {
|
||||
Self {
|
||||
auth_manager,
|
||||
conversation_manager,
|
||||
outgoing,
|
||||
codex_linux_sandbox_exe,
|
||||
config,
|
||||
conversation_listeners: HashMap::new(),
|
||||
active_login: Arc::new(Mutex::new(None)),
|
||||
pending_interrupts: Arc::new(Mutex::new(HashMap::new())),
|
||||
@@ -122,32 +133,29 @@ impl CodexMessageProcessor {
|
||||
ClientRequest::RemoveConversationListener { request_id, params } => {
|
||||
self.remove_conversation_listener(request_id, params).await;
|
||||
}
|
||||
ClientRequest::GitDiffToRemote { request_id, params } => {
|
||||
self.git_diff_to_origin(request_id, params.cwd).await;
|
||||
}
|
||||
ClientRequest::LoginChatGpt { request_id } => {
|
||||
self.login_chatgpt(request_id).await;
|
||||
}
|
||||
ClientRequest::CancelLoginChatGpt { request_id, params } => {
|
||||
self.cancel_login_chatgpt(request_id, params.login_id).await;
|
||||
}
|
||||
ClientRequest::GitDiffToRemote { request_id, params } => {
|
||||
self.git_diff_to_origin(request_id, params.cwd).await;
|
||||
ClientRequest::LogoutChatGpt { request_id } => {
|
||||
self.logout_chatgpt(request_id).await;
|
||||
}
|
||||
ClientRequest::GetAuthStatus { request_id, params } => {
|
||||
self.get_auth_status(request_id, params).await;
|
||||
}
|
||||
ClientRequest::RunSubagent { request_id, params } => {
|
||||
self.run_subagent(request_id, params).await;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn login_chatgpt(&mut self, request_id: RequestId) {
|
||||
let config =
|
||||
match Config::load_with_cli_overrides(Default::default(), ConfigOverrides::default()) {
|
||||
Ok(cfg) => cfg,
|
||||
Err(err) => {
|
||||
let error = JSONRPCErrorError {
|
||||
code: INTERNAL_ERROR_CODE,
|
||||
message: format!("error loading config for login: {err}"),
|
||||
data: None,
|
||||
};
|
||||
self.outgoing.send_error(request_id, error).await;
|
||||
return;
|
||||
}
|
||||
};
|
||||
let config = self.config.as_ref();
|
||||
|
||||
let opts = LoginServerOptions {
|
||||
open_browser: false,
|
||||
@@ -184,6 +192,7 @@ impl CodexMessageProcessor {
|
||||
// Spawn background task to monitor completion.
|
||||
let outgoing_clone = self.outgoing.clone();
|
||||
let active_login = self.active_login.clone();
|
||||
let auth_manager = self.auth_manager.clone();
|
||||
tokio::spawn(async move {
|
||||
let (success, error_msg) = match tokio::time::timeout(
|
||||
LOGIN_CHATGPT_TIMEOUT,
|
||||
@@ -199,19 +208,30 @@ impl CodexMessageProcessor {
|
||||
(false, Some("Login timed out".to_string()))
|
||||
}
|
||||
};
|
||||
let notification = LoginChatGptCompleteNotification {
|
||||
let payload = LoginChatGptCompleteNotification {
|
||||
login_id,
|
||||
success,
|
||||
error: error_msg,
|
||||
};
|
||||
let params = serde_json::to_value(¬ification).ok();
|
||||
outgoing_clone
|
||||
.send_notification(OutgoingNotification {
|
||||
method: LOGIN_CHATGPT_COMPLETE_EVENT.to_string(),
|
||||
params,
|
||||
})
|
||||
.send_server_notification(ServerNotification::LoginChatGptComplete(payload))
|
||||
.await;
|
||||
|
||||
// Send an auth status change notification.
|
||||
if success {
|
||||
// Update in-memory auth cache now that login completed.
|
||||
auth_manager.reload();
|
||||
|
||||
// Notify clients with the actual current auth mode.
|
||||
let current_auth_method = auth_manager.auth().map(|a| a.mode);
|
||||
let payload = AuthStatusChangeNotification {
|
||||
auth_method: current_auth_method,
|
||||
};
|
||||
outgoing_clone
|
||||
.send_server_notification(ServerNotification::AuthStatusChange(payload))
|
||||
.await;
|
||||
}
|
||||
|
||||
// Clear the active login if it matches this attempt. It may have been replaced or cancelled.
|
||||
let mut guard = active_login.lock().await;
|
||||
if guard.as_ref().map(|l| l.login_id) == Some(login_id) {
|
||||
@@ -260,6 +280,112 @@ impl CodexMessageProcessor {
|
||||
}
|
||||
}
|
||||
|
||||
async fn logout_chatgpt(&mut self, request_id: RequestId) {
|
||||
{
|
||||
// Cancel any active login attempt.
|
||||
let mut guard = self.active_login.lock().await;
|
||||
if let Some(active) = guard.take() {
|
||||
active.drop();
|
||||
}
|
||||
}
|
||||
|
||||
if let Err(err) = self.auth_manager.logout() {
|
||||
let error = JSONRPCErrorError {
|
||||
code: INTERNAL_ERROR_CODE,
|
||||
message: format!("logout failed: {err}"),
|
||||
data: None,
|
||||
};
|
||||
self.outgoing.send_error(request_id, error).await;
|
||||
return;
|
||||
}
|
||||
|
||||
self.outgoing
|
||||
.send_response(
|
||||
request_id,
|
||||
codex_protocol::mcp_protocol::LogoutChatGptResponse {},
|
||||
)
|
||||
.await;
|
||||
|
||||
// Send auth status change notification reflecting the current auth mode
|
||||
// after logout (which may fall back to API key via env var).
|
||||
let current_auth_method = self.auth_manager.auth().map(|auth| auth.mode);
|
||||
let payload = AuthStatusChangeNotification {
|
||||
auth_method: current_auth_method,
|
||||
};
|
||||
self.outgoing
|
||||
.send_server_notification(ServerNotification::AuthStatusChange(payload))
|
||||
.await;
|
||||
}
|
||||
|
||||
async fn get_auth_status(
|
||||
&self,
|
||||
request_id: RequestId,
|
||||
params: codex_protocol::mcp_protocol::GetAuthStatusParams,
|
||||
) {
|
||||
let preferred_auth_method: AuthMode = self.auth_manager.preferred_auth_method();
|
||||
let include_token = params.include_token.unwrap_or(false);
|
||||
let do_refresh = params.refresh_token.unwrap_or(false);
|
||||
|
||||
if do_refresh && let Err(err) = self.auth_manager.refresh_token().await {
|
||||
tracing::warn!("failed to refresh token while getting auth status: {err}");
|
||||
}
|
||||
|
||||
let response = match self.auth_manager.auth() {
|
||||
Some(auth) => {
|
||||
let (reported_auth_method, token_opt) = match auth.get_token().await {
|
||||
Ok(token) if !token.is_empty() => {
|
||||
let tok = if include_token { Some(token) } else { None };
|
||||
(Some(auth.mode), tok)
|
||||
}
|
||||
Ok(_) => (None, None),
|
||||
Err(err) => {
|
||||
tracing::warn!("failed to get token for auth status: {err}");
|
||||
(None, None)
|
||||
}
|
||||
};
|
||||
codex_protocol::mcp_protocol::GetAuthStatusResponse {
|
||||
auth_method: reported_auth_method,
|
||||
preferred_auth_method,
|
||||
auth_token: token_opt,
|
||||
}
|
||||
}
|
||||
None => codex_protocol::mcp_protocol::GetAuthStatusResponse {
|
||||
auth_method: None,
|
||||
preferred_auth_method,
|
||||
auth_token: None,
|
||||
},
|
||||
};
|
||||
|
||||
self.outgoing.send_response(request_id, response).await;
|
||||
}
|
||||
|
||||
async fn run_subagent(&self, request_id: RequestId, params: RunSubagentParams) {
|
||||
let RunSubagentParams {
|
||||
conversation_id,
|
||||
subagant,
|
||||
input: _input,
|
||||
} = params;
|
||||
let Ok(_conversation) = self
|
||||
.conversation_manager
|
||||
.get_conversation(conversation_id.0)
|
||||
.await
|
||||
else {
|
||||
let error = JSONRPCErrorError {
|
||||
code: INVALID_REQUEST_ERROR_CODE,
|
||||
message: format!("conversation not found: {conversation_id}"),
|
||||
data: None,
|
||||
};
|
||||
self.outgoing.send_error(request_id, error).await;
|
||||
return;
|
||||
};
|
||||
// Build findings exclusively from the actual git diff used by the TUI.
|
||||
let response = match git_diff_to_remote(&self.config.cwd).await {
|
||||
Some(diff) => subagent_mock_response_from_diff(subagant, &self.config.cwd, &diff.diff),
|
||||
None => subagent_mock_response_from_diff(subagant, &self.config.cwd, ""),
|
||||
};
|
||||
self.outgoing.send_response(request_id, response).await;
|
||||
}
|
||||
|
||||
async fn process_new_conversation(&self, request_id: RequestId, params: NewConversationParams) {
|
||||
let config = match derive_config_from_params(params, self.codex_linux_sandbox_exe.clone()) {
|
||||
Ok(config) => config,
|
||||
|
||||
@@ -268,6 +268,7 @@ async fn run_codex_tool_session_inner(
|
||||
| EventMsg::ExecCommandOutputDelta(_)
|
||||
| EventMsg::ExecCommandEnd(_)
|
||||
| EventMsg::BackgroundEvent(_)
|
||||
| EventMsg::StreamError(_)
|
||||
| EventMsg::PatchApplyBegin(_)
|
||||
| EventMsg::PatchApplyEnd(_)
|
||||
| EventMsg::TurnDiff(_)
|
||||
|
||||
@@ -1,9 +1,14 @@
|
||||
//! Prototype MCP server.
|
||||
#![deny(clippy::print_stdout, clippy::print_stderr)]
|
||||
|
||||
use std::io::ErrorKind;
|
||||
use std::io::Result as IoResult;
|
||||
use std::path::PathBuf;
|
||||
|
||||
use codex_common::CliConfigOverrides;
|
||||
use codex_core::config::Config;
|
||||
use codex_core::config::ConfigOverrides;
|
||||
|
||||
use mcp_types::JSONRPCMessage;
|
||||
use tokio::io::AsyncBufReadExt;
|
||||
use tokio::io::AsyncWriteExt;
|
||||
@@ -22,6 +27,7 @@ mod error_code;
|
||||
mod exec_approval;
|
||||
mod json_to_toml;
|
||||
pub(crate) mod message_processor;
|
||||
mod mock_data;
|
||||
mod outgoing_message;
|
||||
mod patch_approval;
|
||||
|
||||
@@ -41,7 +47,10 @@ pub use crate::patch_approval::PatchApprovalResponse;
|
||||
/// plenty for an interactive CLI.
|
||||
const CHANNEL_CAPACITY: usize = 128;
|
||||
|
||||
pub async fn run_main(codex_linux_sandbox_exe: Option<PathBuf>) -> IoResult<()> {
|
||||
pub async fn run_main(
|
||||
codex_linux_sandbox_exe: Option<PathBuf>,
|
||||
cli_config_overrides: CliConfigOverrides,
|
||||
) -> IoResult<()> {
|
||||
// Install a simple subscriber so `tracing` output is visible. Users can
|
||||
// control the log level with `RUST_LOG`.
|
||||
tracing_subscriber::fmt()
|
||||
@@ -77,10 +86,27 @@ pub async fn run_main(codex_linux_sandbox_exe: Option<PathBuf>) -> IoResult<()>
|
||||
}
|
||||
});
|
||||
|
||||
// Parse CLI overrides once and derive the base Config eagerly so later
|
||||
// components do not need to work with raw TOML values.
|
||||
let cli_kv_overrides = cli_config_overrides.parse_overrides().map_err(|e| {
|
||||
std::io::Error::new(
|
||||
ErrorKind::InvalidInput,
|
||||
format!("error parsing -c overrides: {e}"),
|
||||
)
|
||||
})?;
|
||||
let config = Config::load_with_cli_overrides(cli_kv_overrides, ConfigOverrides::default())
|
||||
.map_err(|e| {
|
||||
std::io::Error::new(ErrorKind::InvalidData, format!("error loading config: {e}"))
|
||||
})?;
|
||||
|
||||
// Task: process incoming messages.
|
||||
let processor_handle = tokio::spawn({
|
||||
let outgoing_message_sender = OutgoingMessageSender::new(outgoing_tx);
|
||||
let mut processor = MessageProcessor::new(outgoing_message_sender, codex_linux_sandbox_exe);
|
||||
let mut processor = MessageProcessor::new(
|
||||
outgoing_message_sender,
|
||||
codex_linux_sandbox_exe,
|
||||
std::sync::Arc::new(config),
|
||||
);
|
||||
async move {
|
||||
while let Some(msg) = incoming_rx.recv().await {
|
||||
match msg {
|
||||
|
||||
@@ -1,9 +1,10 @@
|
||||
use codex_arg0::arg0_dispatch_or_else;
|
||||
use codex_common::CliConfigOverrides;
|
||||
use codex_mcp_server::run_main;
|
||||
|
||||
fn main() -> anyhow::Result<()> {
|
||||
arg0_dispatch_or_else(|codex_linux_sandbox_exe| async move {
|
||||
run_main(codex_linux_sandbox_exe).await?;
|
||||
run_main(codex_linux_sandbox_exe, CliConfigOverrides::default()).await?;
|
||||
Ok(())
|
||||
})
|
||||
}
|
||||
|
||||
@@ -1,6 +1,5 @@
|
||||
use std::collections::HashMap;
|
||||
use std::path::PathBuf;
|
||||
use std::sync::Arc;
|
||||
|
||||
use crate::codex_message_processor::CodexMessageProcessor;
|
||||
use crate::codex_tool_config::CodexToolCallParam;
|
||||
@@ -12,8 +11,9 @@ use crate::outgoing_message::OutgoingMessageSender;
|
||||
use codex_protocol::mcp_protocol::ClientRequest;
|
||||
|
||||
use codex_core::ConversationManager;
|
||||
use codex_core::config::Config as CodexConfig;
|
||||
use codex_core::config::Config;
|
||||
use codex_core::protocol::Submission;
|
||||
use codex_login::AuthManager;
|
||||
use mcp_types::CallToolRequestParams;
|
||||
use mcp_types::CallToolResult;
|
||||
use mcp_types::ClientRequest as McpClientRequest;
|
||||
@@ -30,6 +30,7 @@ use mcp_types::ServerCapabilitiesTools;
|
||||
use mcp_types::ServerNotification;
|
||||
use mcp_types::TextContent;
|
||||
use serde_json::json;
|
||||
use std::sync::Arc;
|
||||
use tokio::sync::Mutex;
|
||||
use tokio::task;
|
||||
use uuid::Uuid;
|
||||
@@ -49,13 +50,18 @@ impl MessageProcessor {
|
||||
pub(crate) fn new(
|
||||
outgoing: OutgoingMessageSender,
|
||||
codex_linux_sandbox_exe: Option<PathBuf>,
|
||||
config: Arc<Config>,
|
||||
) -> Self {
|
||||
let outgoing = Arc::new(outgoing);
|
||||
let conversation_manager = Arc::new(ConversationManager::default());
|
||||
let auth_manager =
|
||||
AuthManager::shared(config.codex_home.clone(), config.preferred_auth_method);
|
||||
let conversation_manager = Arc::new(ConversationManager::new(auth_manager.clone()));
|
||||
let codex_message_processor = CodexMessageProcessor::new(
|
||||
auth_manager,
|
||||
conversation_manager.clone(),
|
||||
outgoing.clone(),
|
||||
codex_linux_sandbox_exe.clone(),
|
||||
config,
|
||||
);
|
||||
Self {
|
||||
codex_message_processor,
|
||||
@@ -344,7 +350,7 @@ impl MessageProcessor {
|
||||
}
|
||||
}
|
||||
async fn handle_tool_call_codex(&self, id: RequestId, arguments: Option<serde_json::Value>) {
|
||||
let (initial_prompt, config): (String, CodexConfig) = match arguments {
|
||||
let (initial_prompt, config): (String, Config) = match arguments {
|
||||
Some(json_val) => match serde_json::from_value::<CodexToolCallParam>(json_val) {
|
||||
Ok(tool_cfg) => match tool_cfg.into_config(self.codex_linux_sandbox_exe.clone()) {
|
||||
Ok(cfg) => cfg,
|
||||
|
||||
codex-rs/mcp-server/src/mock_data.rs (new file, 214 lines)
@@ -0,0 +1,214 @@
|
||||
use std::path::Path;
|
||||
use std::path::PathBuf;
|
||||
|
||||
use codex_protocol::mcp_protocol::CodeLocation;
|
||||
use codex_protocol::mcp_protocol::Finding;
|
||||
use codex_protocol::mcp_protocol::LineRange;
|
||||
use codex_protocol::mcp_protocol::ReviewOutput;
|
||||
use codex_protocol::mcp_protocol::RunSubagentResponse;
|
||||
use codex_protocol::mcp_protocol::Subagent;
|
||||
use codex_protocol::mcp_protocol::SubagentOutput;
|
||||
|
||||
/// Build a mock response for Review subagent using actual unified diff text.
|
||||
pub(crate) fn subagent_mock_response_from_diff(
|
||||
subagent: Subagent,
|
||||
cwd: &Path,
|
||||
diff: &str,
|
||||
) -> RunSubagentResponse {
|
||||
match subagent {
|
||||
Subagent::Review => {
|
||||
let findings = review_findings_from_unified_diff(cwd, diff);
|
||||
RunSubagentResponse {
|
||||
output: SubagentOutput::Review(ReviewOutput { findings }),
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Parse a unified diff and generate representative findings mapped to changed hunks.
|
||||
fn review_findings_from_unified_diff(cwd: &Path, diff: &str) -> Vec<Finding> {
|
||||
const TITLES: &[&str] = &[
|
||||
"Add a clarifying comment",
|
||||
"Consider extracting a helper function",
|
||||
"Prefer descriptive variable names",
|
||||
"Validate inputs and handle errors early",
|
||||
"Document the intent of this change",
|
||||
"Consider reducing nesting with early returns",
|
||||
"Add unit tests for this branch",
|
||||
"Ensure consistent logging and levels",
|
||||
];
|
||||
const BODIES: &[&str] = &[
|
||||
"Add a comment to this line to explain the rationale.",
|
||||
"This logic could be extracted for readability and reuse.",
|
||||
"Use a more descriptive identifier to clarify the purpose.",
|
||||
"Add a guard clause to handle invalid or edge inputs.",
|
||||
"Add a doc comment describing the behavior for maintainers.",
|
||||
"Flatten control flow using early returns where safe.",
|
||||
"Add a focused test that covers this behavior.",
|
||||
"Use the shared logger and appropriate log level.",
|
||||
];
|
||||
|
||||
let mut findings: Vec<Finding> = Vec::new();
|
||||
let mut current_file: Option<PathBuf> = None;
|
||||
let mut in_hunk: bool = false;
|
||||
let mut new_line: u32 = 1;
|
||||
let mut template_index: usize = 0;
|
||||
|
||||
for line in diff.lines() {
|
||||
if line.starts_with("diff --git ") {
|
||||
current_file = None;
|
||||
in_hunk = false;
|
||||
continue;
|
||||
}
|
||||
|
||||
if let Some(rest) = line.strip_prefix("+++ b/") {
|
||||
current_file = Some(cwd.join(rest.trim()));
|
||||
continue;
|
||||
}
|
||||
if line.starts_with("+++ ") || line.starts_with("--- ") {
|
||||
continue;
|
||||
}
|
||||
|
||||
if let Some(hunk_header) = line.strip_prefix("@@") {
|
||||
if let Some((_, after_plus)) = hunk_header.split_once('+') {
|
||||
let mut range_text = after_plus.trim();
|
||||
if let Some((seg, _)) = range_text.split_once(' ') {
|
||||
range_text = seg;
|
||||
}
|
||||
let (start, _count) = parse_start_count(range_text);
|
||||
new_line = start;
|
||||
in_hunk = true;
|
||||
}
|
||||
continue;
|
||||
}
|
||||
|
||||
if in_hunk {
|
||||
if line.starts_with(' ') {
|
||||
new_line = new_line.saturating_add(1);
|
||||
} else if line.starts_with('-') {
|
||||
// deletion: no advance of new_line
|
||||
} else if line.starts_with('+') && !line.starts_with("+++") {
|
||||
if let Some(path) = &current_file {
|
||||
let title = TITLES[template_index % TITLES.len()].to_string();
|
||||
let mut body = BODIES[template_index % BODIES.len()].to_string();
|
||||
let snippet = line.trim_start_matches('+').trim();
|
||||
if !snippet.is_empty() {
|
||||
body.push_str("\nSnippet: ");
|
||||
let truncated = if snippet.len() > 140 {
|
||||
let mut s = snippet[..140].to_string();
|
||||
s.push('…');
|
||||
s
|
||||
} else {
|
||||
snippet.to_string()
|
||||
};
|
||||
body.push_str(&truncated);
|
||||
}
|
||||
|
||||
findings.push(Finding {
|
||||
title,
|
||||
body,
|
||||
confidence_score: confidence_for_index(template_index),
|
||||
code_location: CodeLocation {
|
||||
absolute_file_path: to_forward_slashes(path),
|
||||
line_range: LineRange {
|
||||
start: new_line,
|
||||
end: new_line,
|
||||
},
|
||||
},
|
||||
});
|
||||
template_index += 1;
|
||||
}
|
||||
new_line = new_line.saturating_add(1);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if findings.len() > 50 {
|
||||
findings.truncate(50);
|
||||
}
|
||||
findings
|
||||
}
|
||||
|
||||
fn confidence_for_index(i: usize) -> f32 {
|
||||
let base = 0.72f32;
|
||||
let step = (i as f32 % 7.0) * 0.03;
|
||||
(base + step).min(0.95)
|
||||
}
|
||||
|
||||
fn parse_start_count(text: &str) -> (u32, u32) {
|
||||
// Formats: "123,45" or just "123"
|
||||
if let Some((start_str, count_str)) = text.split_once(',') {
|
||||
let start = start_str
|
||||
.trim()
|
||||
.trim_start_matches('+')
|
||||
.parse()
|
||||
.unwrap_or(1);
|
||||
let count = count_str.trim().parse().unwrap_or(1);
|
||||
(start as u32, count as u32)
|
||||
} else {
|
||||
let start = text.trim().trim_start_matches('+').parse().unwrap_or(1);
|
||||
(start as u32, 1)
|
||||
}
|
||||
}
|
||||
|
||||
fn to_forward_slashes(path: &Path) -> String {
|
||||
path.to_string_lossy().replace('\\', "/")
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use std::path::Path;
|
||||
|
||||
#[test]
|
||||
fn returns_empty_findings_for_empty_diff() {
|
||||
let cwd = Path::new("/tmp");
|
||||
let resp = subagent_mock_response_from_diff(Subagent::Review, cwd, "");
|
||||
match resp.output {
|
||||
SubagentOutput::Review(ReviewOutput { findings }) => {
|
||||
assert!(findings.is_empty(), "Expected no findings for empty diff");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn generates_findings_for_added_lines_with_correct_locations() {
|
||||
let cwd = Path::new("/repo");
|
||||
let diff = r#"diff --git a/src/lib.rs b/src/lib.rs
|
||||
index 1111111..2222222 100644
|
||||
--- a/src/lib.rs
|
||||
+++ b/src/lib.rs
|
||||
@@ -10,2 +10,4 @@
|
||||
context
|
||||
+let x = 1;
|
||||
+// TODO: add docs
|
||||
context
|
||||
@@ -50,3 +52,5 @@
|
||||
-context
|
||||
+context changed
|
||||
+fn new_fn() {}
|
||||
+// comment
|
||||
"#;
|
||||
|
||||
let resp = subagent_mock_response_from_diff(Subagent::Review, cwd, diff);
|
||||
match resp.output {
|
||||
SubagentOutput::Review(ReviewOutput { findings }) => {
|
||||
// Added lines: 2 in first hunk, 3 in second hunk => 5 findings
|
||||
assert_eq!(findings.len(), 5, "Expected one finding per added line");
|
||||
|
||||
// Validate file path and line numbers for the first two additions
|
||||
let file_path = "/repo/src/lib.rs".to_string();
|
||||
assert_eq!(findings[0].code_location.absolute_file_path, file_path);
|
||||
assert_eq!(findings[0].code_location.line_range.start, 11);
|
||||
assert_eq!(findings[0].code_location.line_range.end, 11);
|
||||
assert_eq!(findings[1].code_location.absolute_file_path, file_path);
|
||||
assert_eq!(findings[1].code_location.line_range.start, 12);
|
||||
assert_eq!(findings[1].code_location.line_range.end, 12);
|
||||
|
||||
// Validate second hunk first two additions start at 52, then 53
|
||||
assert_eq!(findings[2].code_location.line_range.start, 52);
|
||||
assert_eq!(findings[3].code_location.line_range.start, 53);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
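As a quick worked example of the hunk arithmetic above (a sketch, not part of the change): `parse_start_count` reads the `+` side of a hunk header, and each context or added line then advances `new_line` by one, which is why the two additions under `@@ -10,2 +10,4 @@` in the test are reported at lines 11 and 12.
// Sketch only; mirrors the helper defined above.
assert_eq!(parse_start_count("10,4"), (10, 4)); // from "@@ -10,2 +10,4 @@"
assert_eq!(parse_start_count("52"), (52, 1));   // header without an explicit count
// Starting at 10, the leading context line moves new_line to 11, so
// "+let x = 1;" lands on line 11 and "+// TODO: add docs" on line 12.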
|
||||
@@ -3,6 +3,7 @@ use std::sync::atomic::AtomicI64;
|
||||
use std::sync::atomic::Ordering;
|
||||
|
||||
use codex_core::protocol::Event;
|
||||
use codex_protocol::mcp_protocol::ServerNotification;
|
||||
use mcp_types::JSONRPC_VERSION;
|
||||
use mcp_types::JSONRPCError;
|
||||
use mcp_types::JSONRPCErrorError;
|
||||
@@ -121,6 +122,17 @@ impl OutgoingMessageSender {
|
||||
.await;
|
||||
}
|
||||
|
||||
pub(crate) async fn send_server_notification(&self, notification: ServerNotification) {
|
||||
let method = format!("codex/event/{}", notification);
|
||||
let params = match serde_json::to_value(&notification) {
|
||||
Ok(serde_json::Value::Object(mut map)) => map.remove("data"),
|
||||
_ => None,
|
||||
};
|
||||
let outgoing_message =
|
||||
OutgoingMessage::Notification(OutgoingNotification { method, params });
|
||||
let _ = self.sender.send(outgoing_message).await;
|
||||
}
|
||||
|
||||
pub(crate) async fn send_notification(&self, notification: OutgoingNotification) {
|
||||
let outgoing_message = OutgoingMessage::Notification(notification);
|
||||
let _ = self.sender.send(outgoing_message).await;
|
||||
|
||||
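For orientation, a sketch of the wire shape `send_server_notification` produces, inferred from the serde and strum attributes on `ServerNotification` elsewhere in this diff (not an excerpt from the change): the snake_case `Display` impl supplies the method suffix, and the `tag`/`content` serialization supplies the params from the `data` field.
// Sketch: an auth status change would go out roughly as
// method: "codex/event/auth_status_change"
// params: {"authMethod": "chatgpt"}   (params omitted entirely when signed out)
let payload = AuthStatusChangeNotification { auth_method: Some(AuthMode::ChatGPT) };
let note = ServerNotification::AuthStatusChange(payload);
assert_eq!(format!("codex/event/{note}"), "codex/event/auth_status_change");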
codex-rs/mcp-server/tests/auth.rs (new file, 142 lines)
@@ -0,0 +1,142 @@
|
||||
use std::path::Path;
|
||||
|
||||
use codex_login::login_with_api_key;
|
||||
use codex_protocol::mcp_protocol::AuthMode;
|
||||
use codex_protocol::mcp_protocol::GetAuthStatusParams;
|
||||
use codex_protocol::mcp_protocol::GetAuthStatusResponse;
|
||||
use mcp_test_support::McpProcess;
|
||||
use mcp_test_support::to_response;
|
||||
use mcp_types::JSONRPCResponse;
|
||||
use mcp_types::RequestId;
|
||||
use pretty_assertions::assert_eq;
|
||||
use tempfile::TempDir;
|
||||
use tokio::time::timeout;
|
||||
|
||||
const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10);
|
||||
|
||||
// Helper to create a config.toml; mirrors create_conversation.rs
|
||||
fn create_config_toml(codex_home: &Path) -> std::io::Result<()> {
|
||||
let config_toml = codex_home.join("config.toml");
|
||||
std::fs::write(
|
||||
config_toml,
|
||||
r#"
|
||||
model = "mock-model"
|
||||
approval_policy = "never"
|
||||
sandbox_mode = "danger-full-access"
|
||||
|
||||
model_provider = "mock_provider"
|
||||
|
||||
[model_providers.mock_provider]
|
||||
name = "Mock provider for test"
|
||||
base_url = "http://127.0.0.1:0/v1"
|
||||
wire_api = "chat"
|
||||
request_max_retries = 0
|
||||
stream_max_retries = 0
|
||||
"#,
|
||||
)
|
||||
}
|
||||
|
||||
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
|
||||
async fn get_auth_status_no_auth() {
|
||||
let codex_home = TempDir::new().unwrap_or_else(|e| panic!("create tempdir: {e}"));
|
||||
create_config_toml(codex_home.path()).expect("write config.toml");
|
||||
|
||||
let mut mcp = McpProcess::new(codex_home.path())
|
||||
.await
|
||||
.expect("spawn mcp process");
|
||||
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize())
|
||||
.await
|
||||
.expect("init timeout")
|
||||
.expect("init failed");
|
||||
|
||||
let request_id = mcp
|
||||
.send_get_auth_status_request(GetAuthStatusParams {
|
||||
include_token: Some(true),
|
||||
refresh_token: Some(false),
|
||||
})
|
||||
.await
|
||||
.expect("send getAuthStatus");
|
||||
|
||||
let resp: JSONRPCResponse = timeout(
|
||||
DEFAULT_READ_TIMEOUT,
|
||||
mcp.read_stream_until_response_message(RequestId::Integer(request_id)),
|
||||
)
|
||||
.await
|
||||
.expect("getAuthStatus timeout")
|
||||
.expect("getAuthStatus response");
|
||||
let status: GetAuthStatusResponse = to_response(resp).expect("deserialize status");
|
||||
assert_eq!(status.auth_method, None, "expected no auth method");
|
||||
assert_eq!(status.auth_token, None, "expected no token");
|
||||
}
|
||||
|
||||
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
|
||||
async fn get_auth_status_with_api_key() {
|
||||
let codex_home = TempDir::new().unwrap_or_else(|e| panic!("create tempdir: {e}"));
|
||||
create_config_toml(codex_home.path()).expect("write config.toml");
|
||||
login_with_api_key(codex_home.path(), "sk-test-key").expect("seed api key");
|
||||
|
||||
let mut mcp = McpProcess::new(codex_home.path())
|
||||
.await
|
||||
.expect("spawn mcp process");
|
||||
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize())
|
||||
.await
|
||||
.expect("init timeout")
|
||||
.expect("init failed");
|
||||
|
||||
let request_id = mcp
|
||||
.send_get_auth_status_request(GetAuthStatusParams {
|
||||
include_token: Some(true),
|
||||
refresh_token: Some(false),
|
||||
})
|
||||
.await
|
||||
.expect("send getAuthStatus");
|
||||
|
||||
let resp: JSONRPCResponse = timeout(
|
||||
DEFAULT_READ_TIMEOUT,
|
||||
mcp.read_stream_until_response_message(RequestId::Integer(request_id)),
|
||||
)
|
||||
.await
|
||||
.expect("getAuthStatus timeout")
|
||||
.expect("getAuthStatus response");
|
||||
let status: GetAuthStatusResponse = to_response(resp).expect("deserialize status");
|
||||
assert_eq!(status.auth_method, Some(AuthMode::ApiKey));
|
||||
assert_eq!(status.auth_token, Some("sk-test-key".to_string()));
|
||||
assert_eq!(status.preferred_auth_method, AuthMode::ChatGPT);
|
||||
}
|
||||
|
||||
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
|
||||
async fn get_auth_status_with_api_key_no_include_token() {
|
||||
let codex_home = TempDir::new().unwrap_or_else(|e| panic!("create tempdir: {e}"));
|
||||
create_config_toml(codex_home.path()).expect("write config.toml");
|
||||
login_with_api_key(codex_home.path(), "sk-test-key").expect("seed api key");
|
||||
|
||||
let mut mcp = McpProcess::new(codex_home.path())
|
||||
.await
|
||||
.expect("spawn mcp process");
|
||||
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize())
|
||||
.await
|
||||
.expect("init timeout")
|
||||
.expect("init failed");
|
||||
|
||||
// Build params via struct so None field is omitted in wire JSON.
|
||||
let params = GetAuthStatusParams {
|
||||
include_token: None,
|
||||
refresh_token: Some(false),
|
||||
};
|
||||
let request_id = mcp
|
||||
.send_get_auth_status_request(params)
|
||||
.await
|
||||
.expect("send getAuthStatus");
|
||||
|
||||
let resp: JSONRPCResponse = timeout(
|
||||
DEFAULT_READ_TIMEOUT,
|
||||
mcp.read_stream_until_response_message(RequestId::Integer(request_id)),
|
||||
)
|
||||
.await
|
||||
.expect("getAuthStatus timeout")
|
||||
.expect("getAuthStatus response");
|
||||
let status: GetAuthStatusResponse = to_response(resp).expect("deserialize status");
|
||||
assert_eq!(status.auth_method, Some(AuthMode::ApiKey));
|
||||
assert!(status.auth_token.is_none(), "token must be omitted");
|
||||
assert_eq!(status.preferred_auth_method, AuthMode::ChatGPT);
|
||||
}
|
||||
@@ -86,9 +86,7 @@ async fn shell_command_approval_triggers_elicitation() -> anyhow::Result<()> {
|
||||
)
|
||||
.await??;
|
||||
|
||||
// This is the first request from the server, so the id should be 0 given
|
||||
// how things are currently implemented.
|
||||
let elicitation_request_id = RequestId::Integer(0);
|
||||
let elicitation_request_id = elicitation_request.id.clone();
|
||||
let params = serde_json::from_value::<ExecApprovalElicitRequestParams>(
|
||||
elicitation_request
|
||||
.params
|
||||
|
||||
@@ -13,6 +13,8 @@ use anyhow::Context;
|
||||
use assert_cmd::prelude::*;
|
||||
use codex_mcp_server::CodexToolCallParam;
|
||||
use codex_protocol::mcp_protocol::AddConversationListenerParams;
|
||||
use codex_protocol::mcp_protocol::CancelLoginChatGptParams;
|
||||
use codex_protocol::mcp_protocol::GetAuthStatusParams;
|
||||
use codex_protocol::mcp_protocol::InterruptConversationParams;
|
||||
use codex_protocol::mcp_protocol::NewConversationParams;
|
||||
use codex_protocol::mcp_protocol::RemoveConversationListenerParams;
|
||||
@@ -217,6 +219,34 @@ impl McpProcess {
|
||||
self.send_request("interruptConversation", params).await
|
||||
}
|
||||
|
||||
/// Send a `getAuthStatus` JSON-RPC request.
|
||||
pub async fn send_get_auth_status_request(
|
||||
&mut self,
|
||||
params: GetAuthStatusParams,
|
||||
) -> anyhow::Result<i64> {
|
||||
let params = Some(serde_json::to_value(params)?);
|
||||
self.send_request("getAuthStatus", params).await
|
||||
}
|
||||
|
||||
/// Send a `loginChatGpt` JSON-RPC request.
|
||||
pub async fn send_login_chat_gpt_request(&mut self) -> anyhow::Result<i64> {
|
||||
self.send_request("loginChatGpt", None).await
|
||||
}
|
||||
|
||||
/// Send a `cancelLoginChatGpt` JSON-RPC request.
|
||||
pub async fn send_cancel_login_chat_gpt_request(
|
||||
&mut self,
|
||||
params: CancelLoginChatGptParams,
|
||||
) -> anyhow::Result<i64> {
|
||||
let params = Some(serde_json::to_value(params)?);
|
||||
self.send_request("cancelLoginChatGpt", params).await
|
||||
}
|
||||
|
||||
/// Send a `logoutChatGpt` JSON-RPC request.
|
||||
pub async fn send_logout_chat_gpt_request(&mut self) -> anyhow::Result<i64> {
|
||||
self.send_request("logoutChatGpt", None).await
|
||||
}
|
||||
|
||||
async fn send_request(
|
||||
&mut self,
|
||||
method: &str,
|
||||
|
||||
codex-rs/mcp-server/tests/login.rs (new file, 146 lines)
@@ -0,0 +1,146 @@
|
||||
use std::path::Path;
|
||||
use std::time::Duration;
|
||||
|
||||
use codex_login::login_with_api_key;
|
||||
use codex_protocol::mcp_protocol::CancelLoginChatGptParams;
|
||||
use codex_protocol::mcp_protocol::CancelLoginChatGptResponse;
|
||||
use codex_protocol::mcp_protocol::GetAuthStatusParams;
|
||||
use codex_protocol::mcp_protocol::GetAuthStatusResponse;
|
||||
use codex_protocol::mcp_protocol::LoginChatGptResponse;
|
||||
use codex_protocol::mcp_protocol::LogoutChatGptResponse;
|
||||
use mcp_test_support::McpProcess;
|
||||
use mcp_test_support::to_response;
|
||||
use mcp_types::JSONRPCResponse;
|
||||
use mcp_types::RequestId;
|
||||
use tempfile::TempDir;
|
||||
use tokio::time::timeout;
|
||||
|
||||
const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10);
|
||||
|
||||
// Helper to create a config.toml; mirrors create_conversation.rs
|
||||
fn create_config_toml(codex_home: &Path) -> std::io::Result<()> {
|
||||
let config_toml = codex_home.join("config.toml");
|
||||
std::fs::write(
|
||||
config_toml,
|
||||
r#"
|
||||
model = "mock-model"
|
||||
approval_policy = "never"
|
||||
sandbox_mode = "danger-full-access"
|
||||
|
||||
model_provider = "mock_provider"
|
||||
|
||||
[model_providers.mock_provider]
|
||||
name = "Mock provider for test"
|
||||
base_url = "http://127.0.0.1:0/v1"
|
||||
wire_api = "chat"
|
||||
request_max_retries = 0
|
||||
stream_max_retries = 0
|
||||
"#,
|
||||
)
|
||||
}
|
||||
|
||||
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
|
||||
async fn logout_chatgpt_removes_auth() {
|
||||
let codex_home = TempDir::new().unwrap_or_else(|e| panic!("create tempdir: {e}"));
|
||||
create_config_toml(codex_home.path()).expect("write config.toml");
|
||||
login_with_api_key(codex_home.path(), "sk-test-key").expect("seed api key");
|
||||
assert!(codex_home.path().join("auth.json").exists());
|
||||
|
||||
let mut mcp = McpProcess::new(codex_home.path())
|
||||
.await
|
||||
.expect("spawn mcp process");
|
||||
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize())
|
||||
.await
|
||||
.expect("init timeout")
|
||||
.expect("init failed");
|
||||
|
||||
let id = mcp
|
||||
.send_logout_chat_gpt_request()
|
||||
.await
|
||||
.expect("send logoutChatGpt");
|
||||
let resp: JSONRPCResponse = timeout(
|
||||
DEFAULT_READ_TIMEOUT,
|
||||
mcp.read_stream_until_response_message(RequestId::Integer(id)),
|
||||
)
|
||||
.await
|
||||
.expect("logoutChatGpt timeout")
|
||||
.expect("logoutChatGpt response");
|
||||
let _ok: LogoutChatGptResponse = to_response(resp).expect("deserialize logout response");
|
||||
|
||||
assert!(
|
||||
!codex_home.path().join("auth.json").exists(),
|
||||
"auth.json should be deleted"
|
||||
);
|
||||
|
||||
// Verify status reflects signed-out state.
|
||||
let status_id = mcp
|
||||
.send_get_auth_status_request(GetAuthStatusParams {
|
||||
include_token: Some(true),
|
||||
refresh_token: Some(false),
|
||||
})
|
||||
.await
|
||||
.expect("send getAuthStatus");
|
||||
let status_resp: JSONRPCResponse = timeout(
|
||||
DEFAULT_READ_TIMEOUT,
|
||||
mcp.read_stream_until_response_message(RequestId::Integer(status_id)),
|
||||
)
|
||||
.await
|
||||
.expect("getAuthStatus timeout")
|
||||
.expect("getAuthStatus response");
|
||||
let status: GetAuthStatusResponse = to_response(status_resp).expect("deserialize status");
|
||||
assert_eq!(status.auth_method, None);
|
||||
assert_eq!(status.auth_token, None);
|
||||
}
|
||||
|
||||
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
|
||||
async fn login_and_cancel_chatgpt() {
|
||||
let codex_home = TempDir::new().unwrap_or_else(|e| panic!("create tempdir: {e}"));
|
||||
create_config_toml(codex_home.path()).expect("write config.toml");
|
||||
|
||||
let mut mcp = McpProcess::new(codex_home.path())
|
||||
.await
|
||||
.expect("spawn mcp process");
|
||||
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize())
|
||||
.await
|
||||
.expect("init timeout")
|
||||
.expect("init failed");
|
||||
|
||||
let login_id = mcp
|
||||
.send_login_chat_gpt_request()
|
||||
.await
|
||||
.expect("send loginChatGpt");
|
||||
let login_resp: JSONRPCResponse = timeout(
|
||||
DEFAULT_READ_TIMEOUT,
|
||||
mcp.read_stream_until_response_message(RequestId::Integer(login_id)),
|
||||
)
|
||||
.await
|
||||
.expect("loginChatGpt timeout")
|
||||
.expect("loginChatGpt response");
|
||||
let login: LoginChatGptResponse = to_response(login_resp).expect("deserialize login resp");
|
||||
|
||||
let cancel_id = mcp
|
||||
.send_cancel_login_chat_gpt_request(CancelLoginChatGptParams {
|
||||
login_id: login.login_id,
|
||||
})
|
||||
.await
|
||||
.expect("send cancelLoginChatGpt");
|
||||
let cancel_resp: JSONRPCResponse = timeout(
|
||||
DEFAULT_READ_TIMEOUT,
|
||||
mcp.read_stream_until_response_message(RequestId::Integer(cancel_id)),
|
||||
)
|
||||
.await
|
||||
.expect("cancelLoginChatGpt timeout")
|
||||
.expect("cancelLoginChatGpt response");
|
||||
let _ok: CancelLoginChatGptResponse =
|
||||
to_response(cancel_resp).expect("deserialize cancel response");
|
||||
|
||||
// Optionally observe the completion notification; do not fail if it races.
|
||||
let maybe_note = timeout(
|
||||
Duration::from_secs(2),
|
||||
mcp.read_stream_until_notification_message("codex/event/login_chat_gpt_complete"),
|
||||
)
|
||||
.await;
|
||||
if maybe_note.is_err() {
|
||||
eprintln!("warning: did not observe login_chat_gpt_complete notification after cancel");
|
||||
}
|
||||
}
|
||||
@@ -32,15 +32,21 @@ pub fn generate_ts(out_dir: &Path, prettier: Option<&Path>) -> Result<()> {
|
||||
codex_protocol::mcp_protocol::SendUserTurnResponse::export_all_to(out_dir)?;
|
||||
codex_protocol::mcp_protocol::InterruptConversationParams::export_all_to(out_dir)?;
|
||||
codex_protocol::mcp_protocol::InterruptConversationResponse::export_all_to(out_dir)?;
|
||||
codex_protocol::mcp_protocol::GitDiffToRemoteParams::export_all_to(out_dir)?;
|
||||
codex_protocol::mcp_protocol::GitDiffToRemoteResponse::export_all_to(out_dir)?;
|
||||
codex_protocol::mcp_protocol::LoginChatGptResponse::export_all_to(out_dir)?;
|
||||
codex_protocol::mcp_protocol::LoginChatGptCompleteNotification::export_all_to(out_dir)?;
|
||||
codex_protocol::mcp_protocol::CancelLoginChatGptParams::export_all_to(out_dir)?;
|
||||
codex_protocol::mcp_protocol::CancelLoginChatGptResponse::export_all_to(out_dir)?;
|
||||
codex_protocol::mcp_protocol::GitDiffToRemoteParams::export_all_to(out_dir)?;
|
||||
codex_protocol::mcp_protocol::LogoutChatGptParams::export_all_to(out_dir)?;
|
||||
codex_protocol::mcp_protocol::LogoutChatGptResponse::export_all_to(out_dir)?;
|
||||
codex_protocol::mcp_protocol::GetAuthStatusParams::export_all_to(out_dir)?;
|
||||
codex_protocol::mcp_protocol::GetAuthStatusResponse::export_all_to(out_dir)?;
|
||||
codex_protocol::mcp_protocol::ApplyPatchApprovalParams::export_all_to(out_dir)?;
|
||||
codex_protocol::mcp_protocol::ApplyPatchApprovalResponse::export_all_to(out_dir)?;
|
||||
codex_protocol::mcp_protocol::ExecCommandApprovalParams::export_all_to(out_dir)?;
|
||||
codex_protocol::mcp_protocol::ExecCommandApprovalResponse::export_all_to(out_dir)?;
|
||||
codex_protocol::mcp_protocol::ServerNotification::export_all_to(out_dir)?;
|
||||
|
||||
// Prepend header to each generated .ts file
|
||||
let ts_files = ts_files_in(out_dir)?;
|
||||
|
||||
@@ -11,12 +11,15 @@ path = "src/lib.rs"
|
||||
workspace = true
|
||||
|
||||
[dependencies]
|
||||
base64 = "0.22.1"
|
||||
mcp-types = { path = "../mcp-types" }
|
||||
mime_guess = "2.0.5"
|
||||
serde = { version = "1", features = ["derive"] }
|
||||
serde_bytes = "0.11"
|
||||
serde_json = "1"
|
||||
strum = "0.27.2"
|
||||
strum_macros = "0.27.2"
|
||||
tracing = "0.1.41"
|
||||
ts-rs = { version = "11", features = ["uuid-impl", "serde-json-impl"] }
|
||||
uuid = { version = "1", features = ["serde", "v4"] }
|
||||
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
pub mod config_types;
|
||||
pub mod mcp_protocol;
|
||||
pub mod message_history;
|
||||
pub mod models;
|
||||
pub mod parse_command;
|
||||
pub mod plan_tool;
|
||||
pub mod protocol;
|
||||
|
||||
@@ -13,6 +13,7 @@ use crate::protocol::TurnAbortReason;
|
||||
use mcp_types::RequestId;
|
||||
use serde::Deserialize;
|
||||
use serde::Serialize;
|
||||
use strum_macros::Display;
|
||||
use ts_rs::TS;
|
||||
use uuid::Uuid;
|
||||
|
||||
@@ -36,6 +37,13 @@ impl GitSha {
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq, Eq, TS)]
|
||||
#[serde(rename_all = "lowercase")]
|
||||
pub enum AuthMode {
|
||||
ApiKey,
|
||||
ChatGPT,
|
||||
}
|
||||
|
||||
/// Request from the client to the server.
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, TS)]
|
||||
#[serde(tag = "method", rename_all = "camelCase")]
|
||||
@@ -70,6 +78,11 @@ pub enum ClientRequest {
|
||||
request_id: RequestId,
|
||||
params: RemoveConversationListenerParams,
|
||||
},
|
||||
GitDiffToRemote {
|
||||
#[serde(rename = "id")]
|
||||
request_id: RequestId,
|
||||
params: GitDiffToRemoteParams,
|
||||
},
|
||||
LoginChatGpt {
|
||||
#[serde(rename = "id")]
|
||||
request_id: RequestId,
|
||||
@@ -79,13 +92,76 @@ pub enum ClientRequest {
|
||||
request_id: RequestId,
|
||||
params: CancelLoginChatGptParams,
|
||||
},
|
||||
GitDiffToRemote {
|
||||
LogoutChatGpt {
|
||||
#[serde(rename = "id")]
|
||||
request_id: RequestId,
|
||||
params: GitDiffToRemoteParams,
|
||||
},
|
||||
GetAuthStatus {
|
||||
#[serde(rename = "id")]
|
||||
request_id: RequestId,
|
||||
params: GetAuthStatusParams,
|
||||
},
|
||||
RunSubagent {
|
||||
#[serde(rename = "id")]
|
||||
request_id: RequestId,
|
||||
params: RunSubagentParams,
|
||||
},
|
||||
}
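For orientation (a sketch inferred from the serde attributes above, not part of the change): because the enum is internally tagged on `method` and `request_id` is renamed to `id`, each variant flattens into a JSON-RPC-style envelope.
// Sketch: a getAuthStatus request serializes to roughly
// {"method": "getAuthStatus", "id": 3, "params": {"includeToken": true}}
let req = ClientRequest::GetAuthStatus {
    request_id: RequestId::Integer(3),
    params: GetAuthStatusParams { include_token: Some(true), refresh_token: None },
};
let value = serde_json::to_value(&req).expect("serialize request");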
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, TS)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct RunSubagentParams {
|
||||
pub conversation_id: ConversationId,
|
||||
pub subagant: Subagent,
|
||||
pub input: Option<Vec<InputItem>>,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, TS)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub enum Subagent {
|
||||
Review,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, TS)]
|
||||
#[serde(tag = "type", content = "data", rename_all = "camelCase")]
|
||||
pub enum SubagentOutput {
|
||||
Review(ReviewOutput),
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, TS)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct RunSubagentResponse {
|
||||
pub output: SubagentOutput,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, TS)]
|
||||
#[serde(rename_all = "snake_case")]
|
||||
pub struct ReviewOutput {
|
||||
pub findings: Vec<Finding>,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, TS)]
|
||||
#[serde(rename_all = "snake_case")]
|
||||
pub struct Finding {
|
||||
pub title: String,
|
||||
pub body: String,
|
||||
pub confidence_score: f32,
|
||||
pub code_location: CodeLocation,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, TS)]
|
||||
#[serde(rename_all = "snake_case")]
|
||||
pub struct CodeLocation {
|
||||
pub absolute_file_path: String,
|
||||
pub line_range: LineRange,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, TS)]
|
||||
#[serde(rename_all = "snake_case")]
|
||||
pub struct LineRange {
|
||||
pub start: u32,
|
||||
pub end: u32,
|
||||
}
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default, TS)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct NewConversationParams {
|
||||
@@ -161,18 +237,6 @@ pub struct GitDiffToRemoteResponse {
|
||||
pub diff: String,
|
||||
}
|
||||
|
||||
// Event name for notifying client of login completion or failure.
|
||||
pub const LOGIN_CHATGPT_COMPLETE_EVENT: &str = "codex/event/login_chatgpt_complete";
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, TS)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct LoginChatGptCompleteNotification {
|
||||
pub login_id: Uuid,
|
||||
pub success: bool,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub error: Option<String>,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, TS)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct CancelLoginChatGptParams {
|
||||
@@ -189,6 +253,35 @@ pub struct GitDiffToRemoteParams {
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct CancelLoginChatGptResponse {}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, TS)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct LogoutChatGptParams {}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, TS)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct LogoutChatGptResponse {}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, TS)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct GetAuthStatusParams {
|
||||
/// If true, include the current auth token (if available) in the response.
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub include_token: Option<bool>,
|
||||
/// If true, attempt to refresh the token before returning status.
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub refresh_token: Option<bool>,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, TS)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct GetAuthStatusResponse {
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub auth_method: Option<AuthMode>,
|
||||
pub preferred_auth_method: AuthMode,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub auth_token: Option<String>,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, TS)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct SendUserMessageParams {
|
||||
@@ -321,6 +414,34 @@ pub struct ApplyPatchApprovalResponse {
|
||||
pub decision: ReviewDecision,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, TS)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct LoginChatGptCompleteNotification {
|
||||
pub login_id: Uuid,
|
||||
pub success: bool,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub error: Option<String>,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, TS)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct AuthStatusChangeNotification {
|
||||
/// Current authentication method; omitted if signed out.
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub auth_method: Option<AuthMode>,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, TS, Display)]
|
||||
#[serde(tag = "type", content = "data", rename_all = "snake_case")]
|
||||
#[strum(serialize_all = "snake_case")]
|
||||
pub enum ServerNotification {
|
||||
/// Authentication status changed
|
||||
AuthStatusChange(AuthStatusChangeNotification),
|
||||
|
||||
/// ChatGPT login flow completed
|
||||
LoginChatGptComplete(LoginChatGptCompleteNotification),
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
@@ -24,6 +24,10 @@ pub enum ResponseInputItem {
|
||||
call_id: String,
|
||||
result: Result<CallToolResult, String>,
|
||||
},
|
||||
CustomToolCallOutput {
|
||||
call_id: String,
|
||||
output: String,
|
||||
},
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
|
||||
@@ -77,6 +81,20 @@ pub enum ResponseItem {
|
||||
call_id: String,
|
||||
output: FunctionCallOutputPayload,
|
||||
},
|
||||
CustomToolCall {
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
id: Option<String>,
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
status: Option<String>,
|
||||
|
||||
call_id: String,
|
||||
name: String,
|
||||
input: String,
|
||||
},
|
||||
CustomToolCallOutput {
|
||||
call_id: String,
|
||||
output: String,
|
||||
},
|
||||
#[serde(other)]
|
||||
Other,
|
||||
}
|
||||
@@ -114,6 +132,9 @@ impl From<ResponseInputItem> for ResponseItem {
|
||||
),
|
||||
},
|
||||
},
|
||||
ResponseInputItem::CustomToolCallOutput { call_id, output } => {
|
||||
Self::CustomToolCallOutput { call_id, output }
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -183,7 +204,6 @@ impl From<Vec<InputItem>> for ResponseInputItem {
|
||||
None
|
||||
}
|
||||
},
|
||||
_ => None,
|
||||
})
|
||||
.collect::<Vec<ContentItem>>(),
|
||||
}
|
||||
@@ -197,10 +217,8 @@ pub struct ShellToolCallParams {
|
||||
pub command: Vec<String>,
|
||||
pub workdir: Option<String>,
|
||||
|
||||
/// This is the maximum time in seconds that the command is allowed to run.
|
||||
#[serde(rename = "timeout")]
|
||||
// The wire format uses `timeout`, which has ambiguous units, so we use
|
||||
// `timeout_ms` as the field name so it is clear in code.
|
||||
/// This is the maximum time in milliseconds that the command is allowed to run.
|
||||
#[serde(alias = "timeout")]
|
||||
pub timeout_ms: Option<u64>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub with_escalated_permissions: Option<bool>,
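The switch from `rename` to `alias` above means the old `timeout` key and the new `timeout_ms` key both deserialize into `timeout_ms`, while serialization now emits `timeout_ms`. A minimal sketch of that behavior with a hypothetical stand-in struct (standard serde semantics, not code from this diff):
// Stand-in struct, just to illustrate the alias.
#[derive(serde::Deserialize)]
struct Params {
    #[serde(alias = "timeout")]
    timeout_ms: Option<u64>,
}
let old: Params = serde_json::from_str(r#"{"timeout": 5000}"#).unwrap();
let new: Params = serde_json::from_str(r#"{"timeout_ms": 5000}"#).unwrap();
assert_eq!(old.timeout_ms, Some(5000));
assert_eq!(new.timeout_ms, Some(5000));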
|
||||
@@ -2,6 +2,7 @@ use serde::Deserialize;
|
||||
use serde::Serialize;
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Eq, Deserialize, Serialize)]
|
||||
#[serde(tag = "type", rename_all = "snake_case")]
|
||||
pub enum ParsedCommand {
|
||||
Read {
|
||||
cmd: String,
|
||||
|
||||
@@ -446,6 +446,10 @@ pub enum EventMsg {
|
||||
|
||||
BackgroundEvent(BackgroundEventEvent),
|
||||
|
||||
/// Notification that a model stream experienced an error or disconnect
|
||||
/// and the system is handling it (e.g., retrying with backoff).
|
||||
StreamError(StreamErrorEvent),
|
||||
|
||||
/// Notification that the agent is about to apply a code patch. Mirrors
|
||||
/// `ExecCommandBegin` so front‑ends can show progress indicators.
|
||||
PatchApplyBegin(PatchApplyBeginEvent),
|
||||
@@ -721,6 +725,11 @@ pub struct BackgroundEventEvent {
|
||||
pub message: String,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Deserialize, Serialize)]
|
||||
pub struct StreamErrorEvent {
|
||||
pub message: String,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Deserialize, Serialize)]
|
||||
pub struct PatchApplyBeginEvent {
|
||||
/// Identifier so this can be paired with the PatchApplyEnd event.
|
||||
|
||||
@@ -22,6 +22,7 @@ workspace = true
|
||||
|
||||
[dependencies]
|
||||
anyhow = "1"
|
||||
arboard = "3"
|
||||
async-stream = "0.3.6"
|
||||
base64 = "0.22.1"
|
||||
chrono = { version = "0.4", features = ["serde"] }
|
||||
@@ -41,7 +42,10 @@ codex-protocol = { path = "../protocol" }
|
||||
color-eyre = "0.6.3"
|
||||
crossterm = { version = "0.28.1", features = ["bracketed-paste", "event-stream"] }
|
||||
diffy = "0.4.2"
|
||||
image = { version = "^0.25.6", default-features = false, features = ["jpeg"] }
|
||||
image = { version = "^0.25.6", default-features = false, features = [
|
||||
"jpeg",
|
||||
"png",
|
||||
] }
|
||||
lazy_static = "1"
|
||||
mcp-types = { path = "../mcp-types" }
|
||||
once_cell = "1"
|
||||
@@ -61,6 +65,7 @@ shlex = "1.3.0"
|
||||
strum = "0.27.2"
|
||||
strum_macros = "0.27.2"
|
||||
supports-color = "3.0.2"
|
||||
tempfile = "3"
|
||||
textwrap = "0.16.2"
|
||||
tokio = { version = "1", features = [
|
||||
"io-std",
|
||||
|
||||
@@ -2,20 +2,21 @@ use crate::app_event::AppEvent;
|
||||
use crate::app_event_sender::AppEventSender;
|
||||
use crate::chatwidget::ChatWidget;
|
||||
use crate::file_search::FileSearchManager;
|
||||
use crate::get_git_diff::get_git_diff;
|
||||
use crate::slash_command::SlashCommand;
|
||||
use crate::transcript_app::TranscriptApp;
|
||||
use crate::tui;
|
||||
use crate::tui::TuiEvent;
|
||||
use codex_ansi_escape::ansi_escape_line;
|
||||
use codex_core::ConversationManager;
|
||||
use codex_core::config::Config;
|
||||
use codex_core::protocol::Event;
|
||||
use codex_core::protocol::Op;
|
||||
use codex_core::protocol::TokenUsage;
|
||||
use codex_login::AuthManager;
|
||||
use color_eyre::eyre::Result;
|
||||
use crossterm::event::KeyCode;
|
||||
use crossterm::event::KeyEvent;
|
||||
use crossterm::event::KeyEventKind;
|
||||
use crossterm::terminal::supports_keyboard_enhancement;
|
||||
use ratatui::style::Stylize;
|
||||
use ratatui::text::Line;
|
||||
use std::path::PathBuf;
|
||||
use std::sync::Arc;
|
||||
use std::sync::atomic::AtomicBool;
|
||||
@@ -35,6 +36,12 @@ pub(crate) struct App {
|
||||
|
||||
file_search: FileSearchManager,
|
||||
|
||||
transcript_lines: Vec<Line<'static>>,
|
||||
|
||||
// Transcript overlay state
|
||||
transcript_overlay: Option<TranscriptApp>,
|
||||
deferred_history_lines: Vec<Line<'static>>,
|
||||
|
||||
enhanced_keys_supported: bool,
|
||||
|
||||
/// Controls the animation thread that sends CommitTick events.
|
||||
@@ -44,6 +51,7 @@ pub(crate) struct App {
|
||||
impl App {
|
||||
pub async fn run(
|
||||
tui: &mut tui::Tui,
|
||||
auth_manager: Arc<AuthManager>,
|
||||
config: Config,
|
||||
initial_prompt: Option<String>,
|
||||
initial_images: Vec<PathBuf>,
|
||||
@@ -52,7 +60,7 @@ impl App {
|
||||
let (app_event_tx, mut app_event_rx) = unbounded_channel();
|
||||
let app_event_tx = AppEventSender::new(app_event_tx);
|
||||
|
||||
let conversation_manager = Arc::new(ConversationManager::default());
|
||||
let conversation_manager = Arc::new(ConversationManager::new(auth_manager.clone()));
|
||||
|
||||
let enhanced_keys_supported = supports_keyboard_enhancement().unwrap_or(false);
|
||||
|
||||
@@ -75,6 +83,9 @@ impl App {
|
||||
config,
|
||||
file_search,
|
||||
enhanced_keys_supported,
|
||||
transcript_lines: Vec::new(),
|
||||
transcript_overlay: None,
|
||||
deferred_history_lines: Vec::new(),
|
||||
commit_anim_running: Arc::new(AtomicBool::new(false)),
|
||||
};
|
||||
|
||||
@@ -100,34 +111,51 @@ impl App {
|
||||
tui: &mut tui::Tui,
|
||||
event: TuiEvent,
|
||||
) -> Result<bool> {
|
||||
match event {
|
||||
TuiEvent::Key(key_event) => {
|
||||
self.handle_key_event(key_event).await;
|
||||
if let Some(overlay) = &mut self.transcript_overlay {
|
||||
overlay.handle_event(tui, event)?;
|
||||
if overlay.is_done {
|
||||
// Exit alternate screen and restore viewport.
|
||||
let _ = tui.leave_alt_screen();
|
||||
if !self.deferred_history_lines.is_empty() {
|
||||
let lines = std::mem::take(&mut self.deferred_history_lines);
|
||||
tui.insert_history_lines(lines);
|
||||
}
|
||||
self.transcript_overlay = None;
|
||||
tui.frame_requester().schedule_frame();
|
||||
}
|
||||
TuiEvent::Paste(pasted) => {
|
||||
// Many terminals convert newlines to \r when pasting (e.g., iTerm2),
|
||||
// but tui-textarea expects \n. Normalize CR to LF.
|
||||
// [tui-textarea]: https://github.com/rhysd/tui-textarea/blob/4d18622eeac13b309e0ff6a55a46ac6706da68cf/src/textarea.rs#L782-L783
|
||||
// [iTerm2]: https://github.com/gnachman/iTerm2/blob/5d0c0d9f68523cbd0494dad5422998964a2ecd8d/sources/iTermPasteHelper.m#L206-L216
|
||||
let pasted = pasted.replace("\r", "\n");
|
||||
self.chat_widget.handle_paste(pasted);
|
||||
}
|
||||
TuiEvent::Draw => {
|
||||
tui.draw(
|
||||
self.chat_widget.desired_height(tui.terminal.size()?.width),
|
||||
|frame| {
|
||||
frame.render_widget_ref(&self.chat_widget, frame.area());
|
||||
if let Some((x, y)) = self.chat_widget.cursor_pos(frame.area()) {
|
||||
frame.set_cursor_position((x, y));
|
||||
}
|
||||
},
|
||||
)?;
|
||||
}
|
||||
#[cfg(unix)]
|
||||
TuiEvent::ResumeFromSuspend => {
|
||||
let cursor_pos = tui.terminal.get_cursor_position()?;
|
||||
tui.terminal
|
||||
.set_viewport_area(ratatui::layout::Rect::new(0, cursor_pos.y, 0, 0));
|
||||
} else {
|
||||
match event {
|
||||
TuiEvent::Key(key_event) => {
|
||||
self.handle_key_event(tui, key_event).await;
|
||||
}
|
||||
TuiEvent::Paste(pasted) => {
|
||||
// Many terminals convert newlines to \r when pasting (e.g., iTerm2),
|
||||
// but tui-textarea expects \n. Normalize CR to LF.
|
||||
// [tui-textarea]: https://github.com/rhysd/tui-textarea/blob/4d18622eeac13b309e0ff6a55a46ac6706da68cf/src/textarea.rs#L782-L783
|
||||
// [iTerm2]: https://github.com/gnachman/iTerm2/blob/5d0c0d9f68523cbd0494dad5422998964a2ecd8d/sources/iTermPasteHelper.m#L206-L216
|
||||
let pasted = pasted.replace("\r", "\n");
|
||||
self.chat_widget.handle_paste(pasted);
|
||||
}
|
||||
TuiEvent::Draw => {
|
||||
tui.draw(
|
||||
self.chat_widget.desired_height(tui.terminal.size()?.width),
|
||||
|frame| {
|
||||
frame.render_widget_ref(&self.chat_widget, frame.area());
|
||||
if let Some((x, y)) = self.chat_widget.cursor_pos(frame.area()) {
|
||||
frame.set_cursor_position((x, y));
|
||||
}
|
||||
},
|
||||
)?;
|
||||
}
|
||||
TuiEvent::AttachImage {
|
||||
path,
|
||||
width,
|
||||
height,
|
||||
format_label,
|
||||
} => {
|
||||
self.chat_widget
|
||||
.attach_image(path, width, height, format_label);
|
||||
}
|
||||
}
|
||||
}
|
||||
Ok(true)
|
||||
@@ -135,8 +163,44 @@ impl App {
|
||||
|
||||
fn handle_event(&mut self, tui: &mut tui::Tui, event: AppEvent) -> Result<bool> {
|
||||
match event {
|
||||
AppEvent::InsertHistory(lines) => {
|
||||
tui.insert_history_lines(lines);
|
||||
AppEvent::NewSession => {
|
||||
self.chat_widget = ChatWidget::new(
|
||||
self.config.clone(),
|
||||
self.server.clone(),
|
||||
tui.frame_requester(),
|
||||
self.app_event_tx.clone(),
|
||||
None,
|
||||
Vec::new(),
|
||||
self.enhanced_keys_supported,
|
||||
);
|
||||
tui.frame_requester().schedule_frame();
|
||||
}
|
||||
AppEvent::InsertHistoryLines(lines) => {
|
||||
if let Some(overlay) = &mut self.transcript_overlay {
|
||||
overlay.insert_lines(lines.clone());
|
||||
tui.frame_requester().schedule_frame();
|
||||
}
|
||||
self.transcript_lines.extend(lines.clone());
|
||||
if self.transcript_overlay.is_some() {
|
||||
self.deferred_history_lines.extend(lines);
|
||||
} else {
|
||||
tui.insert_history_lines(lines);
|
||||
}
|
||||
}
|
||||
AppEvent::InsertHistoryCell(cell) => {
|
||||
if let Some(overlay) = &mut self.transcript_overlay {
|
||||
overlay.insert_lines(cell.transcript_lines());
|
||||
tui.frame_requester().schedule_frame();
|
||||
}
|
||||
self.transcript_lines.extend(cell.transcript_lines());
|
||||
let display = cell.display_lines();
|
||||
if !display.is_empty() {
|
||||
if self.transcript_overlay.is_some() {
|
||||
self.deferred_history_lines.extend(display);
|
||||
} else {
|
||||
tui.insert_history_lines(display);
|
||||
}
|
||||
}
|
||||
}
|
||||
AppEvent::StartCommitAnimation => {
|
||||
if self
|
||||
@@ -168,113 +232,21 @@ impl App {
|
||||
}
|
||||
AppEvent::CodexOp(op) => self.chat_widget.submit_op(op),
|
||||
AppEvent::DiffResult(text) => {
|
||||
self.chat_widget.add_diff_output(text);
|
||||
// Clear the in-progress state in the bottom pane
|
||||
self.chat_widget.on_diff_complete();
|
||||
// Enter alternate screen using TUI helper and build pager lines
|
||||
let _ = tui.enter_alt_screen();
|
||||
let pager_lines: Vec<ratatui::text::Line<'static>> = if text.trim().is_empty() {
|
||||
vec!["No changes detected.".italic().into()]
|
||||
} else {
|
||||
text.lines().map(ansi_escape_line).collect()
|
||||
};
|
||||
self.transcript_overlay = Some(TranscriptApp::with_title(
|
||||
pager_lines,
|
||||
"D I F F".to_string(),
|
||||
));
|
||||
tui.frame_requester().schedule_frame();
|
||||
}
|
||||
AppEvent::DispatchCommand(command) => match command {
|
||||
SlashCommand::New => {
|
||||
// User accepted – switch to chat view.
|
||||
let new_widget = ChatWidget::new(
|
||||
self.config.clone(),
|
||||
self.server.clone(),
|
||||
tui.frame_requester(),
|
||||
self.app_event_tx.clone(),
|
||||
None,
|
||||
Vec::new(),
|
||||
self.enhanced_keys_supported,
|
||||
);
|
||||
self.chat_widget = new_widget;
|
||||
tui.frame_requester().schedule_frame();
|
||||
}
|
||||
SlashCommand::Init => {
|
||||
// Guard: do not run if a task is active.
|
||||
const INIT_PROMPT: &str = include_str!("../prompt_for_init_command.md");
|
||||
self.chat_widget
|
||||
.submit_text_message(INIT_PROMPT.to_string());
|
||||
}
|
||||
SlashCommand::Compact => {
|
||||
self.chat_widget.clear_token_usage();
|
||||
self.app_event_tx.send(AppEvent::CodexOp(Op::Compact));
|
||||
}
|
||||
SlashCommand::Model => {
|
||||
self.chat_widget.open_model_popup();
|
||||
}
|
||||
SlashCommand::Approvals => {
|
||||
self.chat_widget.open_approvals_popup();
|
||||
}
|
||||
SlashCommand::Quit => {
|
||||
return Ok(false);
|
||||
}
|
||||
SlashCommand::Logout => {
|
||||
if let Err(e) = codex_login::logout(&self.config.codex_home) {
|
||||
tracing::error!("failed to logout: {e}");
|
||||
}
|
||||
return Ok(false);
|
||||
}
|
||||
SlashCommand::Diff => {
|
||||
self.chat_widget.add_diff_in_progress();
|
||||
let tx = self.app_event_tx.clone();
|
||||
tokio::spawn(async move {
|
||||
let text = match get_git_diff().await {
|
||||
Ok((is_git_repo, diff_text)) => {
|
||||
if is_git_repo {
|
||||
diff_text
|
||||
} else {
|
||||
"`/diff` — _not inside a git repository_".to_string()
|
||||
}
|
||||
}
|
||||
Err(e) => format!("Failed to compute diff: {e}"),
|
||||
};
|
||||
tx.send(AppEvent::DiffResult(text));
|
||||
});
|
||||
}
|
||||
SlashCommand::Mention => {
|
||||
self.chat_widget.insert_str("@");
|
||||
}
|
||||
SlashCommand::Status => {
|
||||
self.chat_widget.add_status_output();
|
||||
}
|
||||
SlashCommand::Mcp => {
|
||||
self.chat_widget.add_mcp_output();
|
||||
}
|
||||
#[cfg(debug_assertions)]
|
||||
SlashCommand::TestApproval => {
|
||||
use codex_core::protocol::EventMsg;
|
||||
use std::collections::HashMap;
|
||||
|
||||
use codex_core::protocol::ApplyPatchApprovalRequestEvent;
|
||||
use codex_core::protocol::FileChange;
|
||||
|
||||
self.app_event_tx.send(AppEvent::CodexEvent(Event {
|
||||
id: "1".to_string(),
|
||||
// msg: EventMsg::ExecApprovalRequest(ExecApprovalRequestEvent {
|
||||
// call_id: "1".to_string(),
|
||||
// command: vec!["git".into(), "apply".into()],
|
||||
// cwd: self.config.cwd.clone(),
|
||||
// reason: Some("test".to_string()),
|
||||
// }),
|
||||
msg: EventMsg::ApplyPatchApprovalRequest(ApplyPatchApprovalRequestEvent {
|
||||
call_id: "1".to_string(),
|
||||
changes: HashMap::from([
|
||||
(
|
||||
PathBuf::from("/tmp/test.txt"),
|
||||
FileChange::Add {
|
||||
content: "test".to_string(),
|
||||
},
|
||||
),
|
||||
(
|
||||
PathBuf::from("/tmp/test2.txt"),
|
||||
FileChange::Update {
|
||||
unified_diff: "+test\n-test2".to_string(),
|
||||
move_path: None,
|
||||
},
|
||||
),
|
||||
]),
|
||||
reason: None,
|
||||
grant_root: Some(PathBuf::from("/tmp")),
|
||||
}),
|
||||
}));
|
||||
}
|
||||
},
|
||||
AppEvent::StartFileSearch(query) => {
|
||||
if !query.is_empty() {
|
||||
self.file_search.on_user_query(query);
|
||||
@@ -303,7 +275,7 @@ impl App {
self.chat_widget.token_usage().clone()
}

async fn handle_key_event(&mut self, key_event: KeyEvent) {
async fn handle_key_event(&mut self, tui: &mut tui::Tui, key_event: KeyEvent) {
match key_event {
KeyEvent {
code: KeyCode::Char('c'),
@@ -321,6 +293,17 @@ impl App {
} if self.chat_widget.composer_is_empty() => {
self.app_event_tx.send(AppEvent::ExitRequest);
}
KeyEvent {
code: KeyCode::Char('t'),
modifiers: crossterm::event::KeyModifiers::CONTROL,
kind: KeyEventKind::Press,
..
} => {
// Enter alternate screen and set viewport to full size.
let _ = tui.enter_alt_screen();
self.transcript_overlay = Some(TranscriptApp::new(self.transcript_lines.clone()));
tui.frame_requester().schedule_frame();
}
KeyEvent {
kind: KeyEventKind::Press | KeyEventKind::Repeat,
..

@@ -2,7 +2,8 @@ use codex_core::protocol::Event;
use codex_file_search::FileMatch;
use ratatui::text::Line;

use crate::slash_command::SlashCommand;
use crate::history_cell::HistoryCell;

use codex_core::protocol::AskForApproval;
use codex_core::protocol::SandboxPolicy;
use codex_core::protocol_config_types::ReasoningEffort;
@@ -12,6 +13,9 @@ use codex_core::protocol_config_types::ReasoningEffort;
pub(crate) enum AppEvent {
CodexEvent(Event),

/// Start a new session.
NewSession,

/// Request to exit the application gracefully.
ExitRequest,

@@ -19,10 +23,6 @@ pub(crate) enum AppEvent {
/// bubbling channels through layers of widgets.
CodexOp(codex_core::protocol::Op),

/// Dispatch a recognized slash command from the UI (composer) to the app
/// layer so it can be handled centrally.
DispatchCommand(SlashCommand),

/// Kick off an asynchronous file search for the given query (text after
/// the `@`). Previous searches may be cancelled by the app layer so there
/// is at most one in-flight search.
@@ -39,7 +39,8 @@ pub(crate) enum AppEvent {
/// Result of computing a `/diff` command.
DiffResult(String),

InsertHistory(Vec<Line<'static>>),
InsertHistoryLines(Vec<Line<'static>>),
InsertHistoryCell(Box<dyn HistoryCell>),

StartCommitAnimation,
StopCommitAnimation,

@@ -28,6 +28,11 @@ pub(crate) trait BottomPaneView {
/// Render the view: this will be displayed in place of the composer.
fn render(&self, area: Rect, buf: &mut Buffer);

/// Update the status indicator animated header. Default no-op.
fn update_status_header(&mut self, _header: String) {
// no-op
}

/// Called when task completes to check if the view should be hidden.
fn should_hide_when_task_is_done(&mut self) -> bool {
false

@@ -23,6 +23,7 @@ use ratatui::widgets::WidgetRef;
use super::chat_composer_history::ChatComposerHistory;
use super::command_popup::CommandPopup;
use super::file_search_popup::FileSearchPopup;
use crate::slash_command::SlashCommand;

use crate::app_event::AppEvent;
use crate::app_event_sender::AppEventSender;
@@ -30,6 +31,16 @@ use crate::bottom_pane::textarea::TextArea;
use crate::bottom_pane::textarea::TextAreaState;
use codex_file_search::FileMatch;
use std::cell::RefCell;
use std::collections::HashMap;
use std::path::Path;
use std::path::PathBuf;
use std::time::Duration;
use std::time::Instant;

// Heuristic thresholds for detecting paste-like input bursts.
const PASTE_BURST_MIN_CHARS: u16 = 3;
const PASTE_BURST_CHAR_INTERVAL: Duration = Duration::from_millis(8);
const PASTE_ENTER_SUPPRESS_WINDOW: Duration = Duration::from_millis(120);

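// Illustrative sketch (editor's addition, not part of the repository diff):
// how the three thresholds above are meant to combine. A keystroke counts as
// paste-like when it lands within PASTE_BURST_CHAR_INTERVAL of the previous
// plain character, or when at least PASTE_BURST_MIN_CHARS plain characters
// arrived back to back and the last one is still within
// PASTE_ENTER_SUPPRESS_WINDOW, or while an explicit burst window is open.
// This mirrors the Enter-suppression checks added to ChatComposer later in
// this diff.
fn looks_like_paste_burst(
    now: Instant,
    last_plain_char_time: Option<Instant>,
    consecutive_plain_char_burst: u16,
    paste_burst_until: Option<Instant>,
) -> bool {
    let tight_after_char = last_plain_char_time
        .is_some_and(|t| now.duration_since(t) <= PASTE_BURST_CHAR_INTERVAL);
    let recent_after_char = last_plain_char_time
        .is_some_and(|t| now.duration_since(t) <= PASTE_ENTER_SUPPRESS_WINDOW);
    let burst_by_count =
        recent_after_char && consecutive_plain_char_burst >= PASTE_BURST_MIN_CHARS;
    let in_burst_window = paste_burst_until.is_some_and(|until| now <= until);
    tight_after_char || burst_by_count || in_burst_window
}
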
/// If the pasted content exceeds this number of characters, replace it with a
/// placeholder in the UI.
@@ -38,9 +49,16 @@ const LARGE_PASTE_CHAR_THRESHOLD: usize = 1000;
/// Result returned when the user interacts with the text area.
pub enum InputResult {
Submitted(String),
Command(SlashCommand),
None,
}

#[derive(Clone, Debug, PartialEq)]
struct AttachedImage {
placeholder: String,
path: PathBuf,
}

struct TokenUsageInfo {
|
||||
total_token_usage: TokenUsage,
|
||||
last_token_usage: TokenUsage,
|
||||
@@ -69,7 +87,15 @@ pub(crate) struct ChatComposer {
|
||||
pending_pastes: Vec<(String, String)>,
|
||||
token_usage_info: Option<TokenUsageInfo>,
|
||||
has_focus: bool,
|
||||
attached_images: Vec<AttachedImage>,
|
||||
placeholder_text: String,
|
||||
// Heuristic state to detect non-bracketed paste bursts.
|
||||
last_plain_char_time: Option<Instant>,
|
||||
consecutive_plain_char_burst: u16,
|
||||
paste_burst_until: Option<Instant>,
|
||||
// Buffer to accumulate characters during a detected non-bracketed paste burst.
|
||||
paste_burst_buffer: String,
|
||||
in_paste_burst_mode: bool,
|
||||
}
|
||||
|
||||
/// Popup state – at most one can be visible at any time.
|
||||
@@ -101,7 +127,13 @@ impl ChatComposer {
|
||||
pending_pastes: Vec::new(),
|
||||
token_usage_info: None,
|
||||
has_focus: has_input_focus,
|
||||
attached_images: Vec::new(),
|
||||
placeholder_text,
|
||||
last_plain_char_time: None,
|
||||
consecutive_plain_char_burst: 0,
|
||||
paste_burst_until: None,
|
||||
paste_burst_buffer: String::new(),
|
||||
in_paste_burst_mode: false,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -189,11 +221,29 @@ impl ChatComposer {
|
||||
} else {
|
||||
self.textarea.insert_str(&pasted);
|
||||
}
|
||||
// Explicit paste events should not trigger Enter suppression.
|
||||
self.last_plain_char_time = None;
|
||||
self.consecutive_plain_char_burst = 0;
|
||||
self.paste_burst_until = None;
|
||||
self.sync_command_popup();
|
||||
self.sync_file_search_popup();
|
||||
true
|
||||
}
|
||||
|
||||
pub fn attach_image(&mut self, path: PathBuf, width: u32, height: u32, format_label: &str) {
|
||||
let placeholder = format!("[image {width}x{height} {format_label}]");
|
||||
// Insert as an element to match large paste placeholder behavior:
|
||||
// styled distinctly and treated atomically for cursor/mutations.
|
||||
self.textarea.insert_element(&placeholder);
|
||||
self.attached_images
|
||||
.push(AttachedImage { placeholder, path });
|
||||
}
|
||||
|
||||
pub fn take_recent_submission_images(&mut self) -> Vec<PathBuf> {
|
||||
let images = std::mem::take(&mut self.attached_images);
|
||||
images.into_iter().map(|img| img.path).collect()
|
||||
}
|
||||
|
||||
/// Integrate results from an asynchronous file search.
|
||||
pub(crate) fn on_file_search_result(&mut self, query: String, matches: Vec<FileMatch>) {
|
||||
// Only apply if user is still editing a token starting with `query`.
|
||||
@@ -289,15 +339,15 @@ impl ChatComposer {
|
||||
..
|
||||
} => {
|
||||
if let Some(cmd) = popup.selected_command() {
|
||||
// Send command to the app layer.
|
||||
self.app_event_tx.send(AppEvent::DispatchCommand(*cmd));
|
||||
|
||||
// Clear textarea so no residual text remains.
|
||||
self.textarea.set_text("");
|
||||
|
||||
let result = (InputResult::Command(*cmd), true);
|
||||
|
||||
// Hide popup since the command has been dispatched.
|
||||
self.active_popup = ActivePopup::None;
|
||||
return (InputResult::None, true);
|
||||
|
||||
return result;
|
||||
}
|
||||
// Fallback to default newline handling if no command selected.
|
||||
self.handle_key_event_without_popup(key_event)
|
||||
@@ -344,19 +394,74 @@ impl ChatComposer {
|
||||
modifiers: KeyModifiers::NONE,
|
||||
..
|
||||
} => {
|
||||
if let Some(sel) = popup.selected_match() {
|
||||
let sel_path = sel.to_string();
|
||||
// Drop popup borrow before using self mutably again.
|
||||
self.insert_selected_path(&sel_path);
|
||||
let Some(sel) = popup.selected_match() else {
|
||||
self.active_popup = ActivePopup::None;
|
||||
return (InputResult::None, true);
|
||||
};
|
||||
|
||||
let sel_path = sel.to_string();
|
||||
// If selected path looks like an image (png/jpeg), attach as image instead of inserting text.
|
||||
let is_image = Self::is_image_path(&sel_path);
|
||||
if is_image {
|
||||
// Determine dimensions; if that fails fall back to normal path insertion.
|
||||
let path_buf = PathBuf::from(&sel_path);
|
||||
if let Ok((w, h)) = image::image_dimensions(&path_buf) {
|
||||
// Remove the current @token (mirror logic from insert_selected_path without inserting text)
|
||||
// using the flat text and byte-offset cursor API.
|
||||
let cursor_offset = self.textarea.cursor();
|
||||
let text = self.textarea.text();
|
||||
let before_cursor = &text[..cursor_offset];
|
||||
let after_cursor = &text[cursor_offset..];
|
||||
|
||||
// Determine token boundaries in the full text.
|
||||
let start_idx = before_cursor
|
||||
.char_indices()
|
||||
.rfind(|(_, c)| c.is_whitespace())
|
||||
.map(|(idx, c)| idx + c.len_utf8())
|
||||
.unwrap_or(0);
|
||||
let end_rel_idx = after_cursor
|
||||
.char_indices()
|
||||
.find(|(_, c)| c.is_whitespace())
|
||||
.map(|(idx, _)| idx)
|
||||
.unwrap_or(after_cursor.len());
|
||||
let end_idx = cursor_offset + end_rel_idx;
|
||||
|
||||
self.textarea.replace_range(start_idx..end_idx, "");
|
||||
self.textarea.set_cursor(start_idx);
|
||||
|
||||
let format_label = match Path::new(&sel_path)
|
||||
.extension()
|
||||
.and_then(|e| e.to_str())
|
||||
.map(|s| s.to_ascii_lowercase())
|
||||
{
|
||||
Some(ext) if ext == "png" => "PNG",
|
||||
Some(ext) if ext == "jpg" || ext == "jpeg" => "JPEG",
|
||||
_ => "IMG",
|
||||
};
|
||||
self.attach_image(path_buf.clone(), w, h, format_label);
|
||||
// Add a trailing space to keep typing fluid.
|
||||
self.textarea.insert_str(" ");
|
||||
} else {
|
||||
// Fallback to plain path insertion if metadata read fails.
|
||||
self.insert_selected_path(&sel_path);
|
||||
}
|
||||
} else {
|
||||
// Non-image: inserting file path.
|
||||
self.insert_selected_path(&sel_path);
|
||||
}
|
||||
(InputResult::None, false)
|
||||
// No selection: treat Enter as closing the popup/session.
|
||||
self.active_popup = ActivePopup::None;
|
||||
(InputResult::None, true)
|
||||
}
|
||||
input => self.handle_input_basic(input),
|
||||
}
|
||||
}
|
||||
|
||||
fn is_image_path(path: &str) -> bool {
|
||||
let lower = path.to_ascii_lowercase();
|
||||
lower.ends_with(".png") || lower.ends_with(".jpg") || lower.ends_with(".jpeg")
|
||||
}
|
||||
|
||||
/// Extract the `@token` that the cursor is currently positioned on, if any.
|
||||
///
|
||||
/// The returned string **does not** include the leading `@`.
|
||||
@@ -532,6 +637,60 @@ impl ChatComposer {
|
||||
modifiers: KeyModifiers::NONE,
|
||||
..
|
||||
} => {
|
||||
// If we're in a paste-like burst capture, treat Enter as part of the burst
|
||||
// and accumulate it rather than submitting or inserting immediately.
|
||||
// Do not treat Enter as paste inside a slash-command context.
|
||||
let in_slash_context = matches!(self.active_popup, ActivePopup::Command(_))
|
||||
|| self
|
||||
.textarea
|
||||
.text()
|
||||
.lines()
|
||||
.next()
|
||||
.unwrap_or("")
|
||||
.starts_with('/');
|
||||
if (self.in_paste_burst_mode || !self.paste_burst_buffer.is_empty())
|
||||
&& !in_slash_context
|
||||
{
|
||||
self.paste_burst_buffer.push('\n');
|
||||
let now = Instant::now();
|
||||
// Keep the window alive so subsequent lines are captured too.
|
||||
self.paste_burst_until = Some(now + PASTE_ENTER_SUPPRESS_WINDOW);
|
||||
return (InputResult::None, true);
|
||||
}
|
||||
// If we have pending placeholder pastes, submit immediately to expand them.
|
||||
if !self.pending_pastes.is_empty() {
|
||||
let mut text = self.textarea.text().to_string();
|
||||
self.textarea.set_text("");
|
||||
for (placeholder, actual) in &self.pending_pastes {
|
||||
if text.contains(placeholder) {
|
||||
text = text.replace(placeholder, actual);
|
||||
}
|
||||
}
|
||||
self.pending_pastes.clear();
|
||||
if text.is_empty() {
|
||||
return (InputResult::None, true);
|
||||
}
|
||||
self.history.record_local_submission(&text);
|
||||
return (InputResult::Submitted(text), true);
|
||||
}
|
||||
|
||||
// During a paste-like burst, treat Enter as a newline instead of submit.
|
||||
let now = Instant::now();
|
||||
let tight_after_char = self
|
||||
.last_plain_char_time
|
||||
.is_some_and(|t| now.duration_since(t) <= PASTE_BURST_CHAR_INTERVAL);
|
||||
let recent_after_char = self
|
||||
.last_plain_char_time
|
||||
.is_some_and(|t| now.duration_since(t) <= PASTE_ENTER_SUPPRESS_WINDOW);
|
||||
let burst_by_count =
|
||||
recent_after_char && self.consecutive_plain_char_burst >= PASTE_BURST_MIN_CHARS;
|
||||
let in_burst_window = self.paste_burst_until.is_some_and(|until| now <= until);
|
||||
|
||||
if tight_after_char || burst_by_count || in_burst_window {
|
||||
self.textarea.insert_str("\n");
|
||||
self.paste_burst_until = Some(now + PASTE_ENTER_SUPPRESS_WINDOW);
|
||||
return (InputResult::None, true);
|
||||
}
|
||||
let mut text = self.textarea.text().to_string();
|
||||
self.textarea.set_text("");
|
||||
|
||||
@@ -543,12 +702,19 @@ impl ChatComposer {
|
||||
}
|
||||
self.pending_pastes.clear();
|
||||
|
||||
if text.is_empty() {
|
||||
(InputResult::None, true)
|
||||
} else {
|
||||
self.history.record_local_submission(&text);
|
||||
(InputResult::Submitted(text), true)
|
||||
// Strip image placeholders from the submitted text; images are retrieved via take_recent_submission_images()
|
||||
for img in &self.attached_images {
|
||||
if text.contains(&img.placeholder) {
|
||||
text = text.replace(&img.placeholder, "");
|
||||
}
|
||||
}
|
||||
|
||||
text = text.trim().to_string();
|
||||
if !text.is_empty() {
|
||||
self.history.record_local_submission(&text);
|
||||
}
|
||||
// Do not clear attached_images here; ChatWidget drains them via take_recent_submission_images().
|
||||
(InputResult::Submitted(text), true)
|
||||
}
|
||||
input => self.handle_input_basic(input),
|
||||
}
|
||||
@@ -556,17 +722,302 @@ impl ChatComposer {
|
||||
|
||||
/// Handle generic Input events that modify the textarea content.
|
||||
fn handle_input_basic(&mut self, input: KeyEvent) -> (InputResult, bool) {
|
||||
// If we have a buffered non-bracketed paste burst and enough time has
|
||||
// elapsed since the last char, flush it before handling a new input.
|
||||
let now = Instant::now();
|
||||
let timed_out = self
|
||||
.last_plain_char_time
|
||||
.is_some_and(|t| now.duration_since(t) > PASTE_BURST_CHAR_INTERVAL);
|
||||
if timed_out && (!self.paste_burst_buffer.is_empty() || self.in_paste_burst_mode) {
|
||||
let pasted = std::mem::take(&mut self.paste_burst_buffer);
|
||||
self.in_paste_burst_mode = false;
|
||||
// Reuse normal paste path (handles large-paste placeholders).
|
||||
self.handle_paste(pasted);
|
||||
}
|
||||
|
||||
// If we're capturing a burst and receive Enter, accumulate it instead of inserting.
|
||||
if matches!(input.code, KeyCode::Enter)
|
||||
&& (self.in_paste_burst_mode || !self.paste_burst_buffer.is_empty())
|
||||
{
|
||||
self.paste_burst_buffer.push('\n');
|
||||
self.paste_burst_until = Some(now + PASTE_ENTER_SUPPRESS_WINDOW);
|
||||
return (InputResult::None, true);
|
||||
}
|
||||
|
||||
// Intercept plain Char inputs to optionally accumulate into a burst buffer.
|
||||
if let KeyEvent {
|
||||
code: KeyCode::Char(ch),
|
||||
modifiers,
|
||||
..
|
||||
} = input
|
||||
{
|
||||
let has_ctrl_or_alt =
|
||||
modifiers.contains(KeyModifiers::CONTROL) || modifiers.contains(KeyModifiers::ALT);
|
||||
if !has_ctrl_or_alt {
|
||||
// Update burst heuristics.
|
||||
match self.last_plain_char_time {
|
||||
Some(prev) if now.duration_since(prev) <= PASTE_BURST_CHAR_INTERVAL => {
|
||||
self.consecutive_plain_char_burst =
|
||||
self.consecutive_plain_char_burst.saturating_add(1);
|
||||
}
|
||||
_ => {
|
||||
self.consecutive_plain_char_burst = 1;
|
||||
}
|
||||
}
|
||||
self.last_plain_char_time = Some(now);
|
||||
|
||||
// If we're already buffering, capture the char into the buffer.
|
||||
if self.in_paste_burst_mode {
|
||||
self.paste_burst_buffer.push(ch);
|
||||
// Keep the window alive while we receive the burst.
|
||||
self.paste_burst_until = Some(now + PASTE_ENTER_SUPPRESS_WINDOW);
|
||||
return (InputResult::None, true);
|
||||
} else if self.consecutive_plain_char_burst >= PASTE_BURST_MIN_CHARS {
|
||||
// Do not start burst buffering while typing a slash command (first line starts with '/').
|
||||
let first_line = self.textarea.text().lines().next().unwrap_or("");
|
||||
if first_line.starts_with('/') {
|
||||
// Keep heuristics but do not buffer.
|
||||
self.paste_burst_until = Some(now + PASTE_ENTER_SUPPRESS_WINDOW);
|
||||
// Insert normally.
|
||||
self.textarea.input(input);
|
||||
let text_after = self.textarea.text();
|
||||
self.pending_pastes
|
||||
.retain(|(placeholder, _)| text_after.contains(placeholder));
|
||||
return (InputResult::None, true);
|
||||
}
|
||||
// Begin buffering from this character onward.
|
||||
self.paste_burst_buffer.push(ch);
|
||||
self.in_paste_burst_mode = true;
|
||||
// Keep the window alive to continue capturing.
|
||||
self.paste_burst_until = Some(now + PASTE_ENTER_SUPPRESS_WINDOW);
|
||||
return (InputResult::None, true);
|
||||
}
|
||||
|
||||
// Not buffering: insert normally and continue.
|
||||
self.textarea.input(input);
|
||||
let text_after = self.textarea.text();
|
||||
self.pending_pastes
|
||||
.retain(|(placeholder, _)| text_after.contains(placeholder));
|
||||
return (InputResult::None, true);
|
||||
} else {
|
||||
// Modified char ends any burst: flush buffered content before applying.
|
||||
if !self.paste_burst_buffer.is_empty() || self.in_paste_burst_mode {
|
||||
let pasted = std::mem::take(&mut self.paste_burst_buffer);
|
||||
self.in_paste_burst_mode = false;
|
||||
self.handle_paste(pasted);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// For non-char inputs (or after flushing), handle normally.
|
||||
// Special handling for backspace on placeholders
|
||||
if let KeyEvent {
|
||||
code: KeyCode::Backspace,
|
||||
..
|
||||
} = input
|
||||
&& self.try_remove_any_placeholder_at_cursor()
|
||||
{
|
||||
return (InputResult::None, true);
|
||||
}
|
||||
|
||||
// Normal input handling
|
||||
self.textarea.input(input);
|
||||
let text_after = self.textarea.text();
|
||||
|
||||
// Update paste-burst heuristic for plain Char (no Ctrl/Alt) events.
|
||||
let crossterm::event::KeyEvent {
|
||||
code, modifiers, ..
|
||||
} = input;
|
||||
match code {
|
||||
KeyCode::Char(_) => {
|
||||
let has_ctrl_or_alt = modifiers.contains(KeyModifiers::CONTROL)
|
||||
|| modifiers.contains(KeyModifiers::ALT);
|
||||
if has_ctrl_or_alt {
|
||||
// Modified char: clear burst window.
|
||||
self.consecutive_plain_char_burst = 0;
|
||||
self.last_plain_char_time = None;
|
||||
self.paste_burst_until = None;
|
||||
self.in_paste_burst_mode = false;
|
||||
self.paste_burst_buffer.clear();
|
||||
}
|
||||
// Plain chars handled above.
|
||||
}
|
||||
KeyCode::Enter => {
|
||||
// Keep burst window alive (supports blank lines in paste).
|
||||
}
|
||||
_ => {
|
||||
// Other keys: clear burst window and any buffer (after flushing earlier).
|
||||
self.consecutive_plain_char_burst = 0;
|
||||
self.last_plain_char_time = None;
|
||||
self.paste_burst_until = None;
|
||||
self.in_paste_burst_mode = false;
|
||||
// Do not clear paste_burst_buffer here; it should have been flushed above.
|
||||
}
|
||||
}
|
||||
|
||||
// Check if any placeholders were removed and remove their corresponding pending pastes
|
||||
self.pending_pastes
|
||||
.retain(|(placeholder, _)| text_after.contains(placeholder));
|
||||
|
||||
// Keep attached images in proportion to how many matching placeholders exist in the text.
|
||||
// This handles duplicate placeholders that share the same visible label.
|
||||
if !self.attached_images.is_empty() {
|
||||
let mut needed: HashMap<String, usize> = HashMap::new();
|
||||
for img in &self.attached_images {
|
||||
needed
|
||||
.entry(img.placeholder.clone())
|
||||
.or_insert_with(|| text_after.matches(&img.placeholder).count());
|
||||
}
|
||||
|
||||
let mut used: HashMap<String, usize> = HashMap::new();
|
||||
let mut kept: Vec<AttachedImage> = Vec::with_capacity(self.attached_images.len());
|
||||
for img in self.attached_images.drain(..) {
|
||||
let total_needed = *needed.get(&img.placeholder).unwrap_or(&0);
|
||||
let used_count = used.entry(img.placeholder.clone()).or_insert(0);
|
||||
if *used_count < total_needed {
|
||||
kept.push(img);
|
||||
*used_count += 1;
|
||||
}
|
||||
}
|
||||
self.attached_images = kept;
|
||||
}
|
||||
|
||||
(InputResult::None, true)
|
||||
}
|
||||
|
||||
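
// Illustrative sketch (editor's addition, not part of the repository diff):
// the pruning in handle_input_basic above keeps as many attached-image
// entries per placeholder label as there are copies of that label left in
// the text after an edit. A standalone version of the same counting idea:
fn keep_images_for_remaining_placeholders(
    attached: Vec<(String, std::path::PathBuf)>, // (placeholder label, image path)
    text_after: &str,
) -> Vec<(String, std::path::PathBuf)> {
    use std::collections::HashMap;
    // How many copies of each placeholder label survive in the text.
    let mut remaining: HashMap<String, usize> = HashMap::new();
    for (ph, _) in &attached {
        remaining
            .entry(ph.clone())
            .or_insert_with(|| text_after.matches(ph.as_str()).count());
    }
    // Keep one image mapping per surviving copy, preserving original order.
    attached
        .into_iter()
        .filter(|(ph, _)| {
            let left = remaining.get_mut(ph).expect("counted above");
            if *left > 0 {
                *left -= 1;
                true
            } else {
                false
            }
        })
        .collect()
}
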
/// Attempts to remove an image or paste placeholder if the cursor is at the end of one.
|
||||
/// Returns true if a placeholder was removed.
|
||||
fn try_remove_any_placeholder_at_cursor(&mut self) -> bool {
|
||||
let p = self.textarea.cursor();
|
||||
let text = self.textarea.text();
|
||||
|
||||
// Try image placeholders first
|
||||
let mut out: Option<(usize, String)> = None;
|
||||
// Detect if the cursor is at the end of any image placeholder.
|
||||
// If duplicates exist, remove the specific occurrence's mapping.
|
||||
for (i, img) in self.attached_images.iter().enumerate() {
|
||||
let ph = &img.placeholder;
|
||||
if p < ph.len() {
|
||||
continue;
|
||||
}
|
||||
let start = p - ph.len();
|
||||
if text[start..p] != *ph {
|
||||
continue;
|
||||
}
|
||||
|
||||
// Count the number of occurrences of `ph` before `start`.
|
||||
let mut occ_before = 0usize;
|
||||
let mut search_pos = 0usize;
|
||||
while search_pos < start {
|
||||
if let Some(found) = text[search_pos..start].find(ph) {
|
||||
occ_before += 1;
|
||||
search_pos += found + ph.len();
|
||||
} else {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
// Remove the occ_before-th attached image that shares this placeholder label.
|
||||
out = if let Some((remove_idx, _)) = self
|
||||
.attached_images
|
||||
.iter()
|
||||
.enumerate()
|
||||
.filter(|(_, img2)| img2.placeholder == *ph)
|
||||
.nth(occ_before)
|
||||
{
|
||||
Some((remove_idx, ph.clone()))
|
||||
} else {
|
||||
Some((i, ph.clone()))
|
||||
};
|
||||
break;
|
||||
}
|
||||
if let Some((idx, placeholder)) = out {
|
||||
self.textarea.replace_range(p - placeholder.len()..p, "");
|
||||
self.attached_images.remove(idx);
|
||||
return true;
|
||||
}
|
||||
|
||||
// Also handle when the cursor is at the START of an image placeholder.
|
||||
// let result = 'out: {
|
||||
let out: Option<(usize, String)> = 'out: {
|
||||
for (i, img) in self.attached_images.iter().enumerate() {
|
||||
let ph = &img.placeholder;
|
||||
if p + ph.len() > text.len() {
|
||||
continue;
|
||||
}
|
||||
if &text[p..p + ph.len()] != ph {
|
||||
continue;
|
||||
}
|
||||
|
||||
// Count occurrences of `ph` before `p`.
|
||||
let mut occ_before = 0usize;
|
||||
let mut search_pos = 0usize;
|
||||
while search_pos < p {
|
||||
if let Some(found) = text[search_pos..p].find(ph) {
|
||||
occ_before += 1;
|
||||
search_pos += found + ph.len();
|
||||
} else {
|
||||
break 'out None;
|
||||
}
|
||||
}
|
||||
|
||||
if let Some((remove_idx, _)) = self
|
||||
.attached_images
|
||||
.iter()
|
||||
.enumerate()
|
||||
.filter(|(_, img2)| img2.placeholder == *ph)
|
||||
.nth(occ_before)
|
||||
{
|
||||
break 'out Some((remove_idx, ph.clone()));
|
||||
} else {
|
||||
break 'out Some((i, ph.clone()));
|
||||
}
|
||||
}
|
||||
None
|
||||
};
|
||||
|
||||
if let Some((idx, placeholder)) = out {
|
||||
self.textarea.replace_range(p..p + placeholder.len(), "");
|
||||
self.attached_images.remove(idx);
|
||||
return true;
|
||||
}
|
||||
|
||||
// Then try pasted-content placeholders
|
||||
if let Some(placeholder) = self.pending_pastes.iter().find_map(|(ph, _)| {
|
||||
if p < ph.len() {
|
||||
return None;
|
||||
}
|
||||
let start = p - ph.len();
|
||||
if text[start..p] == *ph {
|
||||
Some(ph.clone())
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}) {
|
||||
self.textarea.replace_range(p - placeholder.len()..p, "");
|
||||
self.pending_pastes.retain(|(ph, _)| ph != &placeholder);
|
||||
return true;
|
||||
}
|
||||
|
||||
// Also handle when the cursor is at the START of a pasted-content placeholder.
|
||||
if let Some(placeholder) = self.pending_pastes.iter().find_map(|(ph, _)| {
|
||||
if p + ph.len() > text.len() {
|
||||
return None;
|
||||
}
|
||||
if &text[p..p + ph.len()] == ph {
|
||||
Some(ph.clone())
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}) {
|
||||
self.textarea.replace_range(p..p + placeholder.len(), "");
|
||||
self.pending_pastes.retain(|(ph, _)| ph != &placeholder);
|
||||
return true;
|
||||
}
|
||||
|
||||
false
|
||||
}
|
||||
|
||||
/// Synchronize `self.command_popup` with the current text in the
|
||||
/// textarea. This must be called after every modification that can change
|
||||
/// the text so the popup is shown/updated/hidden as appropriate.
|
||||
@@ -744,10 +1195,14 @@ impl WidgetRef for &ChatComposer {
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use std::path::PathBuf;
|
||||
|
||||
use crate::app_event::AppEvent;
|
||||
use crate::bottom_pane::AppEventSender;
|
||||
use crate::bottom_pane::ChatComposer;
|
||||
use crate::bottom_pane::InputResult;
|
||||
use crate::bottom_pane::chat_composer::AttachedImage;
|
||||
use crate::bottom_pane::chat_composer::LARGE_PASTE_CHAR_THRESHOLD;
|
||||
use crate::bottom_pane::textarea::TextArea;
|
||||
use tokio::sync::mpsc::unbounded_channel;
|
||||
@@ -1039,9 +1494,8 @@ mod tests {
|
||||
use crossterm::event::KeyCode;
|
||||
use crossterm::event::KeyEvent;
|
||||
use crossterm::event::KeyModifiers;
|
||||
use tokio::sync::mpsc::error::TryRecvError;
|
||||
|
||||
let (tx, mut rx) = unbounded_channel::<AppEvent>();
|
||||
let (tx, _rx) = unbounded_channel::<AppEvent>();
|
||||
let sender = AppEventSender::new(tx);
|
||||
let mut composer =
|
||||
ChatComposer::new(true, sender, false, "Ask Codex to do anything".to_string());
|
||||
@@ -1057,25 +1511,18 @@ mod tests {
|
||||
let (result, _needs_redraw) =
|
||||
composer.handle_key_event(KeyEvent::new(KeyCode::Enter, KeyModifiers::NONE));
|
||||
|
||||
// When a slash command is dispatched, the composer should not submit
|
||||
// literal text and should clear its textarea.
|
||||
// When a slash command is dispatched, the composer should return a
|
||||
// Command result (not submit literal text) and clear its textarea.
|
||||
match result {
|
||||
InputResult::None => {}
|
||||
InputResult::Command(cmd) => {
|
||||
assert_eq!(cmd.command(), "init");
|
||||
}
|
||||
InputResult::Submitted(text) => {
|
||||
panic!("expected command dispatch, but composer submitted literal text: {text}")
|
||||
}
|
||||
InputResult::None => panic!("expected Command result for '/init'"),
|
||||
}
|
||||
assert!(composer.textarea.is_empty(), "composer should be cleared");
|
||||
|
||||
// Verify a DispatchCommand event for the "init" command was sent.
|
||||
match rx.try_recv() {
|
||||
Ok(AppEvent::DispatchCommand(cmd)) => {
|
||||
assert_eq!(cmd.command(), "init");
|
||||
}
|
||||
Ok(_other) => panic!("unexpected app event"),
|
||||
Err(TryRecvError::Empty) => panic!("expected a DispatchCommand event for '/init'"),
|
||||
Err(TryRecvError::Disconnected) => panic!("app event channel disconnected"),
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
@@ -1105,9 +1552,8 @@ mod tests {
|
||||
use crossterm::event::KeyCode;
|
||||
use crossterm::event::KeyEvent;
|
||||
use crossterm::event::KeyModifiers;
|
||||
use tokio::sync::mpsc::error::TryRecvError;
|
||||
|
||||
let (tx, mut rx) = unbounded_channel::<AppEvent>();
|
||||
let (tx, _rx) = unbounded_channel::<AppEvent>();
|
||||
let sender = AppEventSender::new(tx);
|
||||
let mut composer =
|
||||
ChatComposer::new(true, sender, false, "Ask Codex to do anything".to_string());
|
||||
@@ -1120,24 +1566,16 @@ mod tests {
|
||||
composer.handle_key_event(KeyEvent::new(KeyCode::Enter, KeyModifiers::NONE));
|
||||
|
||||
match result {
|
||||
InputResult::None => {}
|
||||
InputResult::Command(cmd) => {
|
||||
assert_eq!(cmd.command(), "mention");
|
||||
}
|
||||
InputResult::Submitted(text) => {
|
||||
panic!("expected command dispatch, but composer submitted literal text: {text}")
|
||||
}
|
||||
InputResult::None => panic!("expected Command result for '/mention'"),
|
||||
}
|
||||
assert!(composer.textarea.is_empty(), "composer should be cleared");
|
||||
|
||||
match rx.try_recv() {
|
||||
Ok(AppEvent::DispatchCommand(cmd)) => {
|
||||
assert_eq!(cmd.command(), "mention");
|
||||
composer.insert_str("@");
|
||||
}
|
||||
Ok(_other) => panic!("unexpected app event"),
|
||||
Err(TryRecvError::Empty) => panic!("expected a DispatchCommand event for '/mention'"),
|
||||
Err(TryRecvError::Disconnected) => {
|
||||
panic!("app event channel disconnected")
|
||||
}
|
||||
}
|
||||
composer.insert_str("@");
|
||||
assert_eq!(composer.textarea.text(), "@");
|
||||
}
|
||||
|
||||
@@ -1327,4 +1765,112 @@ mod tests {
|
||||
]
|
||||
);
|
||||
}
|
||||
|
||||
// --- Image attachment tests ---
|
||||
#[test]
|
||||
fn attach_image_and_submit_includes_image_paths() {
|
||||
let (tx, _rx) = unbounded_channel::<AppEvent>();
|
||||
let sender = AppEventSender::new(tx);
|
||||
let mut composer =
|
||||
ChatComposer::new(true, sender, false, "Ask Codex to do anything".to_string());
|
||||
let path = PathBuf::from("/tmp/image1.png");
|
||||
composer.attach_image(path.clone(), 32, 16, "PNG");
|
||||
composer.handle_paste(" hi".into());
|
||||
let (result, _) =
|
||||
composer.handle_key_event(KeyEvent::new(KeyCode::Enter, KeyModifiers::NONE));
|
||||
match result {
|
||||
InputResult::Submitted(text) => assert_eq!(text, "hi"),
|
||||
_ => panic!("expected Submitted"),
|
||||
}
|
||||
let imgs = composer.take_recent_submission_images();
|
||||
assert_eq!(vec![path], imgs);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn attach_image_without_text_submits_empty_text_and_images() {
|
||||
let (tx, _rx) = unbounded_channel::<AppEvent>();
|
||||
let sender = AppEventSender::new(tx);
|
||||
let mut composer =
|
||||
ChatComposer::new(true, sender, false, "Ask Codex to do anything".to_string());
|
||||
let path = PathBuf::from("/tmp/image2.png");
|
||||
composer.attach_image(path.clone(), 10, 5, "PNG");
|
||||
let (result, _) =
|
||||
composer.handle_key_event(KeyEvent::new(KeyCode::Enter, KeyModifiers::NONE));
|
||||
match result {
|
||||
InputResult::Submitted(text) => assert!(text.is_empty()),
|
||||
_ => panic!("expected Submitted"),
|
||||
}
|
||||
let imgs = composer.take_recent_submission_images();
|
||||
assert_eq!(imgs.len(), 1);
|
||||
assert_eq!(imgs[0], path);
|
||||
assert!(composer.attached_images.is_empty());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn image_placeholder_backspace_behaves_like_text_placeholder() {
|
||||
let (tx, _rx) = unbounded_channel::<AppEvent>();
|
||||
let sender = AppEventSender::new(tx);
|
||||
let mut composer =
|
||||
ChatComposer::new(true, sender, false, "Ask Codex to do anything".to_string());
|
||||
let path = PathBuf::from("/tmp/image3.png");
|
||||
composer.attach_image(path.clone(), 20, 10, "PNG");
|
||||
let placeholder = composer.attached_images[0].placeholder.clone();
|
||||
|
||||
// Case 1: backspace at end
|
||||
composer.textarea.move_cursor_to_end_of_line(false);
|
||||
composer.handle_key_event(KeyEvent::new(KeyCode::Backspace, KeyModifiers::NONE));
|
||||
assert!(!composer.textarea.text().contains(&placeholder));
|
||||
assert!(composer.attached_images.is_empty());
|
||||
|
||||
// Re-add and test backspace in middle: should break the placeholder string
|
||||
// and drop the image mapping (same as text placeholder behavior).
|
||||
composer.attach_image(path.clone(), 20, 10, "PNG");
|
||||
let placeholder2 = composer.attached_images[0].placeholder.clone();
|
||||
// Move cursor to roughly middle of placeholder
|
||||
if let Some(start_pos) = composer.textarea.text().find(&placeholder2) {
|
||||
let mid_pos = start_pos + (placeholder2.len() / 2);
|
||||
composer.textarea.set_cursor(mid_pos);
|
||||
composer.handle_key_event(KeyEvent::new(KeyCode::Backspace, KeyModifiers::NONE));
|
||||
assert!(!composer.textarea.text().contains(&placeholder2));
|
||||
assert!(composer.attached_images.is_empty());
|
||||
} else {
|
||||
panic!("Placeholder not found in textarea");
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn deleting_one_of_duplicate_image_placeholders_removes_matching_entry() {
|
||||
let (tx, _rx) = unbounded_channel::<AppEvent>();
|
||||
let sender = AppEventSender::new(tx);
|
||||
let mut composer =
|
||||
ChatComposer::new(true, sender, false, "Ask Codex to do anything".to_string());
|
||||
|
||||
let path1 = PathBuf::from("/tmp/image_dup1.png");
|
||||
let path2 = PathBuf::from("/tmp/image_dup2.png");
|
||||
|
||||
composer.attach_image(path1.clone(), 10, 5, "PNG");
|
||||
// separate placeholders with a space for clarity
|
||||
composer.handle_paste(" ".into());
|
||||
composer.attach_image(path2.clone(), 10, 5, "PNG");
|
||||
|
||||
let ph = composer.attached_images[0].placeholder.clone();
|
||||
let text = composer.textarea.text().to_string();
|
||||
let start1 = text.find(&ph).expect("first placeholder present");
|
||||
let end1 = start1 + ph.len();
|
||||
composer.textarea.set_cursor(end1);
|
||||
|
||||
// Backspace should delete the first placeholder and its mapping.
|
||||
composer.handle_key_event(KeyEvent::new(KeyCode::Backspace, KeyModifiers::NONE));
|
||||
|
||||
let new_text = composer.textarea.text().to_string();
|
||||
assert_eq!(1, new_text.matches(&ph).count(), "one placeholder remains");
|
||||
assert_eq!(
|
||||
vec![AttachedImage {
|
||||
path: path2,
|
||||
placeholder: "[image 10x5 PNG]".to_string()
|
||||
}],
|
||||
composer.attached_images,
|
||||
"one image mapping remains"
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,4 +1,5 @@
//! Bottom pane: shows the ChatComposer or a BottomPaneView, if one is active.
use std::path::PathBuf;

use crate::app_event_sender::AppEventSender;
use crate::tui::FrameRequester;
@@ -182,6 +183,17 @@ impl BottomPane {
self.request_redraw();
}

/// Update the animated header shown to the left of the brackets in the
/// status indicator (defaults to "Working"). This will update the active
/// StatusIndicatorView if present; otherwise, if a live overlay is active,
/// it will update that. If neither is present, this call is a no-op.
pub(crate) fn update_status_header(&mut self, header: String) {
if let Some(view) = self.active_view.as_mut() {
view.update_status_header(header.clone());
self.request_redraw();
}
}

pub(crate) fn show_ctrl_c_quit_hint(&mut self) {
self.ctrl_c_quit_hint = true;
self.composer
@@ -331,6 +343,24 @@ impl BottomPane {
self.composer.on_file_search_result(query, matches);
self.request_redraw();
}

pub(crate) fn attach_image(
&mut self,
path: PathBuf,
width: u32,
height: u32,
format_label: &str,
) {
if self.active_view.is_none() {
self.composer
.attach_image(path, width, height, format_label);
self.request_redraw();
}
}

pub(crate) fn take_recent_submission_images(&mut self) -> Vec<PathBuf> {
self.composer.take_recent_submission_images()
}
}

impl WidgetRef for &BottomPane {
@@ -462,8 +492,6 @@ mod tests {
assert!(pane.active_view.is_some(), "active view should be present");

// Render and ensure the top row includes the Working header instead of the composer.
// Give the animation thread a moment to tick.
std::thread::sleep(std::time::Duration::from_millis(120));
let area = Rect::new(0, 0, 40, 3);
let mut buf = Buffer::empty(area);
(&pane).render_ref(area, &mut buf);
@@ -495,9 +523,6 @@
// Begin a task: show initial status.
pane.set_task_running(true);

// Allow some frames so the animation thread ticks.
std::thread::sleep(std::time::Duration::from_millis(120));

// Render and confirm the line contains the "Working" header.
let area = Rect::new(0, 0, 40, 3);
let mut buf = Buffer::empty(area);

@@ -24,9 +24,17 @@ impl StatusIndicatorView {
pub fn update_text(&mut self, text: String) {
self.view.update_text(text);
}

pub fn update_header(&mut self, header: String) {
self.view.update_header(header);
}
}

impl BottomPaneView for StatusIndicatorView {
fn update_status_header(&mut self, header: String) {
self.update_header(header);
}

fn should_hide_when_task_is_done(&mut self) -> bool {
true
}

@@ -1473,7 +1473,7 @@ mod tests {
.timestamp() as u64;
let mut rng = rand::rngs::StdRng::seed_from_u64(pst_today_seed);

for _case in 0..10_000 {
for _case in 0..500 {
let mut ta = TextArea::new();
let mut state = TextAreaState::default();
// Track element payloads we insert. Payloads use characters '[' and ']' which
@@ -1497,7 +1497,7 @@ mod tests {
let mut width: u16 = rng.random_range(1..=12);
let mut height: u16 = rng.random_range(1..=4);

for _step in 0..200 {
for _step in 0..60 {
// Mostly stable width/height, occasionally change
if rng.random_bool(0.1) {
width = rng.random_range(1..=12);

@@ -23,6 +23,7 @@ use codex_core::protocol::McpToolCallBeginEvent;
use codex_core::protocol::McpToolCallEndEvent;
use codex_core::protocol::Op;
use codex_core::protocol::PatchApplyBeginEvent;
use codex_core::protocol::StreamErrorEvent;
use codex_core::protocol::TaskCompleteEvent;
use codex_core::protocol::TokenUsage;
use codex_core::protocol::TurnDiffEvent;
@@ -47,11 +48,13 @@ use crate::bottom_pane::CancellationEvent;
use crate::bottom_pane::InputResult;
use crate::bottom_pane::SelectionAction;
use crate::bottom_pane::SelectionItem;
use crate::get_git_diff::get_git_diff;
use crate::history_cell;
use crate::history_cell::CommandOutput;
use crate::history_cell::ExecCell;
use crate::history_cell::HistoryCell;
use crate::history_cell::PatchEventType;
use crate::slash_command::SlashCommand;
use crate::tui::FrameRequester;
// streaming internals are provided by crate::streaming and crate::markdown_stream
use crate::user_approval_widget::ApprovalRequest;
@@ -89,8 +92,6 @@ pub(crate) struct ChatWidget {
last_token_usage: TokenUsage,
// Stream lifecycle controller
stream: StreamController,
// Track the most recently active stream kind in the current turn
last_stream_kind: Option<StreamKind>,
running_commands: HashMap<String, RunningCommand>,
pending_exec_completions: Vec<(Vec<String>, Vec<ParsedCommand>, CommandOutput)>,
task_complete_pending: bool,
@@ -98,6 +99,10 @@
interrupts: InterruptManager,
// Whether a redraw is needed after handling the current event
needs_redraw: bool,
// Accumulates the current reasoning block text to extract a header
reasoning_buffer: String,
// Accumulates full reasoning content for transcript-only recording
full_reasoning_buffer: String,
session_id: Option<Uuid>,
frame_requester: FrameRequester,
}
@@ -107,8 +112,6 @@ struct UserMessage {
|
||||
image_paths: Vec<PathBuf>,
|
||||
}
|
||||
|
||||
use crate::streaming::StreamKind;
|
||||
|
||||
impl From<String> for UserMessage {
|
||||
fn from(text: String) -> Self {
|
||||
Self {
|
||||
@@ -133,14 +136,14 @@ impl ChatWidget {
|
||||
}
|
||||
fn flush_answer_stream_with_separator(&mut self) {
|
||||
let sink = AppEventHistorySink(self.app_event_tx.clone());
|
||||
let _ = self.stream.finalize(StreamKind::Answer, true, &sink);
|
||||
let _ = self.stream.finalize(true, &sink);
|
||||
}
|
||||
// --- Small event handlers ---
|
||||
fn on_session_configured(&mut self, event: codex_core::protocol::SessionConfiguredEvent) {
|
||||
self.bottom_pane
|
||||
.set_history_metadata(event.history_log_id, event.history_entry_count);
|
||||
self.session_id = Some(event.session_id);
|
||||
self.add_to_history(&history_cell::new_session_info(&self.config, event, true));
|
||||
self.add_to_history(history_cell::new_session_info(&self.config, event, true));
|
||||
if let Some(user_message) = self.initial_user_message.take() {
|
||||
self.submit_user_message(user_message);
|
||||
}
|
||||
@@ -150,30 +153,48 @@ impl ChatWidget {
|
||||
fn on_agent_message(&mut self, message: String) {
|
||||
let sink = AppEventHistorySink(self.app_event_tx.clone());
|
||||
let finished = self.stream.apply_final_answer(&message, &sink);
|
||||
self.last_stream_kind = Some(StreamKind::Answer);
|
||||
self.handle_if_stream_finished(finished);
|
||||
self.mark_needs_redraw();
|
||||
}
|
||||
|
||||
fn on_agent_message_delta(&mut self, delta: String) {
|
||||
self.handle_streaming_delta(StreamKind::Answer, delta);
|
||||
self.handle_streaming_delta(delta);
|
||||
}
|
||||
|
||||
fn on_agent_reasoning_delta(&mut self, delta: String) {
|
||||
self.handle_streaming_delta(StreamKind::Reasoning, delta);
|
||||
// For reasoning deltas, do not stream to history. Accumulate the
|
||||
// current reasoning block and extract the first bold element
|
||||
// (between **/**) as the chunk header. Show this header as status.
|
||||
self.reasoning_buffer.push_str(&delta);
|
||||
|
||||
if let Some(header) = extract_first_bold(&self.reasoning_buffer) {
|
||||
// Update the shimmer header to the extracted reasoning chunk header.
|
||||
self.bottom_pane.update_status_header(header);
|
||||
} else {
|
||||
// Fallback while we don't yet have a bold header: leave existing header as-is.
|
||||
}
|
||||
self.mark_needs_redraw();
|
||||
}
|
||||
|
||||
fn on_agent_reasoning_final(&mut self, text: String) {
|
||||
let sink = AppEventHistorySink(self.app_event_tx.clone());
|
||||
let finished = self.stream.apply_final_reasoning(&text, &sink);
|
||||
self.last_stream_kind = Some(StreamKind::Reasoning);
|
||||
self.handle_if_stream_finished(finished);
|
||||
fn on_agent_reasoning_final(&mut self) {
|
||||
// At the end of a reasoning block, record transcript-only content.
|
||||
self.full_reasoning_buffer.push_str(&self.reasoning_buffer);
|
||||
if !self.full_reasoning_buffer.is_empty() {
|
||||
self.add_to_history(history_cell::new_reasoning_block(
|
||||
self.full_reasoning_buffer.clone(),
|
||||
&self.config,
|
||||
));
|
||||
}
|
||||
self.reasoning_buffer.clear();
|
||||
self.full_reasoning_buffer.clear();
|
||||
self.mark_needs_redraw();
|
||||
}
|
||||
|
||||
fn on_reasoning_section_break(&mut self) {
|
||||
let sink = AppEventHistorySink(self.app_event_tx.clone());
|
||||
self.stream.insert_reasoning_section_break(&sink);
|
||||
// Start a new reasoning block for header extraction and accumulate transcript.
|
||||
self.full_reasoning_buffer.push_str(&self.reasoning_buffer);
|
||||
self.full_reasoning_buffer.push_str("\n\n");
|
||||
self.reasoning_buffer.clear();
|
||||
}
|
||||
|
||||
// Raw reasoning uses the same flow as summarized reasoning
|
||||
@@ -182,7 +203,8 @@ impl ChatWidget {
|
||||
self.bottom_pane.clear_ctrl_c_quit_hint();
|
||||
self.bottom_pane.set_task_running(true);
|
||||
self.stream.reset_headers_for_new_turn();
|
||||
self.last_stream_kind = None;
|
||||
self.full_reasoning_buffer.clear();
|
||||
self.reasoning_buffer.clear();
|
||||
self.mark_needs_redraw();
|
||||
}
|
||||
|
||||
@@ -191,9 +213,7 @@ impl ChatWidget {
|
||||
// without emitting stray headers for other streams.
|
||||
if self.stream.is_write_cycle_active() {
|
||||
let sink = AppEventHistorySink(self.app_event_tx.clone());
|
||||
if let Some(kind) = self.last_stream_kind {
|
||||
let _ = self.stream.finalize(kind, true, &sink);
|
||||
}
|
||||
let _ = self.stream.finalize(true, &sink);
|
||||
}
|
||||
// Mark task stopped and request redraw now that all content is in history.
|
||||
self.bottom_pane.set_task_running(false);
|
||||
@@ -212,7 +232,7 @@ impl ChatWidget {
|
||||
}
|
||||
|
||||
fn on_error(&mut self, message: String) {
|
||||
self.add_to_history(&history_cell::new_error_event(message));
|
||||
self.add_to_history(history_cell::new_error_event(message));
|
||||
self.bottom_pane.set_task_running(false);
|
||||
self.running_commands.clear();
|
||||
self.stream.clear_all();
|
||||
@@ -220,7 +240,7 @@ impl ChatWidget {
|
||||
}
|
||||
|
||||
fn on_plan_update(&mut self, update: codex_core::plan_tool::UpdatePlanArgs) {
|
||||
self.add_to_history(&history_cell::new_plan_update(update));
|
||||
self.add_to_history(history_cell::new_plan_update(update));
|
||||
}
|
||||
|
||||
fn on_exec_approval_request(&mut self, id: String, ev: ExecApprovalRequestEvent) {
|
||||
@@ -255,7 +275,7 @@ impl ChatWidget {
|
||||
}
|
||||
|
||||
fn on_patch_apply_begin(&mut self, event: PatchApplyBeginEvent) {
|
||||
self.add_to_history(&history_cell::new_patch_event(
|
||||
self.add_to_history(history_cell::new_patch_event(
|
||||
PatchEventType::ApplyBegin {
|
||||
auto_approved: event.auto_approved,
|
||||
},
|
||||
@@ -310,6 +330,12 @@ impl ChatWidget {
|
||||
fn on_background_event(&mut self, message: String) {
|
||||
debug!("BackgroundEvent: {message}");
|
||||
}
|
||||
|
||||
fn on_stream_error(&mut self, message: String) {
|
||||
// Show stream errors in the transcript so users see retry/backoff info.
|
||||
self.add_to_history(history_cell::new_stream_error_event(message));
|
||||
self.mark_needs_redraw();
|
||||
}
|
||||
/// Periodic tick to commit at most one queued line to history with a small delay,
|
||||
/// animating the output.
|
||||
pub(crate) fn on_commit_tick(&mut self) {
|
||||
@@ -355,10 +381,9 @@ impl ChatWidget {
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn handle_streaming_delta(&mut self, kind: StreamKind, delta: String) {
|
||||
fn handle_streaming_delta(&mut self, delta: String) {
|
||||
let sink = AppEventHistorySink(self.app_event_tx.clone());
|
||||
self.stream.begin(kind, &sink);
|
||||
self.last_stream_kind = Some(kind);
|
||||
self.stream.begin(&sink);
|
||||
self.stream.push_and_maybe_commit(&delta, &sink);
|
||||
self.mark_needs_redraw();
|
||||
}
|
||||
@@ -383,7 +408,7 @@ impl ChatWidget {
|
||||
self.active_exec_cell = None;
|
||||
let pending = std::mem::take(&mut self.pending_exec_completions);
|
||||
for (command, parsed, output) in pending {
|
||||
self.add_to_history(&history_cell::new_completed_exec_command(
|
||||
self.add_to_history(history_cell::new_completed_exec_command(
|
||||
command, parsed, output,
|
||||
));
|
||||
}
|
||||
@@ -395,9 +420,9 @@ impl ChatWidget {
|
||||
event: codex_core::protocol::PatchApplyEndEvent,
|
||||
) {
|
||||
if event.success {
|
||||
self.add_to_history(&history_cell::new_patch_apply_success(event.stdout));
|
||||
self.add_to_history(history_cell::new_patch_apply_success(event.stdout));
|
||||
} else {
|
||||
self.add_to_history(&history_cell::new_patch_apply_failure(event.stderr));
|
||||
self.add_to_history(history_cell::new_patch_apply_failure(event.stderr));
|
||||
}
|
||||
}
|
||||
|
||||
@@ -419,7 +444,7 @@ impl ChatWidget {
|
||||
ev: ApplyPatchApprovalRequestEvent,
|
||||
) {
|
||||
self.flush_answer_stream_with_separator();
|
||||
self.add_to_history(&history_cell::new_patch_event(
|
||||
self.add_to_history(history_cell::new_patch_event(
|
||||
PatchEventType::ApprovalRequest,
|
||||
ev.changes.clone(),
|
||||
));
|
||||
@@ -461,11 +486,11 @@ impl ChatWidget {
|
||||
|
||||
pub(crate) fn handle_mcp_begin_now(&mut self, ev: McpToolCallBeginEvent) {
|
||||
self.flush_answer_stream_with_separator();
|
||||
self.add_to_history(&history_cell::new_active_mcp_tool_call(ev.invocation));
|
||||
self.add_to_history(history_cell::new_active_mcp_tool_call(ev.invocation));
|
||||
}
|
||||
pub(crate) fn handle_mcp_end_now(&mut self, ev: McpToolCallEndEvent) {
|
||||
self.flush_answer_stream_with_separator();
|
||||
self.add_to_history(&*history_cell::new_completed_mcp_tool_call(
|
||||
self.add_boxed_history(history_cell::new_completed_mcp_tool_call(
|
||||
80,
|
||||
ev.invocation,
|
||||
ev.duration,
|
||||
@@ -532,12 +557,13 @@ impl ChatWidget {
|
||||
total_token_usage: TokenUsage::default(),
|
||||
last_token_usage: TokenUsage::default(),
|
||||
stream: StreamController::new(config),
|
||||
last_stream_kind: None,
|
||||
running_commands: HashMap::new(),
|
||||
pending_exec_completions: Vec::new(),
|
||||
task_complete_pending: false,
|
||||
interrupts: InterruptManager::new(),
|
||||
needs_redraw: false,
|
||||
reasoning_buffer: String::new(),
|
||||
full_reasoning_buffer: String::new(),
|
||||
session_id: None,
|
||||
}
|
||||
}
|
||||
@@ -557,12 +583,130 @@ impl ChatWidget {
|
||||
|
||||
match self.bottom_pane.handle_key_event(key_event) {
|
||||
InputResult::Submitted(text) => {
|
||||
self.submit_user_message(text.into());
|
||||
let images = self.bottom_pane.take_recent_submission_images();
|
||||
self.submit_user_message(UserMessage {
|
||||
text,
|
||||
image_paths: images,
|
||||
});
|
||||
}
|
||||
InputResult::Command(cmd) => {
|
||||
self.dispatch_command(cmd);
|
||||
}
|
||||
InputResult::None => {}
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn attach_image(
|
||||
&mut self,
|
||||
path: PathBuf,
|
||||
width: u32,
|
||||
height: u32,
|
||||
format_label: &str,
|
||||
) {
|
||||
tracing::info!(
|
||||
"attach_image path={path:?} width={width} height={height} format={format_label}",
|
||||
);
|
||||
self.bottom_pane
|
||||
.attach_image(path.clone(), width, height, format_label);
|
||||
self.request_redraw();
|
||||
}
|
||||
|
||||
fn dispatch_command(&mut self, cmd: SlashCommand) {
|
||||
match cmd {
|
||||
SlashCommand::New => {
|
||||
self.app_event_tx.send(AppEvent::NewSession);
|
||||
}
|
||||
SlashCommand::Init => {
|
||||
// Guard: do not run if a task is active.
|
||||
const INIT_PROMPT: &str = include_str!("../prompt_for_init_command.md");
|
||||
self.submit_text_message(INIT_PROMPT.to_string());
|
||||
}
|
||||
SlashCommand::Compact => {
|
||||
self.clear_token_usage();
|
||||
self.app_event_tx.send(AppEvent::CodexOp(Op::Compact));
|
||||
}
|
||||
SlashCommand::Model => {
|
||||
self.open_model_popup();
|
||||
}
|
||||
SlashCommand::Approvals => {
|
||||
self.open_approvals_popup();
|
||||
}
|
||||
SlashCommand::Quit => {
|
||||
self.app_event_tx.send(AppEvent::ExitRequest);
|
||||
}
|
||||
SlashCommand::Logout => {
|
||||
if let Err(e) = codex_login::logout(&self.config.codex_home) {
|
||||
tracing::error!("failed to logout: {e}");
|
||||
}
|
||||
self.app_event_tx.send(AppEvent::ExitRequest);
|
||||
}
|
||||
SlashCommand::Diff => {
|
||||
self.add_diff_in_progress();
|
||||
let tx = self.app_event_tx.clone();
|
||||
tokio::spawn(async move {
|
||||
let text = match get_git_diff().await {
|
||||
Ok((is_git_repo, diff_text)) => {
|
||||
if is_git_repo {
|
||||
diff_text
|
||||
} else {
|
||||
"`/diff` — _not inside a git repository_".to_string()
|
||||
}
|
||||
}
|
||||
Err(e) => format!("Failed to compute diff: {e}"),
|
||||
};
|
||||
tx.send(AppEvent::DiffResult(text));
|
||||
});
|
||||
}
|
||||
SlashCommand::Mention => {
|
||||
self.insert_str("@");
|
||||
}
|
||||
SlashCommand::Status => {
|
||||
self.add_status_output();
|
||||
}
|
||||
SlashCommand::Mcp => {
|
||||
self.add_mcp_output();
|
||||
}
|
||||
#[cfg(debug_assertions)]
|
||||
SlashCommand::TestApproval => {
|
||||
use codex_core::protocol::EventMsg;
|
||||
use std::collections::HashMap;
|
||||
|
||||
use codex_core::protocol::ApplyPatchApprovalRequestEvent;
|
||||
use codex_core::protocol::FileChange;
|
||||
|
||||
self.app_event_tx.send(AppEvent::CodexEvent(Event {
|
||||
id: "1".to_string(),
|
||||
// msg: EventMsg::ExecApprovalRequest(ExecApprovalRequestEvent {
|
||||
// call_id: "1".to_string(),
|
||||
// command: vec!["git".into(), "apply".into()],
|
||||
// cwd: self.config.cwd.clone(),
|
||||
// reason: Some("test".to_string()),
|
||||
// }),
|
||||
msg: EventMsg::ApplyPatchApprovalRequest(ApplyPatchApprovalRequestEvent {
|
||||
call_id: "1".to_string(),
|
||||
changes: HashMap::from([
|
||||
(
|
||||
PathBuf::from("/tmp/test.txt"),
|
||||
FileChange::Add {
|
||||
content: "test".to_string(),
|
||||
},
|
||||
),
|
||||
(
|
||||
PathBuf::from("/tmp/test2.txt"),
|
||||
FileChange::Update {
|
||||
unified_diff: "+test\n-test2".to_string(),
|
||||
move_path: None,
|
||||
},
|
||||
),
|
||||
]),
|
||||
reason: None,
|
||||
grant_root: Some(PathBuf::from("/tmp")),
|
||||
}),
|
||||
}));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn handle_paste(&mut self, text: String) {
|
||||
self.bottom_pane.handle_paste(text);
|
||||
}
|
||||
@@ -570,14 +714,19 @@ impl ChatWidget {
|
||||
fn flush_active_exec_cell(&mut self) {
|
||||
if let Some(active) = self.active_exec_cell.take() {
|
||||
self.app_event_tx
|
||||
.send(AppEvent::InsertHistory(active.display_lines()));
|
||||
.send(AppEvent::InsertHistoryCell(Box::new(active)));
|
||||
}
|
||||
}
|
||||
|
||||
fn add_to_history(&mut self, cell: &dyn HistoryCell) {
|
||||
fn add_to_history(&mut self, cell: impl HistoryCell + 'static) {
|
||||
self.flush_active_exec_cell();
|
||||
self.app_event_tx
|
||||
.send(AppEvent::InsertHistory(cell.display_lines()));
|
||||
.send(AppEvent::InsertHistoryCell(Box::new(cell)));
|
||||
}
|
||||
|
||||
fn add_boxed_history(&mut self, cell: Box<dyn HistoryCell>) {
|
||||
self.flush_active_exec_cell();
|
||||
self.app_event_tx.send(AppEvent::InsertHistoryCell(cell));
|
||||
}
|
||||
|
||||
fn submit_user_message(&mut self, user_message: UserMessage) {
|
||||
@@ -613,7 +762,7 @@ impl ChatWidget {
|
||||
|
||||
// Only show the text portion in conversation history.
|
||||
if !text.is_empty() {
|
||||
self.add_to_history(&history_cell::new_user_prompt(text.clone()));
|
||||
self.add_to_history(history_cell::new_user_prompt(text.clone()));
|
||||
}
|
||||
}
|
||||
|
||||
@@ -641,9 +790,9 @@ impl ChatWidget {
|
||||
| EventMsg::AgentReasoningRawContentDelta(AgentReasoningRawContentDeltaEvent {
|
||||
delta,
|
||||
}) => self.on_agent_reasoning_delta(delta),
|
||||
EventMsg::AgentReasoning(AgentReasoningEvent { text })
|
||||
| EventMsg::AgentReasoningRawContent(AgentReasoningRawContentEvent { text }) => {
|
||||
self.on_agent_reasoning_final(text)
|
||||
EventMsg::AgentReasoning(AgentReasoningEvent { .. })
|
||||
| EventMsg::AgentReasoningRawContent(AgentReasoningRawContentEvent { .. }) => {
|
||||
self.on_agent_reasoning_final()
|
||||
}
|
||||
EventMsg::AgentReasoningSectionBreak(_) => self.on_reasoning_section_break(),
|
||||
EventMsg::TaskStarted => self.on_task_started(),
|
||||
@@ -668,6 +817,7 @@ impl ChatWidget {
|
||||
EventMsg::BackgroundEvent(BackgroundEventEvent { message }) => {
|
||||
self.on_background_event(message)
|
||||
}
|
||||
EventMsg::StreamError(StreamErrorEvent { message }) => self.on_stream_error(message),
|
||||
}
|
||||
// Coalesce redraws: issue at most one after handling the event
|
||||
if self.needs_redraw {
|
||||
@@ -687,14 +837,13 @@ impl ChatWidget {
|
||||
self.request_redraw();
|
||||
}
|
||||
|
||||
pub(crate) fn add_diff_output(&mut self, diff_output: String) {
|
||||
pub(crate) fn on_diff_complete(&mut self) {
|
||||
self.bottom_pane.set_task_running(false);
|
||||
self.add_to_history(&history_cell::new_diff_output(diff_output));
|
||||
self.mark_needs_redraw();
|
||||
}
|
||||
|
||||
pub(crate) fn add_status_output(&mut self) {
|
||||
self.add_to_history(&history_cell::new_status_output(
|
||||
self.add_to_history(history_cell::new_status_output(
|
||||
&self.config,
|
||||
&self.total_token_usage,
|
||||
&self.session_id,
|
||||
@@ -805,7 +954,7 @@ impl ChatWidget {
|
||||
|
||||
pub(crate) fn add_mcp_output(&mut self) {
|
||||
if self.config.mcp_servers.is_empty() {
|
||||
self.add_to_history(&history_cell::empty_mcp_output());
|
||||
self.add_to_history(history_cell::empty_mcp_output());
|
||||
} else {
|
||||
self.submit_op(Op::ListMcpTools);
|
||||
}
|
||||
@@ -853,7 +1002,7 @@ impl ChatWidget {
|
||||
}
|
||||
|
||||
fn on_list_mcp_tools(&mut self, ev: McpListToolsResponseEvent) {
|
||||
self.add_to_history(&history_cell::new_mcp_tools_output(&self.config, ev.tools));
|
||||
self.add_to_history(history_cell::new_mcp_tools_output(&self.config, ev.tools));
|
||||
}
|
||||
|
||||
/// Programmatically submit a user text message as if typed in the
|
||||
@@ -932,5 +1081,35 @@ fn add_token_usage(current_usage: &TokenUsage, new_usage: &TokenUsage) -> TokenU
}
}

// Extract the first bold (Markdown) element in the form **...** from `s`.
// Returns the inner text if found; otherwise `None`.
fn extract_first_bold(s: &str) -> Option<String> {
let bytes = s.as_bytes();
let mut i = 0usize;
while i + 1 < bytes.len() {
if bytes[i] == b'*' && bytes[i + 1] == b'*' {
let start = i + 2;
let mut j = start;
while j + 1 < bytes.len() {
if bytes[j] == b'*' && bytes[j + 1] == b'*' {
// Found closing **
let inner = &s[start..j];
let trimmed = inner.trim();
if !trimmed.is_empty() {
return Some(trimmed.to_string());
} else {
return None;
}
}
j += 1;
}
// No closing; stop searching (wait for more deltas)
return None;
}
i += 1;
}
None
}

#[cfg(test)]
|
||||
mod tests;
|
||||
|
||||
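The `extract_first_bold` helper added above scans streamed reasoning deltas for the first complete `**...**` span. A minimal sketch of the intended behaviour on a few hypothetical inputs (illustration only, not part of the diff):

// Illustrative expectations for extract_first_bold (hypothetical inputs):
assert_eq!(
    extract_first_bold("**Fixing tests** now"),
    Some("Fixing tests".to_string())
);
// No closing ** yet: return None and wait for more deltas.
assert_eq!(extract_first_bold("**Fixing te"), None);
// An empty or whitespace-only bold span also yields None.
assert_eq!(extract_first_bold("** ** rest"), None);
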
@@ -1,10 +1,6 @@
---
source: tui/src/chatwidget/tests.rs
assertion_line: 886
expression: combined
---
thinking
I will first analyze the request.

codex
Here is the result.

@@ -2,8 +2,5 @@
source: tui/src/chatwidget/tests.rs
expression: combined
---
thinking
I will first analyze the request.

codex
Here is the result.

@@ -19,7 +19,9 @@ use codex_core::protocol::ExecCommandEndEvent;
use codex_core::protocol::FileChange;
use codex_core::protocol::PatchApplyBeginEvent;
use codex_core::protocol::PatchApplyEndEvent;
use codex_core::protocol::StreamErrorEvent;
use codex_core::protocol::TaskCompleteEvent;
use codex_login::CodexAuth;
use crossterm::event::KeyCode;
use crossterm::event::KeyEvent;
use crossterm::event::KeyModifiers;
@@ -103,7 +105,9 @@ async fn helpers_are_available_and_do_not_panic() {
let (tx_raw, _rx) = unbounded_channel::<AppEvent>();
let tx = AppEventSender::new(tx_raw);
let cfg = test_config();
let conversation_manager = Arc::new(ConversationManager::default());
let conversation_manager = Arc::new(ConversationManager::with_auth(CodexAuth::from_api_key(
"test",
)));
let mut w = ChatWidget::new(
cfg,
conversation_manager,
@@ -144,12 +148,13 @@ fn make_chatwidget_manual() -> (
total_token_usage: TokenUsage::default(),
last_token_usage: TokenUsage::default(),
stream: StreamController::new(cfg),
last_stream_kind: None,
running_commands: HashMap::new(),
pending_exec_completions: Vec::new(),
task_complete_pending: false,
interrupts: InterruptManager::new(),
needs_redraw: false,
reasoning_buffer: String::new(),
full_reasoning_buffer: String::new(),
session_id: None,
frame_requester: crate::tui::FrameRequester::test_dummy(),
};
@@ -161,8 +166,10 @@ fn drain_insert_history(
) -> Vec<Vec<ratatui::text::Line<'static>>> {
let mut out = Vec::new();
while let Ok(ev) = rx.try_recv() {
if let AppEvent::InsertHistory(lines) = ev {
out.push(lines);
match ev {
AppEvent::InsertHistoryLines(lines) => out.push(lines),
AppEvent::InsertHistoryCell(cell) => out.push(cell.display_lines()),
_ => {}
}
}
out
@@ -336,13 +343,25 @@ async fn binary_size_transcript_matches_ideal_fixture() {
let ev: Event = serde_json::from_value(payload.clone()).expect("parse");
chat.handle_codex_event(ev);
while let Ok(app_ev) = rx.try_recv() {
if let AppEvent::InsertHistory(lines) = app_ev {
transcript.push_str(&lines_to_single_string(&lines));
crate::insert_history::insert_history_lines_to_writer(
&mut terminal,
&mut ansi,
lines,
);
match app_ev {
AppEvent::InsertHistoryLines(lines) => {
transcript.push_str(&lines_to_single_string(&lines));
crate::insert_history::insert_history_lines_to_writer(
&mut terminal,
&mut ansi,
lines,
);
}
AppEvent::InsertHistoryCell(cell) => {
let lines = cell.display_lines();
transcript.push_str(&lines_to_single_string(&lines));
crate::insert_history::insert_history_lines_to_writer(
&mut terminal,
&mut ansi,
lines,
);
}
_ => {}
}
}
}
@@ -353,13 +372,25 @@ async fn binary_size_transcript_matches_ideal_fixture() {
{
chat.on_commit_tick();
while let Ok(app_ev) = rx.try_recv() {
if let AppEvent::InsertHistory(lines) = app_ev {
transcript.push_str(&lines_to_single_string(&lines));
crate::insert_history::insert_history_lines_to_writer(
&mut terminal,
&mut ansi,
lines,
);
match app_ev {
AppEvent::InsertHistoryLines(lines) => {
transcript.push_str(&lines_to_single_string(&lines));
crate::insert_history::insert_history_lines_to_writer(
&mut terminal,
&mut ansi,
lines,
);
}
AppEvent::InsertHistoryCell(cell) => {
let lines = cell.display_lines();
transcript.push_str(&lines_to_single_string(&lines));
crate::insert_history::insert_history_lines_to_writer(
&mut terminal,
&mut ansi,
lines,
);
}
_ => {}
}
}
}
@@ -375,6 +406,11 @@ async fn binary_size_transcript_matches_ideal_fixture() {
.expect("read ideal-binary-response.txt");
// Normalize line endings for Windows vs. Unix checkouts
let ideal = ideal.replace("\r\n", "\n");
let ideal_first_line = ideal
.lines()
.find(|l| !l.trim().is_empty())
.unwrap_or("")
.to_string();

// Build the final VT100 visual by parsing the ANSI stream. Trim trailing spaces per line
// and drop trailing empty lines so the shape matches the ideal fixture exactly.
@@ -400,22 +436,68 @@ async fn binary_size_transcript_matches_ideal_fixture() {
while lines.last().is_some_and(|l| l.is_empty()) {
lines.pop();
}
// Compare only after the last session banner marker, and start at the next 'thinking' line.
// Compare only after the last session banner marker. Skip the transient
// 'thinking' header if present, and start from the first non-empty line
// of content that follows.
const MARKER_PREFIX: &str = ">_ You are using OpenAI Codex in ";
let last_marker_line_idx = lines
.iter()
.rposition(|l| l.starts_with(MARKER_PREFIX))
.expect("marker not found in visible output");
let thinking_line_idx = (last_marker_line_idx + 1..lines.len())
.find(|&idx| lines[idx].trim_start() == "thinking")
.expect("no 'thinking' line found after marker");
// Anchor to the first ideal line if present; otherwise use heuristics.
let start_idx = (last_marker_line_idx + 1..lines.len())
.find(|&idx| lines[idx].trim_start() == ideal_first_line)
.or_else(|| {
// Prefer the first assistant content line (blockquote '>' prefix) after the marker.
(last_marker_line_idx + 1..lines.len())
.find(|&idx| lines[idx].trim_start().starts_with('>'))
})
.unwrap_or_else(|| {
// Fallback: first non-empty, non-'thinking' line
(last_marker_line_idx + 1..lines.len())
.find(|&idx| {
let t = lines[idx].trim_start();
!t.is_empty() && t != "thinking"
})
.expect("no content line found after marker")
});

let mut compare_lines: Vec<String> = Vec::new();
// Ensure the first line is exactly 'thinking' without leading spaces to match the fixture
compare_lines.push(lines[thinking_line_idx].trim_start().to_string());
compare_lines.extend(lines[(thinking_line_idx + 1)..].iter().cloned());
// Ensure the first line is trimmed-left to match the fixture shape.
compare_lines.push(lines[start_idx].trim_start().to_string());
compare_lines.extend(lines[(start_idx + 1)..].iter().cloned());
let visible_after = compare_lines.join("\n");

// Normalize: drop a leading 'thinking' line if present in either side to
// avoid coupling to whether the reasoning header is rendered in history.
fn drop_leading_thinking(s: &str) -> String {
let mut it = s.lines();
let first = it.next();
let rest = it.collect::<Vec<_>>().join("\n");
if first.is_some_and(|l| l.trim() == "thinking") {
rest
} else {
s.to_string()
}
}
let visible_after = drop_leading_thinking(&visible_after);
let ideal = drop_leading_thinking(&ideal);

// Normalize: strip leading Markdown blockquote markers ('>' or '> ') which
// may be present in rendered transcript lines but not in the ideal text.
fn strip_blockquotes(s: &str) -> String {
s.lines()
.map(|l| {
l.strip_prefix("> ")
.or_else(|| l.strip_prefix('>'))
.unwrap_or(l)
})
.collect::<Vec<_>>()
.join("\n")
}
let visible_after = strip_blockquotes(&visible_after);
let ideal = strip_blockquotes(&ideal);

// Optionally update the fixture when env var is set
if std::env::var("UPDATE_IDEAL").as_deref() == Ok("1") {
let mut p = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
@@ -746,7 +828,26 @@ fn plan_update_renders_history_cell() {
}

#[test]
fn headers_emitted_on_stream_begin_for_answer_and_reasoning() {
fn stream_error_is_rendered_to_history() {
let (mut chat, mut rx, _op_rx) = make_chatwidget_manual();
let msg = "stream error: stream disconnected before completion: idle timeout waiting for SSE; retrying 1/5 in 211ms…";
chat.handle_codex_event(Event {
id: "sub-1".into(),
msg: EventMsg::StreamError(StreamErrorEvent {
message: msg.to_string(),
}),
});

let cells = drain_insert_history(&mut rx);
assert!(!cells.is_empty(), "expected a history cell for StreamError");
let blob = lines_to_single_string(cells.last().unwrap());
assert!(blob.contains("⚠ "));
assert!(blob.contains("stream error:"));
assert!(blob.contains("idle timeout waiting for SSE"));
}

#[test]
fn headers_emitted_on_stream_begin_for_answer_and_not_for_reasoning() {
let (mut chat, mut rx, _op_rx) = make_chatwidget_manual();

// Answer: no header until a newline commit
@@ -758,7 +859,7 @@ fn headers_emitted_on_stream_begin_for_answer_and_reasoning() {
});
let mut saw_codex_pre = false;
while let Ok(ev) = rx.try_recv() {
if let AppEvent::InsertHistory(lines) = ev {
if let AppEvent::InsertHistoryLines(lines) = ev {
let s = lines
.iter()
.flat_map(|l| l.spans.iter())
@@ -786,7 +887,7 @@ fn headers_emitted_on_stream_begin_for_answer_and_reasoning() {
chat.on_commit_tick();
let mut saw_codex_post = false;
while let Ok(ev) = rx.try_recv() {
if let AppEvent::InsertHistory(lines) = ev {
if let AppEvent::InsertHistoryLines(lines) = ev {
let s = lines
.iter()
.flat_map(|l| l.spans.iter())
@@ -804,7 +905,7 @@ fn headers_emitted_on_stream_begin_for_answer_and_reasoning() {
"expected 'codex' header to be emitted after first newline commit"
);

// Reasoning: header immediately
// Reasoning: do NOT emit a history header; status text is updated instead
let (mut chat2, mut rx2, _op_rx2) = make_chatwidget_manual();
chat2.handle_codex_event(Event {
id: "sub-b".into(),
@@ -814,7 +915,7 @@ fn headers_emitted_on_stream_begin_for_answer_and_reasoning() {
});
let mut saw_thinking = false;
while let Ok(ev) = rx2.try_recv() {
if let AppEvent::InsertHistory(lines) = ev {
if let AppEvent::InsertHistoryLines(lines) = ev {
let s = lines
.iter()
.flat_map(|l| l.spans.iter())
@@ -828,8 +929,8 @@ fn headers_emitted_on_stream_begin_for_answer_and_reasoning() {
}
}
assert!(
saw_thinking,
"expected 'thinking' header to be emitted at stream start"
!saw_thinking,
"reasoning deltas should not emit history headers"
);
}

97
codex-rs/tui/src/clipboard_paste.rs
Normal file
97
codex-rs/tui/src/clipboard_paste.rs
Normal file
@@ -0,0 +1,97 @@
use std::path::PathBuf;
use tempfile::Builder;

#[derive(Debug)]
pub enum PasteImageError {
ClipboardUnavailable(String),
NoImage(String),
EncodeFailed(String),
IoError(String),
}

impl std::fmt::Display for PasteImageError {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
PasteImageError::ClipboardUnavailable(msg) => write!(f, "clipboard unavailable: {msg}"),
PasteImageError::NoImage(msg) => write!(f, "no image on clipboard: {msg}"),
PasteImageError::EncodeFailed(msg) => write!(f, "could not encode image: {msg}"),
PasteImageError::IoError(msg) => write!(f, "io error: {msg}"),
}
}
}
impl std::error::Error for PasteImageError {}

#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum EncodedImageFormat {
Png,
}

impl EncodedImageFormat {
pub fn label(self) -> &'static str {
match self {
EncodedImageFormat::Png => "PNG",
}
}
}

#[derive(Debug, Clone)]
pub struct PastedImageInfo {
pub width: u32,
pub height: u32,
pub encoded_format: EncodedImageFormat, // Always PNG for now.
}

/// Capture image from system clipboard, encode to PNG, and return bytes + info.
pub fn paste_image_as_png() -> Result<(Vec<u8>, PastedImageInfo), PasteImageError> {
tracing::debug!("attempting clipboard image read");
let mut cb = arboard::Clipboard::new()
.map_err(|e| PasteImageError::ClipboardUnavailable(e.to_string()))?;
let img = cb
.get_image()
.map_err(|e| PasteImageError::NoImage(e.to_string()))?;
let w = img.width as u32;
let h = img.height as u32;

let mut png: Vec<u8> = Vec::new();
let Some(rgba_img) = image::RgbaImage::from_raw(w, h, img.bytes.into_owned()) else {
return Err(PasteImageError::EncodeFailed("invalid RGBA buffer".into()));
};
let dyn_img = image::DynamicImage::ImageRgba8(rgba_img);
tracing::debug!("clipboard image decoded RGBA {w}x{h}");
{
let mut cursor = std::io::Cursor::new(&mut png);
dyn_img
.write_to(&mut cursor, image::ImageFormat::Png)
.map_err(|e| PasteImageError::EncodeFailed(e.to_string()))?;
}

tracing::debug!(
"clipboard image encoded to PNG ({len} bytes)",
len = png.len()
);
Ok((
png,
PastedImageInfo {
width: w,
height: h,
encoded_format: EncodedImageFormat::Png,
},
))
}

/// Convenience: write to a temp file and return its path + info.
pub fn paste_image_to_temp_png() -> Result<(PathBuf, PastedImageInfo), PasteImageError> {
let (png, info) = paste_image_as_png()?;
// Create a unique temporary file with a .png suffix to avoid collisions.
let tmp = Builder::new()
.prefix("codex-clipboard-")
.suffix(".png")
.tempfile()
.map_err(|e| PasteImageError::IoError(e.to_string()))?;
std::fs::write(tmp.path(), &png).map_err(|e| PasteImageError::IoError(e.to_string()))?;
// Persist the file (so it remains after the handle is dropped) and return its PathBuf.
let (_file, path) = tmp
.keep()
.map_err(|e| PasteImageError::IoError(e.error.to_string()))?;
Ok((path, info))
}
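A rough sketch of how the new clipboard helpers above could be called from the TUI (assumed call site, for illustration only; this code is not part of the diff):

// Hypothetical caller: grab an image from the clipboard and log where the PNG landed.
match clipboard_paste::paste_image_to_temp_png() {
    Ok((path, info)) => tracing::info!(
        "pasted {}x{} {} image to {}",
        info.width,
        info.height,
        info.encoded_format.label(),
        path.display()
    ),
    Err(err) => tracing::warn!("clipboard paste failed: {err}"),
}
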
@@ -70,20 +70,6 @@ impl Frame<'_> {
/// Usually the area argument is the size of the current frame or a sub-area of the current
/// frame (which can be obtained using [`Layout`] to split the total area).
///
/// # Example
///
/// ```rust
/// # use ratatui::{backend::TestBackend, Terminal};
/// # let backend = TestBackend::new(5, 5);
/// # let mut terminal = Terminal::new(backend).unwrap();
/// # let mut frame = terminal.get_frame();
/// use ratatui::{layout::Rect, widgets::Block};
///
/// let block = Block::new();
/// let area = Rect::new(0, 0, 5, 5);
/// frame.render_widget(block, area);
/// ```
///
/// [`Layout`]: crate::layout::Layout
pub fn render_widget<W: Widget>(&mut self, widget: W, area: Rect) {
widget.render(area, self.buffer);
@@ -93,22 +79,6 @@ impl Frame<'_> {
///
/// Usually the area argument is the size of the current frame or a sub-area of the current
/// frame (which can be obtained using [`Layout`] to split the total area).
///
/// # Example
///
/// ```rust
/// # #[cfg(feature = "unstable-widget-ref")] {
/// # use ratatui::{backend::TestBackend, Terminal};
/// # let backend = TestBackend::new(5, 5);
/// # let mut terminal = Terminal::new(backend).unwrap();
/// # let mut frame = terminal.get_frame();
/// use ratatui::{layout::Rect, widgets::Block};
///
/// let block = Block::new();
/// let area = Rect::new(0, 0, 5, 5);
/// frame.render_widget_ref(block, area);
/// # }
/// ```
#[allow(clippy::needless_pass_by_value)]
pub fn render_widget_ref<W: WidgetRef>(&mut self, widget: W, area: Rect) {
widget.render_ref(area, self.buffer);
@@ -122,24 +92,6 @@ impl Frame<'_> {
/// The last argument should be an instance of the [`StatefulWidget::State`] associated to the
/// given [`StatefulWidget`].
///
/// # Example
///
/// ```rust
/// # use ratatui::{backend::TestBackend, Terminal};
/// # let backend = TestBackend::new(5, 5);
/// # let mut terminal = Terminal::new(backend).unwrap();
/// # let mut frame = terminal.get_frame();
/// use ratatui::{
/// layout::Rect,
/// widgets::{List, ListItem, ListState},
/// };
///
/// let mut state = ListState::default().with_selected(Some(1));
/// let list = List::new(vec![ListItem::new("Item 1"), ListItem::new("Item 2")]);
/// let area = Rect::new(0, 0, 5, 5);
/// frame.render_stateful_widget(list, area, &mut state);
/// ```
///
/// [`Layout`]: crate::layout::Layout
pub fn render_stateful_widget<W>(&mut self, widget: W, area: Rect, state: &mut W::State)
where
@@ -156,26 +108,6 @@ impl Frame<'_> {
///
/// The last argument should be an instance of the [`StatefulWidgetRef::State`] associated to
/// the given [`StatefulWidgetRef`].
///
/// # Example
///
/// ```rust
/// # #[cfg(feature = "unstable-widget-ref")] {
/// # use ratatui::{backend::TestBackend, Terminal};
/// # let backend = TestBackend::new(5, 5);
/// # let mut terminal = Terminal::new(backend).unwrap();
/// # let mut frame = terminal.get_frame();
/// use ratatui::{
/// layout::Rect,
/// widgets::{List, ListItem, ListState},
/// };
///
/// let mut state = ListState::default().with_selected(Some(1));
/// let list = List::new(vec![ListItem::new("Item 1"), ListItem::new("Item 2")]);
/// let area = Rect::new(0, 0, 5, 5);
/// frame.render_stateful_widget_ref(list, area, &mut state);
/// # }
/// ```
#[allow(clippy::needless_pass_by_value)]
pub fn render_stateful_widget_ref<W>(&mut self, widget: W, area: Rect, state: &mut W::State)
where
@@ -216,17 +148,6 @@ impl Frame<'_> {
/// This count is particularly useful when dealing with dynamic content or animations where the
/// state of the display changes over time. By tracking the frame count, developers can
/// synchronize updates or changes to the content with the rendering process.
///
/// # Examples
///
/// ```rust
/// # use ratatui::{backend::TestBackend, Terminal};
/// # let backend = TestBackend::new(5, 5);
/// # let mut terminal = Terminal::new(backend).unwrap();
/// # let mut frame = terminal.get_frame();
/// let current_count = frame.count();
/// println!("Current frame count: {}", current_count);
/// ```
pub const fn count(&self) -> usize {
self.count
}
@@ -277,19 +198,6 @@ where
B: Backend,
{
/// Creates a new [`Terminal`] with the given [`Backend`] and [`TerminalOptions`].
///
/// # Example
///
/// ```rust
/// use std::io::stdout;
///
/// use ratatui::{backend::CrosstermBackend, layout::Rect, Terminal, TerminalOptions, Viewport};
///
/// let backend = CrosstermBackend::new(stdout());
/// let viewport = Viewport::Fixed(Rect::new(0, 0, 10, 10));
/// let terminal = Terminal::with_options(backend, TerminalOptions { viewport })?;
/// # std::io::Result::Ok(())
/// ```
pub fn with_options(mut backend: B) -> io::Result<Self> {
let screen_size = backend.size()?;
let cursor_pos = backend.get_cursor_position()?;
@@ -394,29 +302,6 @@ where
/// previous frame to determine what has changed, and only the changes are written to the
/// terminal. If the render callback does not fully render the frame, the terminal will not be
/// in a consistent state.
///
/// # Examples
///
/// ```
/// # let backend = ratatui::backend::TestBackend::new(10, 10);
/// # let mut terminal = ratatui::Terminal::new(backend)?;
/// use ratatui::{layout::Position, widgets::Paragraph};
///
/// // with a closure
/// terminal.draw(|frame| {
/// let area = frame.area();
/// frame.render_widget(Paragraph::new("Hello World!"), area);
/// frame.set_cursor_position(Position { x: 0, y: 0 });
/// })?;
///
/// // or with a function
/// terminal.draw(render)?;
///
/// fn render(frame: &mut ratatui::Frame) {
/// frame.render_widget(Paragraph::new("Hello World!"), frame.area());
/// }
/// # std::io::Result::Ok(())
/// ```
pub fn draw<F>(&mut self, render_callback: F) -> io::Result<()>
where
F: FnOnce(&mut Frame),
@@ -462,36 +347,6 @@ where
/// previous frame to determine what has changed, and only the changes are written to the
/// terminal. If the render function does not fully render the frame, the terminal will not be
/// in a consistent state.
///
/// # Examples
///
/// ```should_panic
/// # use ratatui::layout::Position;;
/// # let backend = ratatui::backend::TestBackend::new(10, 10);
/// # let mut terminal = ratatui::Terminal::new(backend)?;
/// use std::io;
///
/// use ratatui::widgets::Paragraph;
///
/// // with a closure
/// terminal.try_draw(|frame| {
/// let value: u8 = "not a number".parse().map_err(io::Error::other)?;
/// let area = frame.area();
/// frame.render_widget(Paragraph::new("Hello World!"), area);
/// frame.set_cursor_position(Position { x: 0, y: 0 });
/// io::Result::Ok(())
/// })?;
///
/// // or with a function
/// terminal.try_draw(render)?;
///
/// fn render(frame: &mut ratatui::Frame) -> io::Result<()> {
/// let value: u8 = "not a number".parse().map_err(io::Error::other)?;
/// frame.render_widget(Paragraph::new("Hello World!"), frame.area());
/// Ok(())
/// }
/// # io::Result::Ok(())
/// ```
pub fn try_draw<F, E>(&mut self, render_callback: F) -> io::Result<()>
where
F: FnOnce(&mut Frame) -> Result<(), E>,

@@ -212,7 +212,18 @@ fn render_patch_details(changes: &HashMap<PathBuf, FileChange>) -> Vec<RtLine<'s
move_path: _,
} => {
if let Ok(patch) = diffy::Patch::from_str(unified_diff) {
let mut is_first_hunk = true;
for h in patch.hunks() {
// Render a simple separator between non-contiguous hunks
// instead of diff-style @@ headers.
if !is_first_hunk {
out.push(RtLine::from(vec![
RtSpan::raw(" "),
RtSpan::styled("⋮", style_dim()),
]));
}
is_first_hunk = false;

let mut old_ln = h.old_range().start();
let mut new_ln = h.new_range().start();
for l in h.lines() {
@@ -276,8 +287,7 @@ fn push_wrapped_diff_line(
// ("+"/"-" for inserts/deletes, or a space for context lines) so alignment
// stays consistent across all diff lines.
let gap_after_ln = SPACES_AFTER_LINE_NUMBER.saturating_sub(ln_str.len());
let first_prefix_cols = indent.len() + ln_str.len() + gap_after_ln;
let cont_prefix_cols = indent.len() + ln_str.len() + gap_after_ln;
let prefix_cols = indent.len() + ln_str.len() + gap_after_ln;

let mut first = true;
let (sign_opt, line_style) = match kind {
@@ -286,16 +296,14 @@ fn push_wrapped_diff_line(
DiffLineType::Context => (None, None),
};
let mut lines: Vec<RtLine<'static>> = Vec::new();
while !remaining_text.is_empty() {
let prefix_cols = if first {
first_prefix_cols
} else {
cont_prefix_cols
};

loop {
// Fit the content for the current terminal row:
// compute how many columns are available after the prefix, then split
// at a UTF-8 character boundary so this row's chunk fits exactly.
let available_content_cols = term_cols.saturating_sub(prefix_cols).max(1);
let available_content_cols = term_cols
.saturating_sub(if first { prefix_cols + 1 } else { prefix_cols })
.max(1);
let split_at_byte_index = remaining_text
.char_indices()
.nth(available_content_cols)
@@ -341,6 +349,9 @@ fn push_wrapped_diff_line(
}
lines.push(line);
}
if remaining_text.is_empty() {
break;
}
}
lines
}
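The wrapping change above budgets one extra column for the sign on the first row and splits each chunk at a character boundary via `char_indices`. A small stand-alone sketch of that splitting idiom (illustrative only, not part of the diff):

// Take at most `cols` characters without cutting a UTF-8 sequence in half.
fn split_at_char_boundary(text: &str, cols: usize) -> (&str, &str) {
    let split = text
        .char_indices()
        .nth(cols)
        .map(|(byte_idx, _)| byte_idx)
        .unwrap_or(text.len());
    text.split_at(split)
}
// split_at_char_boundary("héllo", 3) == ("hél", "lo")
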
@@ -430,4 +441,72 @@ mod tests {
// Render into a small terminal to capture the visual layout
snapshot_lines("wrap_behavior_insert", lines, DEFAULT_WRAP_COLS + 10, 8);
}

#[test]
fn ui_snapshot_single_line_replacement_counts() {
// Reproduce: one deleted line replaced by one inserted line, no extra context
let original = "# Codex CLI (Rust Implementation)\n";
let modified = "# Codex CLI (Rust Implementation) banana\n";
let patch = diffy::create_patch(original, modified).to_string();

let mut changes: HashMap<PathBuf, FileChange> = HashMap::new();
changes.insert(
PathBuf::from("README.md"),
FileChange::Update {
unified_diff: patch,
move_path: None,
},
);

let lines =
create_diff_summary("proposed patch", &changes, PatchEventType::ApprovalRequest);

snapshot_lines("single_line_replacement_counts", lines, 80, 8);
}

#[test]
fn ui_snapshot_blank_context_line() {
// Ensure a hunk that includes a blank context line at the beginning is rendered visibly
let original = "\nY\n";
let modified = "\nY changed\n";
let patch = diffy::create_patch(original, modified).to_string();

let mut changes: HashMap<PathBuf, FileChange> = HashMap::new();
changes.insert(
PathBuf::from("example.txt"),
FileChange::Update {
unified_diff: patch,
move_path: None,
},
);

let lines =
create_diff_summary("proposed patch", &changes, PatchEventType::ApprovalRequest);

snapshot_lines("blank_context_line", lines, 80, 10);
}

#[test]
fn ui_snapshot_vertical_ellipsis_between_hunks() {
// Create a patch with two separate hunks to ensure we render the vertical ellipsis (⋮)
let original =
"line 1\nline 2\nline 3\nline 4\nline 5\nline 6\nline 7\nline 8\nline 9\nline 10\n";
let modified = "line 1\nline two changed\nline 3\nline 4\nline 5\nline 6\nline 7\nline 8\nline nine changed\nline 10\n";
let patch = diffy::create_patch(original, modified).to_string();

let mut changes: HashMap<PathBuf, FileChange> = HashMap::new();
changes.insert(
PathBuf::from("example.txt"),
FileChange::Update {
unified_diff: patch,
move_path: None,
},
);

let lines =
create_diff_summary("proposed patch", &changes, PatchEventType::ApprovalRequest);

// Height is large enough to show both hunks and the separator
snapshot_lines("vertical_ellipsis_between_hunks", lines, 80, 16);
}
}

@@ -1,6 +1,7 @@
use crate::diff_render::create_diff_summary;
use crate::exec_command::relativize_to_home;
use crate::exec_command::strip_bash_lc_and_escape;
use crate::markdown::append_markdown;
use crate::slash_command::SlashCommand;
use crate::text_formatting::format_and_truncate_tool_result;
use base64::Engine;
@@ -11,6 +12,7 @@ use codex_core::config::Config;
use codex_core::plan_tool::PlanItemArg;
use codex_core::plan_tool::StepStatus;
use codex_core::plan_tool::UpdatePlanArgs;
use codex_core::project_doc::discover_project_doc_paths;
use codex_core::protocol::FileChange;
use codex_core::protocol::McpInvocation;
use codex_core::protocol::SandboxPolicy;
@@ -39,7 +41,7 @@ use std::time::Instant;
use tracing::error;
use uuid::Uuid;

#[derive(Clone)]
#[derive(Clone, Debug)]
pub(crate) struct CommandOutput {
pub(crate) exit_code: i32,
pub(crate) stdout: String,
@@ -54,9 +56,13 @@ pub(crate) enum PatchEventType {
/// Represents an event to display in the conversation history. Returns its
/// `Vec<Line<'static>>` representation to make it easier to display in a
/// scrollable list.
pub(crate) trait HistoryCell {
pub(crate) trait HistoryCell: std::fmt::Debug + Send + Sync {
fn display_lines(&self) -> Vec<Line<'static>>;

fn transcript_lines(&self) -> Vec<Line<'static>> {
self.display_lines()
}

fn desired_height(&self, width: u16) -> u16 {
Paragraph::new(Text::from(self.display_lines()))
.wrap(Wrap { trim: false })
@@ -66,6 +72,7 @@ pub(crate) trait HistoryCell {
}
}

#[derive(Debug)]
pub(crate) struct PlainHistoryCell {
lines: Vec<Line<'static>>,
}
@@ -76,6 +83,22 @@ impl HistoryCell for PlainHistoryCell {
}
}

#[derive(Debug)]
pub(crate) struct TranscriptOnlyHistoryCell {
lines: Vec<Line<'static>>,
}

impl HistoryCell for TranscriptOnlyHistoryCell {
fn display_lines(&self) -> Vec<Line<'static>> {
Vec::new()
}

fn transcript_lines(&self) -> Vec<Line<'static>> {
self.lines.clone()
}
}
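The `HistoryCell` trait now also requires `Debug + Send + Sync`, presumably so boxed cells can be sent across the app-event channel. A minimal hypothetical implementor, to illustrate what the widened bound asks of new cell types (not part of the diff):

#[derive(Debug)]
struct NoticeCell {
    lines: Vec<Line<'static>>,
}

impl HistoryCell for NoticeCell {
    fn display_lines(&self) -> Vec<Line<'static>> {
        self.lines.clone()
    }
    // transcript_lines() and desired_height() fall back to the trait's default bodies.
}
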
#[derive(Debug)]
pub(crate) struct ExecCell {
pub(crate) command: Vec<String>,
pub(crate) parsed: Vec<ParsedCommand>,
@@ -101,6 +124,7 @@ impl WidgetRef for &ExecCell {
}
}

#[derive(Debug)]
struct CompletedMcpToolCallWithImageOutput {
_image: DynamicImage,
}
@@ -482,20 +506,6 @@ pub(crate) fn new_completed_mcp_tool_call(
Box::new(PlainHistoryCell { lines })
}

pub(crate) fn new_diff_output(message: String) -> PlainHistoryCell {
let mut lines: Vec<Line<'static>> = Vec::new();
lines.push(Line::from("/diff".magenta()));

if message.trim().is_empty() {
lines.push(Line::from("No changes detected.".italic()));
} else {
lines.extend(message.lines().map(ansi_escape_line));
}

lines.push(Line::from(""));
PlainHistoryCell { lines }
}

pub(crate) fn new_status_output(
config: &Config,
usage: &TokenUsage,
@@ -538,6 +548,54 @@ pub(crate) fn new_status_output(
sandbox_name.into(),
]));

// AGENTS.md files discovered via core's project_doc logic
let agents_list = {
match discover_project_doc_paths(config) {
Ok(paths) => {
let mut rels: Vec<String> = Vec::new();
for p in paths {
let display = if let Some(parent) = p.parent() {
if parent == config.cwd {
"AGENTS.md".to_string()
} else {
let mut cur = config.cwd.as_path();
let mut ups = 0usize;
let mut reached = false;
while let Some(c) = cur.parent() {
if cur == parent {
reached = true;
break;
}
cur = c;
ups += 1;
}
if reached {
format!("{}AGENTS.md", "../".repeat(ups))
} else if let Ok(stripped) = p.strip_prefix(&config.cwd) {
stripped.display().to_string()
} else {
p.display().to_string()
}
}
} else {
p.display().to_string()
};
rels.push(display);
}
rels
}
Err(_) => Vec::new(),
}
};
if agents_list.is_empty() {
lines.push(Line::from(" • AGENTS files: (none)"));
} else {
lines.push(Line::from(vec![
" • AGENTS files: ".into(),
agents_list.join(", ").into(),
]));
}

lines.push(Line::from(""));

// 👤 Account (only if ChatGPT tokens exist), shown under the first block
@@ -642,6 +700,15 @@ pub(crate) fn empty_mcp_output() -> PlainHistoryCell {
Line::from(vec!["🔌 ".into(), "MCP Tools".bold()]),
Line::from(""),
Line::from(" • No MCP servers configured.".italic()),
Line::from(vec![
" See the ".into(),
Span::styled(
"\u{1b}]8;;https://github.com/openai/codex/blob/main/codex-rs/config.md#mcp_servers\u{7}MCP docs\u{1b}]8;;\u{7}",
Style::default().add_modifier(Modifier::UNDERLINED),
),
" to configure them.".into(),
])
.style(Style::default().add_modifier(Modifier::DIM)),
Line::from(""),
];

@@ -719,6 +786,12 @@ pub(crate) fn new_error_event(message: String) -> PlainHistoryCell {
PlainHistoryCell { lines }
}

pub(crate) fn new_stream_error_event(message: String) -> PlainHistoryCell {
let lines: Vec<Line<'static>> =
vec![vec!["⚠ ".magenta().bold(), message.dim()].into(), "".into()];
PlainHistoryCell { lines }
}

/// Render a user‑friendly plan update styled like a checkbox todo list.
pub(crate) fn new_plan_update(update: UpdatePlanArgs) -> PlainHistoryCell {
let UpdatePlanArgs { explanation, plan } = update;
@@ -921,6 +994,17 @@ pub(crate) fn new_patch_apply_success(stdout: String) -> PlainHistoryCell {
PlainHistoryCell { lines }
}

pub(crate) fn new_reasoning_block(
full_reasoning_buffer: String,
config: &Config,
) -> TranscriptOnlyHistoryCell {
let mut lines: Vec<Line<'static>> = Vec::new();
lines.push(Line::from("thinking".magenta().italic()));
append_markdown(&full_reasoning_buffer, &mut lines, config);
lines.push(Line::from(""));
TranscriptOnlyHistoryCell { lines }
}

fn output_lines(
output: Option<&CommandOutput>,
only_err: bool,

@@ -12,6 +12,7 @@ use codex_core::config::find_codex_home;
use codex_core::config::load_config_as_toml_with_cli_overrides;
use codex_core::protocol::AskForApproval;
use codex_core::protocol::SandboxPolicy;
use codex_login::AuthManager;
use codex_login::AuthMode;
use codex_login::CodexAuth;
use codex_ollama::DEFAULT_OSS_MODEL;
@@ -30,6 +31,7 @@ mod bottom_pane;
mod chatwidget;
mod citation_regex;
mod cli;
mod clipboard_paste;
mod common;
pub mod custom_terminal;
mod diff_render;
@@ -49,6 +51,7 @@ mod slash_command;
mod status_indicator_widget;
mod streaming;
mod text_formatting;
mod transcript_app;
mod tui;
mod user_approval_widget;

@@ -219,38 +222,6 @@ pub async fn run_main(

let _ = tracing_subscriber::registry().with(file_layer).try_init();

#[allow(clippy::print_stderr)]
#[cfg(not(debug_assertions))]
if let Some(latest_version) = updates::get_upgrade_version(&config) {
let current_version = env!("CARGO_PKG_VERSION");
let exe = std::env::current_exe()?;
let managed_by_npm = std::env::var_os("CODEX_MANAGED_BY_NPM").is_some();

eprintln!(
"{} {current_version} -> {latest_version}.",
"✨⬆️ Update available!".bold().cyan()
);

if managed_by_npm {
let npm_cmd = "npm install -g @openai/codex@latest";
eprintln!("Run {} to update.", npm_cmd.cyan().on_black());
} else if cfg!(target_os = "macos")
&& (exe.starts_with("/opt/homebrew") || exe.starts_with("/usr/local"))
{
let brew_cmd = "brew upgrade codex";
eprintln!("Run {} to update.", brew_cmd.cyan().on_black());
} else {
eprintln!(
"See {} for the latest releases and installation options.",
"https://github.com/openai/codex/releases/latest"
.cyan()
.on_black()
);
}

eprintln!("");
}

run_ratatui_app(cli, config, should_show_trust_screen)
.await
.map_err(|err| std::io::Error::other(err.to_string()))
@@ -275,13 +246,62 @@ async fn run_ratatui_app(
}));
let mut terminal = tui::init()?;
terminal.clear()?;

let mut tui = Tui::new(terminal);

// Show update banner in terminal history (instead of stderr) so it is visible
// within the TUI scrollback. Building spans keeps styling consistent.
#[cfg(not(debug_assertions))]
if let Some(latest_version) = updates::get_upgrade_version(&config) {
use ratatui::style::Stylize as _;
use ratatui::text::Line;
use ratatui::text::Span;

let current_version = env!("CARGO_PKG_VERSION");
let exe = std::env::current_exe()?;
let managed_by_npm = std::env::var_os("CODEX_MANAGED_BY_NPM").is_some();

let mut lines: Vec<Line<'static>> = Vec::new();
lines.push(Line::from(vec![
"✨⬆️ Update available!".bold().cyan(),
Span::raw(" "),
Span::raw(format!("{current_version} -> {latest_version}.")),
]));

if managed_by_npm {
let npm_cmd = "npm install -g @openai/codex@latest";
lines.push(Line::from(vec![
Span::raw("Run "),
npm_cmd.cyan(),
Span::raw(" to update."),
]));
} else if cfg!(target_os = "macos")
&& (exe.starts_with("/opt/homebrew") || exe.starts_with("/usr/local"))
{
let brew_cmd = "brew upgrade codex";
lines.push(Line::from(vec![
Span::raw("Run "),
brew_cmd.cyan(),
Span::raw(" to update."),
]));
} else {
lines.push(Line::from(vec![
Span::raw("See "),
"https://github.com/openai/codex/releases/latest".cyan(),
Span::raw(" for the latest releases and installation options."),
]));
}

lines.push(Line::from(""));
tui.insert_history_lines(lines);
}

// Initialize high-fidelity session event logging if enabled.
session_log::maybe_init(&config);

let Cli { prompt, images, .. } = cli;

let auth_manager = AuthManager::shared(config.codex_home.clone(), config.preferred_auth_method);
let login_status = get_login_status(&config);
let should_show_onboarding =
should_show_onboarding(login_status, &config, should_show_trust_screen);
@@ -294,6 +314,7 @@ async fn run_ratatui_app(
show_trust_screen: should_show_trust_screen,
login_status,
preferred_auth_method: config.preferred_auth_method,
auth_manager: auth_manager.clone(),
},
&mut tui,
)
@@ -304,7 +325,7 @@ async fn run_ratatui_app(
}
}

let app_result = App::run(&mut tui, config, prompt, images).await;
let app_result = App::run(&mut tui, auth_manager, config, prompt, images).await;

restore();
// Mark the end of the recorded session.

@@ -45,22 +45,6 @@ impl MarkdownStreamCollector {
self.buffer.push_str(delta);
}

/// Insert a paragraph/section separator if one is not already present at the
/// end of the buffer. Ensures the next content starts after a blank line.
pub fn insert_section_break(&mut self) {
if self.buffer.is_empty() {
return;
}
if self.buffer.ends_with("\n\n") {
return;
}
if self.buffer.ends_with('\n') {
self.buffer.push('\n');
} else {
self.buffer.push_str("\n\n");
}
}

/// Render the full buffer and return only the newly completed logical lines
/// since the last commit. When the buffer does not end with a newline, the
/// final rendered line is considered incomplete and is not emitted.

@@ -1,5 +1,6 @@
#![allow(clippy::unwrap_used)]

use codex_login::AuthManager;
use codex_login::CLIENT_ID;
use codex_login::ServerOptions;
use codex_login::ShutdownHandle;
@@ -112,6 +113,7 @@ pub(crate) struct AuthModeWidget {
pub codex_home: PathBuf,
pub login_status: LoginStatus,
pub preferred_auth_method: AuthMode,
pub auth_manager: Arc<AuthManager>,
}

impl AuthModeWidget {
@@ -338,6 +340,7 @@ impl AuthModeWidget {
Ok(child) => {
let sign_in_state = self.sign_in_state.clone();
let request_frame = self.request_frame.clone();
let auth_manager = self.auth_manager.clone();
tokio::spawn(async move {
let auth_url = child.auth_url.clone();
{
@@ -351,6 +354,9 @@ impl AuthModeWidget {
let r = child.block_until_done().await;
match r {
Ok(()) => {
// Force the auth manager to reload the new auth information.
auth_manager.reload();

*sign_in_state.write().unwrap() = SignInState::ChatGptSuccessMessage;
request_frame.schedule_frame();
}

@@ -1,4 +1,5 @@
use codex_core::util::is_inside_git_repo;
use codex_login::AuthManager;
use crossterm::event::KeyCode;
use crossterm::event::KeyEvent;
use crossterm::event::KeyEventKind;
@@ -58,6 +59,7 @@ pub(crate) struct OnboardingScreenArgs {
pub show_login_screen: bool,
pub login_status: LoginStatus,
pub preferred_auth_method: AuthMode,
pub auth_manager: Arc<AuthManager>,
}

impl OnboardingScreen {
@@ -69,6 +71,7 @@ impl OnboardingScreen {
show_login_screen,
login_status,
preferred_auth_method,
auth_manager,
} = args;
let mut steps: Vec<Step> = vec![Step::Welcome(WelcomeWidget {
is_logged_in: !matches!(login_status, LoginStatus::NotAuthenticated),
@@ -82,6 +85,7 @@ impl OnboardingScreen {
codex_home: codex_home.clone(),
login_status,
preferred_auth_method,
auth_manager,
}))
}
let is_git_repo = is_inside_git_repo(&cwd);

@@ -1,6 +1,7 @@
use std::path::PathBuf;

use codex_core::config::set_project_trusted;
use codex_core::git_info::resolve_root_git_project_for_trust;
use crossterm::event::KeyCode;
use crossterm::event::KeyEvent;
use ratatui::buffer::Buffer;
@@ -144,10 +145,11 @@ impl StepStateProvider for TrustDirectoryWidget {

impl TrustDirectoryWidget {
fn handle_trust(&mut self) {
if let Err(e) = set_project_trusted(&self.codex_home, &self.cwd) {
let target =
resolve_root_git_project_for_trust(&self.cwd).unwrap_or_else(|| self.cwd.clone());
if let Err(e) = set_project_trusted(&self.codex_home, &target) {
tracing::error!("Failed to set project trusted: {e:?}");
self.error = Some(e.to_string());
// self.error = Some("Failed to set project trusted".to_string());
self.error = Some(format!("Failed to set trust for {}: {e}", target.display()));
}

self.selection = Some(TrustDirectorySelection::Trust);

@@ -132,18 +132,16 @@ pub(crate) fn log_inbound_app_event(event: &AppEvent) {
AppEvent::CodexEvent(ev) => {
write_record("to_tui", "codex_event", ev);
}

AppEvent::DispatchCommand(cmd) => {
AppEvent::NewSession => {
let value = json!({
"ts": now_ts(),
"dir": "to_tui",
"kind": "slash_command",
"command": format!("{:?}", cmd),
"kind": "new_session",
});
LOGGER.write_json_line(value);
}
// Internal UI events; still log for fidelity, but avoid heavy payloads.
AppEvent::InsertHistory(lines) => {
AppEvent::InsertHistoryLines(lines) => {
let value = json!({
"ts": now_ts(),
"dir": "to_tui",
@@ -152,6 +150,15 @@ pub(crate) fn log_inbound_app_event(event: &AppEvent) {
});
LOGGER.write_json_line(value);
}
AppEvent::InsertHistoryCell(cell) => {
let value = json!({
"ts": now_ts(),
"dir": "to_tui",
"kind": "insert_history_cell",
"lines": cell.transcript_lines().len(),
});
LOGGER.write_json_line(value);
}
AppEvent::StartFileSearch(query) => {
let value = json!({
"ts": now_ts(),

@@ -22,7 +22,7 @@ pub(crate) fn shimmer_spans(text: &str) -> Vec<Span<'static>> {
// Use time-based sweep synchronized to process start.
let padding = 10usize;
let period = chars.len() + padding * 2;
let sweep_seconds = 2.5f32;
let sweep_seconds = 2.0f32;
let pos_f =
(elapsed_since_start().as_secs_f32() % sweep_seconds) / sweep_seconds * (period as f32);
let pos = pos_f as usize;
@@ -63,9 +63,11 @@ pub(crate) fn shimmer_spans(text: &str) -> Vec<Span<'static>> {
}

fn color_for_level(level: u8) -> Style {
if level < 144 {
// Tune thresholds so the edges of the shimmer band appear dim
// in fallback mode (no true color support).
if level < 160 {
Style::default().add_modifier(Modifier::DIM)
} else if level < 208 {
} else if level < 224 {
Style::default()
} else {
Style::default().add_modifier(Modifier::BOLD)

@@ -0,0 +1,14 @@
---
source: tui/src/diff_render.rs
expression: terminal.backend()
---
"proposed patch to 1 file (+1 -1) "
" └ example.txt "
" 1 "
" 2 -Y "
" 2 +Y changed "
" "
" "
" "
" "
" "
@@ -0,0 +1,12 @@
---
source: tui/src/diff_render.rs
expression: terminal.backend()
---
"proposed patch to 1 file (+1 -1) "
" └ README.md "
" 1 -# Codex CLI (Rust Implementation) "
" 1 +# Codex CLI (Rust Implementation) banana "
" "
" "
" "
" "
@@ -1,6 +1,5 @@
---
source: tui/src/diff_render.rs
assertion_line: 380
expression: terminal.backend()
---
"proposed patch to 1 file (+1 -1) "

@@ -0,0 +1,20 @@
---
source: tui/src/diff_render.rs
expression: terminal.backend()
---
"proposed patch to 1 file (+2 -2) "
" └ example.txt "
" 1 line 1 "
" 2 -line 2 "
" 2 +line two changed "
" 3 line 3 "
" 4 line 4 "
" 5 line 5 "
" ⋮ "
" 6 line 6 "
" 7 line 7 "
" 8 line 8 "
" 9 -line 9 "
" 9 +line nine changed "
" 10 line 10 "
" "
@@ -1,10 +1,9 @@
---
source: tui/src/diff_render.rs
assertion_line: 380
expression: terminal.backend()
---
" 1 +this is a very long line that should wrap across multiple terminal col "
" umns and continue "
" 1 +this is a very long line that should wrap across multiple terminal co "
" lumns and continue "
" "
" "
" "

@@ -30,6 +30,8 @@ pub(crate) struct StatusIndicatorWidget {
/// Latest text to display (truncated to the available width at render
/// time).
text: String,
/// Animated header text (defaults to "Working").
header: String,

/// Animation state: reveal target `text` progressively like a typewriter.
/// We compute the currently visible prefix length based on the current
@@ -47,6 +49,7 @@ impl StatusIndicatorWidget {
pub(crate) fn new(app_event_tx: AppEventSender, frame_requester: FrameRequester) -> Self {
Self {
text: String::from("waiting for model"),
header: String::from("Working"),
last_target_len: 0,
base_frame: 0,
reveal_len_at_base: 0,
@@ -95,6 +98,13 @@ impl StatusIndicatorWidget {
self.app_event_tx.send(AppEvent::CodexOp(Op::Interrupt));
}

/// Update the animated header label (left of the brackets).
pub(crate) fn update_header(&mut self, header: String) {
if self.header != header {
self.header = header;
}
}

/// Reset the animation and start revealing `text` from the beginning.
#[cfg(test)]
pub(crate) fn restart_with_text(&mut self, text: String) {
@@ -136,6 +146,15 @@ impl StatusIndicatorWidget {
let since_start = self.start_time.elapsed();
(since_start.as_millis() / 100) as usize
}

/// Test-only helper to fast-forward the internal clock so animations
/// advance without sleeping.
#[cfg(test)]
pub(crate) fn test_fast_forward_frames(&mut self, frames: usize) {
let advance_ms = (frames as u64).saturating_mul(100);
// Move the start time into the past so `current_frame()` advances.
self.start_time = std::time::Instant::now() - std::time::Duration::from_millis(advance_ms);
}
}

impl WidgetRef for StatusIndicatorWidget {
@@ -147,12 +166,12 @@ impl WidgetRef for StatusIndicatorWidget {

// Schedule next animation frame.
self.frame_requester
.schedule_frame_in(Duration::from_millis(100));
.schedule_frame_in(Duration::from_millis(32));
let idx = self.current_frame();
let elapsed = self.start_time.elapsed().as_secs();
let shown_now = self.current_shown_len(idx);
let status_prefix: String = self.text.chars().take(shown_now).collect();
let animated_spans = shimmer_spans("Working");
let animated_spans = shimmer_spans(&self.header);

// Plain rendering: no borders or padding so the live cell is visually indistinguishable from terminal scrollback.
let inner_width = area.width as usize;
@@ -226,8 +245,8 @@ mod tests {
w.restart_with_text("Hello".to_string());

let area = ratatui::layout::Rect::new(0, 0, 30, 1);
// Allow a short delay so the typewriter reveals the first character.
std::thread::sleep(std::time::Duration::from_millis(120));
// Advance animation without sleeping.
w.test_fast_forward_frames(2);
let mut buf = ratatui::buffer::Buffer::empty(area);
w.render_ref(area, &mut buf);

@@ -242,8 +261,8 @@ mod tests {
let tx = AppEventSender::new(tx_raw);
let mut w = StatusIndicatorWidget::new(tx, crate::tui::FrameRequester::test_dummy());
w.restart_with_text("Hi".to_string());
// Ensure some frames elapse so we get a stable state.
std::thread::sleep(std::time::Duration::from_millis(120));
// Advance animation without sleeping.
w.test_fast_forward_frames(2);

let area = ratatui::layout::Rect::new(0, 0, 30, 1);
let mut buf = ratatui::buffer::Buffer::empty(area);
@@ -263,7 +282,7 @@ mod tests {
let tx = AppEventSender::new(tx_raw);
let mut w = StatusIndicatorWidget::new(tx, crate::tui::FrameRequester::test_dummy());
w.restart_with_text("Hello".to_string());
std::thread::sleep(std::time::Duration::from_millis(120));
w.test_fast_forward_frames(2);

let area = ratatui::layout::Rect::new(0, 0, 30, 1);
let mut buf = ratatui::buffer::Buffer::empty(area);

@@ -2,7 +2,6 @@ use codex_core::config::Config;
|
||||
use ratatui::text::Line;
|
||||
|
||||
use super::HeaderEmitter;
|
||||
use super::StreamKind;
|
||||
use super::StreamState;
|
||||
|
||||
/// Sink for history insertions and animation control.
|
||||
@@ -18,7 +17,7 @@ pub(crate) struct AppEventHistorySink(pub(crate) crate::app_event_sender::AppEve
|
||||
impl HistorySink for AppEventHistorySink {
|
||||
fn insert_history(&self, lines: Vec<Line<'static>>) {
|
||||
self.0
|
||||
.send(crate::app_event::AppEvent::InsertHistory(lines))
|
||||
.send(crate::app_event::AppEvent::InsertHistoryLines(lines))
|
||||
}
|
||||
fn start_commit_animation(&self) {
|
||||
self.0
|
||||
@@ -36,8 +35,8 @@ type Lines = Vec<Line<'static>>;
|
||||
pub(crate) struct StreamController {
|
||||
config: Config,
|
||||
header: HeaderEmitter,
|
||||
states: [StreamState; 2],
|
||||
current_stream: Option<StreamKind>,
|
||||
state: StreamState,
|
||||
active: bool,
|
||||
finishing_after_drain: bool,
|
||||
}
|
||||
|
||||
@@ -46,8 +45,8 @@ impl StreamController {
|
||||
Self {
|
||||
config,
|
||||
header: HeaderEmitter::new(),
|
||||
states: [StreamState::new(), StreamState::new()],
|
||||
current_stream: None,
|
||||
state: StreamState::new(),
|
||||
active: false,
|
||||
finishing_after_drain: false,
|
||||
}
|
||||
}
|
||||
@@ -57,29 +56,18 @@ impl StreamController {
|
||||
}
|
||||
|
||||
pub(crate) fn is_write_cycle_active(&self) -> bool {
|
||||
self.current_stream.is_some()
|
||||
self.active
|
||||
}
|
||||
|
||||
pub(crate) fn clear_all(&mut self) {
|
||||
self.states.iter_mut().for_each(|s| s.clear());
|
||||
self.current_stream = None;
|
||||
self.state.clear();
|
||||
self.active = false;
|
||||
self.finishing_after_drain = false;
|
||||
// leave header state unchanged; caller decides when to reset
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn idx(kind: StreamKind) -> usize {
|
||||
kind as usize
|
||||
}
|
||||
fn state(&self, kind: StreamKind) -> &StreamState {
|
||||
&self.states[Self::idx(kind)]
|
||||
}
|
||||
fn state_mut(&mut self, kind: StreamKind) -> &mut StreamState {
|
||||
&mut self.states[Self::idx(kind)]
|
||||
}
|
||||
|
||||
fn emit_header_if_needed(&mut self, kind: StreamKind, out_lines: &mut Lines) -> bool {
|
||||
self.header.maybe_emit(kind, out_lines)
|
||||
fn emit_header_if_needed(&mut self, out_lines: &mut Lines) -> bool {
|
||||
self.header.maybe_emit(out_lines)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
@@ -93,56 +81,23 @@ impl StreamController {
|
||||
}
|
||||
}
|
||||
|
||||
/// Begin a stream, flushing previously completed lines from any other
/// active stream to maintain ordering.
pub(crate) fn begin(&mut self, kind: StreamKind, sink: &impl HistorySink) {
if let Some(current) = self.current_stream
&& current != kind
{
// Synchronously flush completed lines from previous stream.
let cfg = self.config.clone();
let prev_state = self.state_mut(current);
let newly_completed = prev_state.collector.commit_complete_lines(&cfg);
if !newly_completed.is_empty() {
prev_state.enqueue(newly_completed);
}
let step = prev_state.drain_all();
if !step.history.is_empty() {
let mut lines: Lines = Vec::new();
self.emit_header_if_needed(current, &mut lines);
lines.extend(step.history);
// Ensure at most one trailing blank after the flushed block.
Self::ensure_single_trailing_blank(&mut lines);
sink.insert_history(lines);
}
self.current_stream = None;
}

if self.current_stream != Some(kind) {
let prev = self.current_stream;
self.current_stream = Some(kind);
// Starting a new stream cancels any pending finish-from-previous-stream animation.
self.finishing_after_drain = false;
if prev.is_some() {
self.header.reset_for_stream(kind);
}
// Emit header immediately for reasoning; for answers, defer to first commit.
if matches!(kind, StreamKind::Reasoning) {
let mut header_lines = Vec::new();
if self.emit_header_if_needed(kind, &mut header_lines) {
sink.insert_history(header_lines);
}
}
/// Begin an answer stream. Does not emit header yet; it is emitted on first commit.
pub(crate) fn begin(&mut self, _sink: &impl HistorySink) {
// Starting a new stream cancels any pending finish-from-previous-stream animation.
if !self.active {
self.header.reset_for_stream();
}
self.finishing_after_drain = false;
self.active = true;
}

/// Push a delta; if it contains a newline, commit completed lines and start animation.
pub(crate) fn push_and_maybe_commit(&mut self, delta: &str, sink: &impl HistorySink) {
let Some(kind) = self.current_stream else {
if !self.active {
return;
};
}
let cfg = self.config.clone();
let state = self.state_mut(kind);
let state = &mut self.state;
// Record that at least one delta was received for this stream
if !delta.is_empty() {
state.has_seen_delta = true;
@@ -157,42 +112,22 @@ impl StreamController {
}
}

/// Insert a reasoning section break and commit any newly completed lines.
pub(crate) fn insert_reasoning_section_break(&mut self, sink: &impl HistorySink) {
if self.current_stream != Some(StreamKind::Reasoning) {
self.begin(StreamKind::Reasoning, sink);
}
let cfg = self.config.clone();
let state = self.state_mut(StreamKind::Reasoning);
state.collector.insert_section_break();
let newly_completed = state.collector.commit_complete_lines(&cfg);
if !newly_completed.is_empty() {
state.enqueue(newly_completed);
sink.start_commit_animation();
}
}

/// Finalize the active stream. If `flush_immediately` is true, drain and emit now.
pub(crate) fn finalize(
&mut self,
kind: StreamKind,
flush_immediately: bool,
sink: &impl HistorySink,
) -> bool {
if self.current_stream != Some(kind) {
pub(crate) fn finalize(&mut self, flush_immediately: bool, sink: &impl HistorySink) -> bool {
if !self.active {
return false;
}
let cfg = self.config.clone();
// Finalize collector first.
let remaining = {
let state = self.state_mut(kind);
let state = &mut self.state;
state.collector.finalize_and_drain(&cfg)
};
if flush_immediately {
// Collect all output first to avoid emitting headers when there is no content.
let mut out_lines: Lines = Vec::new();
{
let state = self.state_mut(kind);
let state = &mut self.state;
if !remaining.is_empty() {
state.enqueue(remaining);
}
@@ -201,28 +136,28 @@ impl StreamController {
}
if !out_lines.is_empty() {
let mut lines_with_header: Lines = Vec::new();
self.emit_header_if_needed(kind, &mut lines_with_header);
self.emit_header_if_needed(&mut lines_with_header);
lines_with_header.extend(out_lines);
Self::ensure_single_trailing_blank(&mut lines_with_header);
sink.insert_history(lines_with_header);
}

// Cleanup
self.state_mut(kind).clear();
// Allow a subsequent block of the same kind in this turn to emit its header.
self.header.allow_reemit_for_same_kind_in_turn(kind);
self.state.clear();
// Allow a subsequent block in this turn to emit its header.
self.header.allow_reemit_in_turn();
// Also clear the per-stream emitted flag so the header can render again.
self.header.reset_for_stream(kind);
self.current_stream = None;
self.header.reset_for_stream();
self.active = false;
self.finishing_after_drain = false;
true
} else {
if !remaining.is_empty() {
let state = self.state_mut(kind);
let state = &mut self.state;
state.enqueue(remaining);
}
// Spacer animated out
self.state_mut(kind).enqueue(vec![Line::from("")]);
self.state.enqueue(vec![Line::from("")]);
self.finishing_after_drain = true;
sink.start_commit_animation();
false
@@ -231,32 +166,29 @@ impl StreamController {

/// Step animation: commit at most one queued line and handle end-of-drain cleanup.
pub(crate) fn on_commit_tick(&mut self, sink: &impl HistorySink) -> bool {
let Some(kind) = self.current_stream else {
if !self.active {
return false;
};
let step = {
let state = self.state_mut(kind);
state.step()
};
}
let step = { self.state.step() };
if !step.history.is_empty() {
let mut lines: Lines = Vec::new();
self.emit_header_if_needed(kind, &mut lines);
self.emit_header_if_needed(&mut lines);
let mut out = lines;
out.extend(step.history);
sink.insert_history(out);
}

let is_idle = self.state(kind).is_idle();
let is_idle = self.state.is_idle();
if is_idle {
sink.stop_commit_animation();
if self.finishing_after_drain {
// Reset and notify
self.state_mut(kind).clear();
// Allow a subsequent block of the same kind in this turn to emit its header.
self.header.allow_reemit_for_same_kind_in_turn(kind);
self.state.clear();
// Allow a subsequent block in this turn to emit its header.
self.header.allow_reemit_in_turn();
// Also clear the per-stream emitted flag so the header can render again.
self.header.reset_for_stream(kind);
self.current_stream = None;
self.header.reset_for_stream();
self.active = false;
self.finishing_after_drain = false;
return true;
}
@@ -267,24 +199,14 @@ impl StreamController {
/// Apply a full final answer: replace queued content with only the remaining tail,
/// then finalize immediately and notify completion.
pub(crate) fn apply_final_answer(&mut self, message: &str, sink: &impl HistorySink) -> bool {
self.apply_full_final(StreamKind::Answer, message, true, sink)
self.apply_full_final(message, sink)
}

pub(crate) fn apply_final_reasoning(&mut self, message: &str, sink: &impl HistorySink) -> bool {
self.apply_full_final(StreamKind::Reasoning, message, false, sink)
}

fn apply_full_final(
&mut self,
kind: StreamKind,
message: &str,
immediate: bool,
sink: &impl HistorySink,
) -> bool {
self.begin(kind, sink);
fn apply_full_final(&mut self, message: &str, sink: &impl HistorySink) -> bool {
self.begin(sink);

{
let state = self.state_mut(kind);
let state = &mut self.state;
// Only inject the final full message if we have not seen any deltas for this stream.
// If deltas were received, rely on the collector's existing buffer to avoid duplication.
if !state.has_seen_delta && !message.is_empty() {
@@ -301,7 +223,6 @@ impl StreamController {
.replace_with_and_mark_committed(&msg, committed);
}
}

self.finalize(kind, immediate, sink)
self.finalize(true, sink)
}
}

@@ -2,12 +2,6 @@ use crate::markdown_stream::AnimatedLineStreamer;
use crate::markdown_stream::MarkdownStreamCollector;
pub(crate) mod controller;

#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub(crate) enum StreamKind {
Answer,
Reasoning,
}

pub(crate) struct StreamState {
pub(crate) collector: MarkdownStreamCollector,
pub(crate) streamer: AnimatedLineStreamer,
@@ -42,92 +36,44 @@ impl StreamState {
}

pub(crate) struct HeaderEmitter {
reasoning_emitted_this_turn: bool,
answer_emitted_this_turn: bool,
reasoning_emitted_in_stream: bool,
answer_emitted_in_stream: bool,
emitted_this_turn: bool,
emitted_in_stream: bool,
}

impl HeaderEmitter {
pub(crate) fn new() -> Self {
Self {
reasoning_emitted_this_turn: false,
answer_emitted_this_turn: false,
reasoning_emitted_in_stream: false,
answer_emitted_in_stream: false,
emitted_this_turn: false,
emitted_in_stream: false,
}
}

pub(crate) fn reset_for_new_turn(&mut self) {
self.reasoning_emitted_this_turn = false;
self.answer_emitted_this_turn = false;
self.reasoning_emitted_in_stream = false;
self.answer_emitted_in_stream = false;
self.emitted_this_turn = false;
self.emitted_in_stream = false;
}

pub(crate) fn reset_for_stream(&mut self, kind: StreamKind) {
match kind {
StreamKind::Reasoning => self.reasoning_emitted_in_stream = false,
StreamKind::Answer => self.answer_emitted_in_stream = false,
}
pub(crate) fn reset_for_stream(&mut self) {
self.emitted_in_stream = false;
}

pub(crate) fn has_emitted_for_stream(&self, kind: StreamKind) -> bool {
match kind {
StreamKind::Reasoning => self.reasoning_emitted_in_stream,
StreamKind::Answer => self.answer_emitted_in_stream,
}
/// Allow emitting the header again within the current turn after a finalize.
pub(crate) fn allow_reemit_in_turn(&mut self) {
self.emitted_this_turn = false;
}

/// Allow emitting the header again for the same kind within the current turn.
///
/// This is used when a stream (e.g., Answer) is finalized and a subsequent
/// block of the same kind is started within the same turn. Without this,
/// only the first block would render a header.
pub(crate) fn allow_reemit_for_same_kind_in_turn(&mut self, kind: StreamKind) {
match kind {
StreamKind::Reasoning => self.reasoning_emitted_this_turn = false,
StreamKind::Answer => self.answer_emitted_this_turn = false,
}
}

pub(crate) fn maybe_emit(
&mut self,
kind: StreamKind,
out_lines: &mut Vec<ratatui::text::Line<'static>>,
) -> bool {
let already_emitted_this_turn = match kind {
StreamKind::Reasoning => self.reasoning_emitted_this_turn,
StreamKind::Answer => self.answer_emitted_this_turn,
};
let already_emitted_in_stream = self.has_emitted_for_stream(kind);
if !already_emitted_in_stream && !already_emitted_this_turn {
out_lines.push(render_header_line(kind));
match kind {
StreamKind::Reasoning => {
self.reasoning_emitted_in_stream = true;
self.reasoning_emitted_this_turn = true;
// Reset opposite header so it may be emitted again this turn
self.answer_emitted_this_turn = false;
}
StreamKind::Answer => {
self.answer_emitted_in_stream = true;
self.answer_emitted_this_turn = true;
// Reset opposite header so it may be emitted again this turn
self.reasoning_emitted_this_turn = false;
}
}
true
} else {
false
pub(crate) fn maybe_emit(&mut self, out_lines: &mut Vec<ratatui::text::Line<'static>>) -> bool {
if !self.emitted_in_stream && !self.emitted_this_turn {
out_lines.push(render_header_line());
self.emitted_in_stream = true;
self.emitted_this_turn = true;
return true;
}
false
}
}

fn render_header_line(kind: StreamKind) -> ratatui::text::Line<'static> {
fn render_header_line() -> ratatui::text::Line<'static> {
use ratatui::style::Stylize;
match kind {
StreamKind::Reasoning => ratatui::text::Line::from("thinking".magenta().italic()),
StreamKind::Answer => ratatui::text::Line::from("codex".magenta().bold()),
}
ratatui::text::Line::from("codex".magenta().bold())
}

223
codex-rs/tui/src/transcript_app.rs
Normal file
@@ -0,0 +1,223 @@
use std::io::Result;

use crate::insert_history;
use crate::tui;
use crate::tui::TuiEvent;
use crossterm::event::KeyCode;
use crossterm::event::KeyEvent;
use crossterm::event::KeyEventKind;
use ratatui::buffer::Buffer;
use ratatui::layout::Rect;
use ratatui::style::Color;
use ratatui::style::Style;
use ratatui::style::Styled;
use ratatui::style::Stylize;
use ratatui::text::Line;
use ratatui::text::Span;
use ratatui::widgets::Paragraph;
use ratatui::widgets::WidgetRef;

pub(crate) struct TranscriptApp {
pub(crate) transcript_lines: Vec<Line<'static>>,
pub(crate) scroll_offset: usize,
pub(crate) is_done: bool,
title: String,
}

impl TranscriptApp {
pub(crate) fn new(transcript_lines: Vec<Line<'static>>) -> Self {
Self {
transcript_lines,
scroll_offset: 0,
is_done: false,
title: "T R A N S C R I P T".to_string(),
}
}

pub(crate) fn with_title(transcript_lines: Vec<Line<'static>>, title: String) -> Self {
Self {
transcript_lines,
scroll_offset: 0,
is_done: false,
title,
}
}

pub(crate) fn handle_event(&mut self, tui: &mut tui::Tui, event: TuiEvent) -> Result<()> {
match event {
TuiEvent::Key(key_event) => self.handle_key_event(tui, key_event),
TuiEvent::Draw => {
tui.draw(u16::MAX, |frame| {
self.render(frame.area(), frame.buffer);
})?;
}
_ => {}
}
Ok(())
}

pub(crate) fn insert_lines(&mut self, lines: Vec<Line<'static>>) {
self.transcript_lines.extend(lines);
}

fn handle_key_event(&mut self, tui: &mut tui::Tui, key_event: KeyEvent) {
match key_event {
// Ctrl+Z is handled at the App level when transcript overlay is active
KeyEvent {
code: KeyCode::Char('q'),
kind: KeyEventKind::Press,
..
}
| KeyEvent {
code: KeyCode::Char('t'),
modifiers: crossterm::event::KeyModifiers::CONTROL,
kind: KeyEventKind::Press,
..
}
| KeyEvent {
code: KeyCode::Char('c'),
modifiers: crossterm::event::KeyModifiers::CONTROL,
kind: KeyEventKind::Press,
..
} => {
self.is_done = true;
}
KeyEvent {
code: KeyCode::Up,
kind: KeyEventKind::Press | KeyEventKind::Repeat,
..
} => {
self.scroll_offset = self.scroll_offset.saturating_sub(1);
}
KeyEvent {
code: KeyCode::Down,
kind: KeyEventKind::Press | KeyEventKind::Repeat,
..
} => {
self.scroll_offset = self.scroll_offset.saturating_add(1);
}
KeyEvent {
code: KeyCode::PageUp,
kind: KeyEventKind::Press | KeyEventKind::Repeat,
..
} => {
let area = self.scroll_area(tui.terminal.viewport_area);
self.scroll_offset = self.scroll_offset.saturating_sub(area.height as usize);
}
KeyEvent {
code: KeyCode::PageDown,
kind: KeyEventKind::Press | KeyEventKind::Repeat,
..
} => {
let area = self.scroll_area(tui.terminal.viewport_area);
self.scroll_offset = self.scroll_offset.saturating_add(area.height as usize);
}
KeyEvent {
code: KeyCode::Home,
kind: KeyEventKind::Press | KeyEventKind::Repeat,
..
} => {
self.scroll_offset = 0;
}
KeyEvent {
code: KeyCode::End,
kind: KeyEventKind::Press | KeyEventKind::Repeat,
..
} => {
self.scroll_offset = usize::MAX;
}
_ => {
return;
}
}
tui.frame_requester().schedule_frame();
}

fn scroll_area(&self, area: Rect) -> Rect {
let mut area = area;
// Reserve 1 line for the header and 4 lines for the bottom status section. This matches the chat composer.
area.y = area.y.saturating_add(1);
area.height = area.height.saturating_sub(5);
area
}

fn render(&mut self, area: Rect, buf: &mut Buffer) {
Span::from("/ ".repeat(area.width as usize / 2))
.dim()
.render_ref(area, buf);
let header = format!("/ {}", self.title);
Span::from(header).dim().render_ref(area, buf);

// Main content area (excludes header and bottom status section)
let content_area = self.scroll_area(area);
let wrapped = insert_history::word_wrap_lines(&self.transcript_lines, content_area.width);

// Clamp scroll offset to valid range
self.scroll_offset = self
.scroll_offset
.min(wrapped.len().saturating_sub(content_area.height as usize));
let start = self.scroll_offset;
let end = (start + content_area.height as usize).min(wrapped.len());
let page = &wrapped[start..end];
Paragraph::new(page.to_vec()).render_ref(content_area, buf);

// Fill remaining visible lines (if any) with a leading '~' in the first column.
let visible = (end - start) as u16;
if content_area.height > visible {
let extra = content_area.height - visible;
for i in 0..extra {
let y = content_area.y.saturating_add(visible + i);
Span::from("~")
.dim()
.render_ref(Rect::new(content_area.x, y, 1, 1), buf);
}
}

// Bottom status section (4 lines): separator with % scrolled, then key hints (styled like chat composer)
let sep_y = content_area.bottom();
let sep_rect = Rect::new(area.x, sep_y, area.width, 1);
let hints_rect = Rect::new(area.x, sep_y + 1, area.width, 2);

// Separator line (dim)
Span::from("─".repeat(sep_rect.width as usize))
.dim()
.render_ref(sep_rect, buf);

// Scroll percentage (0-100%) aligned near the right edge
let max_scroll = wrapped.len().saturating_sub(content_area.height as usize);
let percent: u8 = if max_scroll == 0 {
100
} else {
(((self.scroll_offset.min(max_scroll)) as f32 / max_scroll as f32) * 100.0).round()
as u8
};
let pct_text = format!(" {percent}% ");
let pct_w = pct_text.chars().count() as u16;
let pct_x = sep_rect.x + sep_rect.width - pct_w - 1;
Span::from(pct_text)
.dim()
.render_ref(Rect::new(pct_x, sep_rect.y, pct_w, 1), buf);

let key_hint_style = Style::default().fg(Color::Cyan);

let hints1 = vec![
" ".into(),
"↑".set_style(key_hint_style),
"/".into(),
"↓".set_style(key_hint_style),
" scroll ".into(),
"PgUp".set_style(key_hint_style),
"/".into(),
"PgDn".set_style(key_hint_style),
" page ".into(),
"Home".set_style(key_hint_style),
"/".into(),
"End".set_style(key_hint_style),
" jump".into(),
];

let hints2 = vec![" ".into(), "q".set_style(key_hint_style), " quit".into()];
Paragraph::new(vec![Line::from(hints1).dim(), Line::from(hints2).dim()])
.render_ref(hints_rect, buf);
}
}
@@ -1,22 +1,32 @@
use std::io::Result;
use std::io::Stdout;
use std::io::stdout;
use std::path::PathBuf;
use std::pin::Pin;
use std::sync::Arc;
use std::sync::atomic::AtomicBool;
#[cfg(unix)]
use std::sync::atomic::AtomicU8;
use std::sync::atomic::Ordering;
use std::time::Duration;
use std::time::Instant;

use crossterm::SynchronizedUpdate;
use crossterm::cursor;
use crossterm::cursor::MoveTo;
use crossterm::event::DisableBracketedPaste;
use crossterm::event::EnableBracketedPaste;
use crossterm::event::Event;
use crossterm::event::KeyCode;
use crossterm::event::KeyEvent;
use crossterm::event::KeyEventKind;
use crossterm::event::KeyModifiers;
use crossterm::event::KeyboardEnhancementFlags;
use crossterm::event::PopKeyboardEnhancementFlags;
use crossterm::event::PushKeyboardEnhancementFlags;
use crossterm::terminal::Clear;
use crossterm::terminal::ClearType;
use crossterm::terminal::EnterAlternateScreen;
use crossterm::terminal::LeaveAlternateScreen;
use crossterm::terminal::ScrollUp;
use ratatui::backend::Backend;
use ratatui::backend::CrosstermBackend;
use ratatui::crossterm::execute;
@@ -25,6 +35,7 @@ use ratatui::crossterm::terminal::enable_raw_mode;
use ratatui::layout::Offset;
use ratatui::text::Line;

use crate::clipboard_paste::paste_image_to_temp_png;
use crate::custom_terminal;
use crate::custom_terminal::Terminal as CustomTerminal;
use tokio::select;
@@ -71,8 +82,14 @@ pub fn init() -> Result<Terminal> {

set_panic_hook();

// Clear screen and move cursor to top-left before drawing UI
execute!(stdout(), Clear(ClearType::All), MoveTo(0, 0))?;
// Instead of clearing the screen (which can drop scrollback in some terminals),
// scroll existing lines up until the cursor reaches the top, then start at (0, 0).
if let Ok((_x, y)) = cursor::position()
&& y > 0
{
execute!(stdout(), ScrollUp(y))?;
}
execute!(stdout(), MoveTo(0, 0))?;

let backend = CrosstermBackend::new(stdout());
let tui = CustomTerminal::with_options(backend)?;
@@ -92,8 +109,12 @@ pub enum TuiEvent {
Key(KeyEvent),
Paste(String),
Draw,
#[cfg(unix)]
ResumeFromSuspend,
AttachImage {
path: PathBuf,
width: u32,
height: u32,
format_label: &'static str,
},
}

pub struct Tui {
@@ -101,6 +122,29 @@ pub struct Tui {
draw_tx: tokio::sync::broadcast::Sender<()>,
pub(crate) terminal: Terminal,
pending_history_lines: Vec<Line<'static>>,
alt_saved_viewport: Option<ratatui::layout::Rect>,
#[cfg(unix)]
resume_pending: Arc<AtomicU8>, // Stores a ResumeAction
// True when overlay alt-screen UI is active
alt_screen_active: Arc<AtomicBool>,
}

#[cfg(unix)]
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
#[repr(u8)]
enum ResumeAction {
None = 0,
RealignInline = 1,
RestoreAlt = 2,
}

#[cfg(unix)]
fn take_resume_action(pending: &AtomicU8) -> ResumeAction {
match pending.swap(ResumeAction::None as u8, Ordering::Relaxed) {
1 => ResumeAction::RealignInline,
2 => ResumeAction::RestoreAlt,
_ => ResumeAction::None,
}
}

#[derive(Clone, Debug)]
@@ -178,6 +222,10 @@ impl Tui {
draw_tx,
terminal,
pending_history_lines: vec![],
alt_saved_viewport: None,
#[cfg(unix)]
resume_pending: Arc::new(AtomicU8::new(0)),
alt_screen_active: Arc::new(AtomicBool::new(false)),
}
}

@@ -191,31 +239,67 @@ impl Tui {
use tokio_stream::StreamExt;
let mut crossterm_events = crossterm::event::EventStream::new();
let mut draw_rx = self.draw_tx.subscribe();
#[cfg(unix)]
let resume_pending = self.resume_pending.clone();
#[cfg(unix)]
let alt_screen_active = self.alt_screen_active.clone();
let event_stream = async_stream::stream! {
loop {
select! {
Some(Ok(event)) = crossterm_events.next() => {
match event {
crossterm::event::Event::Key(KeyEvent {
code: KeyCode::Char('z'),
modifiers: crossterm::event::KeyModifiers::CONTROL,
// Detect Ctrl+V to attach an image from the clipboard.
Event::Key(key_event @ KeyEvent {
code: KeyCode::Char('v'),
modifiers: KeyModifiers::CONTROL,
kind: KeyEventKind::Press,
..
}) => {
#[cfg(unix)]
{
let _ = Tui::suspend();
yield TuiEvent::ResumeFromSuspend;
yield TuiEvent::Draw;
match paste_image_to_temp_png() {
Ok((path, info)) => {
yield TuiEvent::AttachImage {
path,
width: info.width,
height: info.height,
format_label: info.encoded_format.label(),
};
}
Err(_) => {
// Fall back to normal key handling if no image is available.
yield TuiEvent::Key(key_event);
}
}
}

crossterm::event::Event::Key(key_event) => {
#[cfg(unix)]
if matches!(
key_event,
crossterm::event::KeyEvent {
code: crossterm::event::KeyCode::Char('z'),
modifiers: crossterm::event::KeyModifiers::CONTROL,
kind: crossterm::event::KeyEventKind::Press,
..
}
)
{
if alt_screen_active.load(Ordering::Relaxed) {
let _ = execute!(stdout(), LeaveAlternateScreen);
resume_pending.store(ResumeAction::RestoreAlt as u8, Ordering::Relaxed);
} else {
resume_pending.store(ResumeAction::RealignInline as u8, Ordering::Relaxed);
}
let _ = execute!(stdout(), crossterm::cursor::Show);
let _ = Tui::suspend();
yield TuiEvent::Draw;
continue;
}
yield TuiEvent::Key(key_event);
}
crossterm::event::Event::Resize(_, _) => {
Event::Resize(_, _) => {
yield TuiEvent::Draw;
}
crossterm::event::Event::Paste(pasted) => {
Event::Paste(pasted) => {
yield TuiEvent::Paste(pasted);
}
_ => {}
@@ -240,7 +324,6 @@ impl Tui {
};
Box::pin(event_stream)
}

#[cfg(unix)]
fn suspend() -> Result<()> {
restore()?;
@@ -249,6 +332,69 @@ impl Tui {
Ok(())
}

#[cfg(unix)]
fn apply_resume_action(&mut self, action: ResumeAction) -> Result<()> {
match action {
ResumeAction::RealignInline => {
let cursor_pos = self.terminal.get_cursor_position()?;
self.terminal
.set_viewport_area(ratatui::layout::Rect::new(0, cursor_pos.y, 0, 0));
}
ResumeAction::RestoreAlt => {
// When we're resuming from alt screen, we need to save what the cursor position
// _was_ when we resumed. That way, when we leave the alt screen, we can restore
// the cursor to the new position.
if let Ok((_x, y)) = crossterm::cursor::position()
&& let Some(saved) = self.alt_saved_viewport.as_mut()
{
saved.y = y;
}
let _ = execute!(self.terminal.backend_mut(), EnterAlternateScreen);
if let Ok(size) = self.terminal.size() {
self.terminal.set_viewport_area(ratatui::layout::Rect::new(
0,
0,
size.width,
size.height,
));
self.terminal.clear()?;
}
}
ResumeAction::None => {}
}
Ok(())
}

// Public suspend() removed; Ctrl+Z is handled internally via event_stream + draw.

/// Enter alternate screen and expand the viewport to full terminal size, saving the current
/// inline viewport for restoration when leaving.
pub fn enter_alt_screen(&mut self) -> Result<()> {
let _ = execute!(self.terminal.backend_mut(), EnterAlternateScreen);
if let Ok(size) = self.terminal.size() {
self.alt_saved_viewport = Some(self.terminal.viewport_area);
self.terminal.set_viewport_area(ratatui::layout::Rect::new(
0,
0,
size.width,
size.height,
));
let _ = self.terminal.clear();
}
self.alt_screen_active.store(true, Ordering::Relaxed);
Ok(())
}

/// Leave alternate screen and restore the previously saved inline viewport, if any.
pub fn leave_alt_screen(&mut self) -> Result<()> {
let _ = execute!(self.terminal.backend_mut(), LeaveAlternateScreen);
if let Some(saved) = self.alt_saved_viewport.take() {
self.terminal.set_viewport_area(saved);
}
self.alt_screen_active.store(false, Ordering::Relaxed);
Ok(())
}

pub fn insert_history_lines(&mut self, lines: Vec<Line<'static>>) {
self.pending_history_lines.extend(lines);
self.frame_requester().schedule_frame();
@@ -260,6 +406,11 @@ impl Tui {
draw_fn: impl FnOnce(&mut custom_terminal::Frame),
) -> Result<()> {
std::io::stdout().sync_update(|_| {
#[cfg(unix)]
{
// Apply any post-resume action before layout/clear/draw.
self.apply_resume_action(take_resume_action(&self.resume_pending))?;
}
let terminal = &mut self.terminal;
let screen_size = terminal.size()?;
let last_known_screen_size = terminal.last_known_screen_size;

@@ -328,7 +328,7 @@ impl UserApprovalWidget {
}
}
lines.push(Line::from(""));
self.app_event_tx.send(AppEvent::InsertHistory(lines));
self.app_event_tx.send(AppEvent::InsertHistoryLines(lines));

let op = match &self.approval_request {
ApprovalRequest::Exec { id, .. } => Op::ExecApproval {

@@ -1,12 +1,9 @@
thinking
Investigating binary sizes
To get started, describe a task or try one of these commands:

I need to check the codex-rs repository to explain why the project's binaries
are large. The user is likely seeking specifics about the setup: are Rust builds
static, what features are enabled, and is debug information included? It could
be due to static linking, included OpenSSL, or how panic handling is set up. I
should look into the Cargo.toml file to confirm features and profiles without
needing to edit any code. Let's get started on this!
/init - create an AGENTS.md file with instructions for Codex
/status - show current session configuration and token usage
/approvals - choose what Codex can do without approval
/model - choose what model and reasoning effort to use

codex
I’m going to scan the workspace and Cargo manifests to see build profiles and
@@ -37,16 +34,6 @@ dependencies that impact binary size. Then I’ll summarize the main causes.
# ratatui = { path = "../../ratatui" }
ratatui = { git = "https://github.com/nornagon/ratatui", branch =
"nornagon-v0.29.0-patch" }
Optimizing Rust Release Profile

I'm reviewing the workspace's release profile, which has settings like lto=fat,
strip=symbols, and codegen-units=1 to reduce binary size. However, it seems that
"strip = symbols" only removes symbols, not debug information. In Rust 1.73+,
both "symbols" and "debuginfo" stripping options are available. I should check
whether using these results in a smaller binary. I’ll also skim through crates
for heavy dependencies in Cargo.toml, including cli, core, exec, linux-sandbox,
tui, login, ollama, and mcp.

⚡ Running for d in ansi-escape apply-patch arg0 cli common core exec execpolicy
file-search linux-sandbox login mcp-client mcp-server mcp-types ollama tui; do
echo "--- $d/Cargo.toml"; sed -n '1,200p' $d/Cargo.toml; echo; done
@@ -181,85 +168,6 @@ echo "--- $d/Cargo.toml"; sed -n '1,200p' $d/Cargo.toml; echo; done

[dev-dependencies]
tempfile = "3"
thinking
Considering binary size impacts

I’m evaluating how binary size is influenced by various factors. There are many
binaries, such as codex and codex-exec, which include dependencies by default,
pulling in features like Tokio and hyper. For instance, reqwest includes
default features like TLS through rustls. If default features remain enabled,
it inflates the size with many additional components. I notice that for musl
targets, using vendored builds of OpenSSL could significantly increase binary
size. This is quite the intricate web of dependencies!

Analyzing musl and binary size

I’m considering the implications of building for musl with static linking.
In this case, OpenSSL might come into play through some crates, but reqwest
defaults to using rustls, although openssl-sys vendored could increase binary
size. It’s interesting to think about whether they’re targeting musl, especially
since it seems they might often be building for Mac and Linux with dynamic
linking instead.

I also realize there’s a difference between debug and release builds — release
should be smaller due to optimizations, but if they’re defaulting to debug
builds, that could lead to much larger binaries.

Assessing crate impacts on size

I need to be precise when analyzing binary sizes from various crates. Some
crates, like ratatui-image, include heavy features such as libjpeg decoders,
which can really bulk things up. Then there are the tree-sitter crates that
contain large static grammar tables, adding significant size. The starlark crate
and tracing-subscriber also contribute to size increases.

With static linking per binary being standard in Rust, it leads to multiple
copies of the same dependencies unless cdylibs are used. That makes the compile
times longer, but using LTO helps reduce bloat somewhat through deduplication.

Evaluating binary size factors

Having multiple binaries indeed increases the overall size significantly.
I'm considering options like upgrading to "opt-level = 'z'" or using
"strip=debuginfo" for reducing size. It's interesting that while the profile
only sets symbols for stripping, not debuginfo, this could inflate sizes if
retained in release builds.

The distinction between "symbols" and "debuginfo" is critical; "symbols" keeps
debug information which may not align with the goal of minimizing size. I’d like
to verify with the Rust documentation, but my network access is restricted.

Explaining large binary sizes

I’m analyzing why binaries in this project are so large. The primary reasons
include monolithic static linking, where each binary includes the full
dependency graph without shared libraries. Heavy dependencies such as reqwest,
hyper, and TLS components also contribute significantly to size, along with
image decoders and other libraries.

Using MUSL targets means vendored OpenSSL can drastically increase the size due
to static linking. Furthermore, if the release profile keeps debug information
due to using "strip = symbols," that can lead to larger binaries as well. It’s a
mix of several factors, including panic strategies and platform-specific linking
behavior.

Identifying binary size contributors

I’m considering why binaries are still large even without static linking. The
tracing-subscriber crate potentially adds weight because it includes regex,
though it can sometimes use a lighter version. References from files like "core/
Cargo.toml" show dependencies like reqwest and tokio, while the tui uses the
image crate.

To present this clearly, I’ll format the final answer with two main sections:
"Main Causes" and "Build-Mode Notes." I can also include brief suggestions for
reducing size, but I want to stay focused on answering the user's question.

codex
Here’s what’s driving size in this workspace’s binaries.

@@ -295,4 +203,4 @@ assertions—outputs are much larger than cargo build --release.

If you want, I can outline targeted trims (e.g., strip = "debuginfo", opt-level
= "z", panic abort, tighter tokio/reqwest features) and estimate impact per
binary.
binary.