Compare commits

..

3 Commits

Author       SHA1         Message                                                   Date
zhao-oai     b81cb7ceb3   Fix sandbox detection for user shell commands (#6094)     2025-11-01 17:27:03 -04:00
kevin zhao   c7a3428986   fix                                                       2025-10-31 17:27:35 -04:00
kevin zhao   d609dfa2fc   escalating permissions                                     2025-10-31 17:15:50 -04:00
231 changed files with 4870 additions and 14377 deletions

View File

@@ -4,5 +4,3 @@ Before opening this Pull Request, please read the dedicated "Contributing" markd
https://github.com/openai/codex/blob/main/docs/contributing.md
If your PR conforms to our contribution guidelines, replace this text with a detailed and high quality description of your changes.
Include a link to a bug report or enhancement request.

View File

@@ -46,7 +46,7 @@ jobs:
echo "pack_output=$PACK_OUTPUT" >> "$GITHUB_OUTPUT"
- name: Upload staged npm package artifact
uses: actions/upload-artifact@v5
uses: actions/upload-artifact@v4
with:
name: codex-npm-staging
path: ${{ steps.stage_npm_package.outputs.pack_output }}

View File

@@ -16,27 +16,10 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: contributor-assistant/github-action@v2.6.1
# Run on close only if the PR was merged. This will lock the PR to preserve
# the CLA agreement. We don't want to lock PRs that have been closed without
# merging because the contributor may want to respond with additional comments.
# This action has a "lock-pullrequest-aftermerge" option that can be set to false,
# but that would unconditionally skip locking even in cases where the PR was merged.
if: |
(
github.event_name == 'pull_request_target' &&
(
github.event.action == 'opened' ||
github.event.action == 'synchronize' ||
(github.event.action == 'closed' && github.event.pull_request.merged == true)
)
) ||
(
github.event_name == 'issue_comment' &&
(
github.event.comment.body == 'recheck' ||
github.event.comment.body == 'I have read the CLA Document and I hereby sign the CLA'
)
)
github.event_name == 'pull_request_target' ||
github.event.comment.body == 'recheck' ||
github.event.comment.body == 'I have read the CLA Document and I hereby sign the CLA'
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:

View File

@@ -22,6 +22,6 @@ jobs:
- name: Annotate locations with typos
uses: codespell-project/codespell-problem-matcher@b80729f885d32f78a716c2f107b4db1025001c42 # v1
- name: Codespell
uses: codespell-project/actions-codespell@8f01853be192eb0f849a5c7d721450e7a467c579 # v2.2
uses: codespell-project/actions-codespell@406322ec52dd7b488e48c1c4b82e2a8b3a1bf630 # v2.1
with:
ignore_words_file: .codespellignore

View File

@@ -26,36 +26,21 @@ jobs:
prompt: |
You are an assistant that reviews GitHub issues for the repository.
Your job is to choose the most appropriate labels for the issue described later in this prompt.
Your job is to choose the most appropriate existing labels for the issue described later in this prompt.
Follow these rules:
- Only pick labels out of the list below.
- Prefer a small set of precise labels over many broad ones.
- Add one (and only one) of the following three labels to distinguish the type of issue. Default to "bug" if unsure.
Labels to apply:
1. bug — Reproducible defects in Codex products (CLI, VS Code extension, web, auth).
2. enhancement — Feature requests or usability improvements that ask for new capabilities, better ergonomics, or quality-of-life tweaks.
3. documentation — Updates or corrections needed in docs/README/config references (broken links, missing examples, outdated keys, clarification requests).
- If applicable, add one of the following labels to specify which sub-product or product surface the issue relates to.
1. CLI — the Codex command line interface.
2. extension — VS Code (or other IDE) extension-specific issues.
3. codex-web — Issues targeting the Codex web UI/Cloud experience.
4. github-action — Issues with the Codex GitHub action.
5. iOS — Issues with the Codex iOS app.
- Additionally add zero or more of the following labels that are relevant to the issue content. Prefer a small set of precise labels over many broad ones.
1. windows-os — Bugs or friction specific to Windows environments (always when PowerShell is mentioned, path handling, copy/paste, OS-specific auth or tooling failures).
2. mcp — Topics involving Model Context Protocol servers/clients.
3. mcp-server — Problems related to the codex mcp-server command, where codex runs as an MCP server.
4. azure — Problems or requests tied to Azure OpenAI deployments.
5. model-behavior — Undesirable LLM behavior: forgetting goals, refusing work, hallucinating environment details, quota misreports, or other reasoning/performance anomalies.
6. code-review — Issues related to the code review feature or functionality.
7. auth - Problems related to authentication, login, or access tokens.
8. codex-exec - Problems related to the "codex exec" command or functionality.
9. context-management - Problems related to compaction, context windows, or available context reporting.
10. custom-model - Problems that involve using custom model providers, local models, or OSS models.
11. rate-limits - Problems related to token limits, rate limits, or token usage reporting.
12. sandbox - Issues related to local sandbox environments or tool call approvals to override sandbox restrictions.
13. tool-calls - Problems related to specific tool call invocations including unexpected errors, failures, or hangs.
14. TUI - Problems with the terminal user interface (TUI) including keyboard shortcuts, copy & pasting, menus, or screen update issues.
3. extension — VS Code (or other IDE) extension-specific issues.
4. windows-os — Bugs or friction specific to Windows environments (always when PowerShell is mentioned, path handling, copy/paste, OS-specific auth or tooling failures).
5. mcp — Topics involving Model Context Protocol servers/clients.
6. codex-web — Issues targeting the Codex web UI/Cloud experience.
8. azure — Problems or requests tied to Azure OpenAI deployments.
9. documentation — Updates or corrections needed in docs/README/config references (broken links, missing examples, outdated keys, clarification requests).
10. model-behavior — Undesirable LLM behavior: forgetting goals, refusing work, hallucinating environment details, quota misreports, or other reasoning/performance anomalies.
Issue number: ${{ github.event.issue.number }}

View File

@@ -76,7 +76,7 @@ jobs:
steps:
- uses: actions/checkout@v5
- uses: dtolnay/rust-toolchain@1.90
- uses: taiki-e/install-action@44c6d64aa62cd779e873306675c7a58e86d6d532 # v2
- uses: taiki-e/install-action@0c5db7f7f897c03b771660e91d065338615679f4 # v2
with:
tool: cargo-shear
version: 1.5.1
@@ -170,7 +170,7 @@ jobs:
# Install and restore sccache cache
- name: Install sccache
uses: taiki-e/install-action@44c6d64aa62cd779e873306675c7a58e86d6d532 # v2
uses: taiki-e/install-action@0c5db7f7f897c03b771660e91d065338615679f4 # v2
with:
tool: sccache
version: 0.7.5
@@ -228,7 +228,7 @@ jobs:
- name: Install cargo-chef
if: ${{ matrix.profile == 'release' }}
uses: taiki-e/install-action@44c6d64aa62cd779e873306675c7a58e86d6d532 # v2
uses: taiki-e/install-action@0c5db7f7f897c03b771660e91d065338615679f4 # v2
with:
tool: cargo-chef
version: 0.1.71
@@ -370,7 +370,7 @@ jobs:
cargo-home-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-
- name: Install sccache
uses: taiki-e/install-action@44c6d64aa62cd779e873306675c7a58e86d6d532 # v2
uses: taiki-e/install-action@0c5db7f7f897c03b771660e91d065338615679f4 # v2
with:
tool: sccache
version: 0.7.5
@@ -399,7 +399,7 @@ jobs:
sccache-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-${{ hashFiles('**/Cargo.lock') }}-
sccache-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-
- uses: taiki-e/install-action@44c6d64aa62cd779e873306675c7a58e86d6d532 # v2
- uses: taiki-e/install-action@0c5db7f7f897c03b771660e91d065338615679f4 # v2
with:
tool: nextest
version: 0.9.103

View File

@@ -295,15 +295,6 @@ jobs:
# ${{ matrix.target }}
dest="dist/${{ matrix.target }}"
# We want to ship the raw Windows executables in the GitHub Release
# in addition to the compressed archives. Keep the originals for
# Windows targets; remove them elsewhere to limit the number of
# artifacts that end up in the GitHub Release.
keep_originals=false
if [[ "${{ matrix.runner }}" == windows* ]]; then
keep_originals=true
fi
# For compatibility with environments that lack the `zstd` tool we
# additionally create a `.tar.gz` for all platforms and `.zip` for
# Windows alongside every single binary that we publish. The end result is:
@@ -333,11 +324,7 @@ jobs:
# Also create .zst (existing behaviour) *and* remove the original
# uncompressed binary to keep the directory small.
zstd_args=(-T0 -19)
if [[ "${keep_originals}" == false ]]; then
zstd_args+=(--rm)
fi
zstd "${zstd_args[@]}" "$dest/$base"
zstd -T0 -19 --rm "$dest/$base"
done
- name: Remove signing keychain
@@ -363,7 +350,7 @@ jobs:
fi
fi
- uses: actions/upload-artifact@v5
- uses: actions/upload-artifact@v4
with:
name: ${{ matrix.target }}
# Upload the per-binary .zst files as well as the new .tar.gz

View File

@@ -84,7 +84,6 @@ If you don't have the tool:
- Use `ResponseMock::single_request()` when a test should only issue one POST, or `ResponseMock::requests()` to inspect every captured `ResponsesRequest`.
- `ResponsesRequest` exposes helpers (`body_json`, `input`, `function_call_output`, `custom_tool_call_output`, `call_output`, `header`, `path`, `query_param`) so assertions can target structured payloads instead of manual JSON digging.
- Build SSE payloads with the provided `ev_*` constructors and the `sse(...)` helper.
- Prefer `wait_for_event` over `wait_for_event_with_timeout`.
- Typical pattern:
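A minimal sketch of what that pattern might look like; `ResponseMock`, `ResponsesRequest`, `sse(...)`, the `ev_*` constructors, and `wait_for_event` come from the notes above, while the mock mounting call and session harness names are assumptions for illustration, not code copied from the repository:

```rust
// Hypothetical sketch; respond_with, start_test_session, and send_user_message
// are assumed harness names, and the ev_* constructor signatures are assumed.
#[tokio::test]
async fn assistant_reply_is_surfaced() -> anyhow::Result<()> {
    // The test should issue exactly one POST to the Responses API.
    let mock = ResponseMock::single_request();
    mock.respond_with(sse(vec![
        ev_assistant_message("item_1", "hello"), // assumed ev_* constructor
        ev_completed("resp_1"),                  // assumed ev_* constructor
    ]));

    let session = start_test_session(&mock).await?;
    session.send_user_message("say hello").await?;

    // Prefer wait_for_event over wait_for_event_with_timeout.
    wait_for_event(&session, |ev| matches!(ev, EventMsg::AgentMessage(_))).await;

    // Assert against the captured request via the structured helpers
    // rather than digging through raw JSON.
    let requests = mock.requests();
    let request = requests.first().expect("one captured request");
    assert_eq!(request.path(), "/v1/responses");
    assert!(request.body_json()["input"].is_array());
    Ok(())
}
```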

View File

@@ -75,13 +75,11 @@ Codex CLI supports a rich set of configuration options, with preferences stored
- [**Getting started**](./docs/getting-started.md)
- [CLI usage](./docs/getting-started.md#cli-usage)
- [Slash Commands](./docs/slash_commands.md)
- [Running with a prompt as input](./docs/getting-started.md#running-with-a-prompt-as-input)
- [Example prompts](./docs/getting-started.md#example-prompts)
- [Custom prompts](./docs/prompts.md)
- [Memory with AGENTS.md](./docs/getting-started.md#memory-with-agentsmd)
- [**Configuration**](./docs/config.md)
- [Example config](./docs/example-config.md)
- [**Sandbox & approvals**](./docs/sandbox.md)
- [**Authentication**](./docs/authentication.md)
- [Auth methods](./docs/authentication.md#forcing-a-specific-auth-method-advanced)

codex-rs/Cargo.lock generated
View File

@@ -186,11 +186,9 @@ dependencies = [
"chrono",
"codex-app-server-protocol",
"codex-core",
"codex-protocol",
"serde",
"serde_json",
"tokio",
"uuid",
"wiremock",
]
@@ -211,7 +209,6 @@ dependencies = [
"parking_lot",
"percent-encoding",
"windows-sys 0.59.0",
"wl-clipboard-rs",
"x11rb",
]
@@ -238,44 +235,46 @@ dependencies = [
[[package]]
name = "askama"
version = "0.14.0"
version = "0.12.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f75363874b771be265f4ffe307ca705ef6f3baa19011c149da8674a87f1b75c4"
checksum = "b79091df18a97caea757e28cd2d5fda49c6cd4bd01ddffd7ff01ace0c0ad2c28"
dependencies = [
"askama_derive",
"itoa",
"askama_escape",
"humansize",
"num-traits",
"percent-encoding",
"serde",
"serde_json",
]
[[package]]
name = "askama_derive"
version = "0.14.0"
version = "0.12.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "129397200fe83088e8a68407a8e2b1f826cf0086b21ccdb866a722c8bcd3a94f"
checksum = "19fe8d6cb13c4714962c072ea496f3392015f0989b1a2847bb4b2d9effd71d83"
dependencies = [
"askama_parser",
"basic-toml",
"memchr",
"mime",
"mime_guess",
"proc-macro2",
"quote",
"rustc-hash 2.1.1",
"serde",
"serde_derive",
"syn 2.0.104",
]
[[package]]
name = "askama_parser"
version = "0.14.0"
name = "askama_escape"
version = "0.10.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d6ab5630b3d5eaf232620167977f95eb51f3432fc76852328774afbd242d4358"
checksum = "619743e34b5ba4e9703bba34deac3427c72507c7159f5fd030aea8cac0cfe341"
[[package]]
name = "askama_parser"
version = "0.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "acb1161c6b64d1c3d83108213c2a2533a342ac225aabd0bda218278c2ddb00c0"
dependencies = [
"memchr",
"serde",
"serde_derive",
"winnow",
"nom",
]
[[package]]
@@ -872,7 +871,6 @@ dependencies = [
"anyhow",
"clap",
"codex-protocol",
"mcp-types",
"paste",
"pretty_assertions",
"schemars 0.8.22",
@@ -980,23 +978,20 @@ dependencies = [
"codex-mcp-server",
"codex-process-hardening",
"codex-protocol",
"codex-protocol-ts",
"codex-responses-api-proxy",
"codex-rmcp-client",
"codex-stdio-to-uds",
"codex-tui",
"codex-windows-sandbox",
"ctor 0.5.0",
"libc",
"owo-colors",
"predicates",
"pretty_assertions",
"regex-lite",
"serde_json",
"supports-color",
"tempfile",
"tokio",
"toml",
"tracing",
]
[[package]]
@@ -1067,7 +1062,6 @@ dependencies = [
"chrono",
"codex-app-server-protocol",
"codex-apply-patch",
"codex-arg0",
"codex-async-utils",
"codex-file-search",
"codex-git",
@@ -1082,7 +1076,6 @@ dependencies = [
"codex-windows-sandbox",
"core-foundation 0.9.4",
"core_test_support",
"ctor 0.5.0",
"dirs",
"dunce",
"env-flags",
@@ -1368,6 +1361,16 @@ dependencies = [
"uuid",
]
[[package]]
name = "codex-protocol-ts"
version = "0.0.0"
dependencies = [
"anyhow",
"clap",
"codex-app-server-protocol",
"ts-rs",
]
[[package]]
name = "codex-responses-api-proxy"
version = "0.0.0"
@@ -1445,10 +1448,8 @@ dependencies = [
"codex-login",
"codex-ollama",
"codex-protocol",
"codex-windows-sandbox",
"color-eyre",
"crossterm",
"derive_more 2.0.1",
"diffy",
"dirs",
"dunce",
@@ -1468,7 +1469,6 @@ dependencies = [
"regex-lite",
"serde",
"serde_json",
"serial_test",
"shlex",
"strum 0.27.2",
"strum_macros 0.27.2",
@@ -1559,7 +1559,6 @@ version = "0.1.0"
dependencies = [
"anyhow",
"dirs-next",
"dunce",
"rand 0.8.5",
"serde",
"serde_json",
@@ -1653,15 +1652,6 @@ dependencies = [
"unicode-segmentation",
]
[[package]]
name = "convert_case"
version = "0.7.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bb402b8d4c85569410425650ce3eddc7d698ed96d39a73f941b08fb63082f1e7"
dependencies = [
"unicode-segmentation",
]
[[package]]
name = "core-foundation"
version = "0.9.4"
@@ -1760,7 +1750,8 @@ checksum = "d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28"
[[package]]
name = "crossterm"
version = "0.28.1"
source = "git+https://github.com/nornagon/crossterm?branch=nornagon%2Fcolor-query#87db8bfa6dc99427fd3b071681b07fc31c6ce995"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "829d955a0bb380ef178a640b91779e3987da38c9aea133b20614cfed8cdea9c6"
dependencies = [
"bitflags 2.10.0",
"crossterm_winapi",
@@ -2007,7 +1998,7 @@ version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cb7330aeadfbe296029522e6c40f315320aba36fc43a5b3632f3795348f3bd22"
dependencies = [
"convert_case 0.6.0",
"convert_case",
"proc-macro2",
"quote",
"syn 2.0.104",
@@ -2020,7 +2011,6 @@ version = "2.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bda628edc44c4bb645fbe0f758797143e4e07926f7ebf4e9bdfbd3d2ce621df3"
dependencies = [
"convert_case 0.7.1",
"proc-macro2",
"quote",
"syn 2.0.104",
@@ -2884,6 +2874,15 @@ version = "1.0.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9"
[[package]]
name = "humansize"
version = "2.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6cb51c9a029ddc91b07a787f1d86b53ccfa49b0e86688c946ebe8d3555685dd7"
dependencies = [
"libm",
]
[[package]]
name = "hyper"
version = "1.7.0"
@@ -3527,6 +3526,12 @@ dependencies = [
"pkg-config",
]
[[package]]
name = "libm"
version = "0.2.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f9fbbcab51052fe104eb5e5d351cf728d30a5be1fe14d9be8a3b097481fb97de"
[[package]]
name = "libredox"
version = "0.1.6"
@@ -4293,16 +4298,6 @@ dependencies = [
"windows-sys 0.52.0",
]
[[package]]
name = "os_pipe"
version = "1.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "db335f4760b14ead6290116f2427bf33a14d4f0617d49f78a246de10c1831224"
dependencies = [
"libc",
"windows-sys 0.59.0",
]
[[package]]
name = "owo-colors"
version = "4.2.2"
@@ -4450,7 +4445,7 @@ checksum = "3af6b589e163c5a788fab00ce0c0366f6efbb9959c2f9874b224936af7fce7e1"
dependencies = [
"base64",
"indexmap 2.12.0",
"quick-xml 0.38.0",
"quick-xml",
"serde",
"time",
]
@@ -4679,15 +4674,6 @@ version = "2.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a993555f31e5a609f617c12db6250dedcac1b0a85076912c436e6fc9b2c8e6a3"
[[package]]
name = "quick-xml"
version = "0.37.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "331e97a1af0bf59823e6eadffe373d7b27f485be8748f71471c662c1f269b7fb"
dependencies = [
"memchr",
]
[[package]]
name = "quick-xml"
version = "0.38.0"
@@ -5022,9 +5008,9 @@ dependencies = [
[[package]]
name = "rmcp"
version = "0.8.5"
version = "0.8.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e5947688160b56fb6c827e3c20a72c90392a1d7e9dec74749197aa1780ac42ca"
checksum = "1fdad1258f7259fdc0f2dfc266939c82c3b5d1fd72bcde274d600cdc27e60243"
dependencies = [
"base64",
"bytes",
@@ -5056,9 +5042,9 @@ dependencies = [
[[package]]
name = "rmcp-macros"
version = "0.8.5"
version = "0.8.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "01263441d3f8635c628e33856c468b96ebbce1af2d3699ea712ca71432d4ee7a"
checksum = "ede0589a208cc7ce81d1be68aa7e74b917fcd03c81528408bab0457e187dcd9b"
dependencies = [
"darling 0.21.3",
"proc-macro2",
@@ -5699,12 +5685,6 @@ dependencies = [
"digest",
]
[[package]]
name = "sha1_smol"
version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bbfa15b3dddfee50a0fff136974b3e1bde555604ba463834a7eb7deb6417705d"
[[package]]
name = "sha2"
version = "0.10.9"
@@ -6725,18 +6705,6 @@ version = "0.1.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c4013970217383f67b18aef68f6fb2e8d409bc5755227092d32efb0422ba24b8"
[[package]]
name = "tree_magic_mini"
version = "3.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f943391d896cdfe8eec03a04d7110332d445be7df856db382dd96a730667562c"
dependencies = [
"memchr",
"nom",
"once_cell",
"petgraph",
]
[[package]]
name = "try-lock"
version = "0.2.5"
@@ -6904,7 +6872,6 @@ dependencies = [
"getrandom 0.3.3",
"js-sys",
"serde",
"sha1_smol",
"wasm-bindgen",
]
@@ -7074,76 +7041,6 @@ dependencies = [
"web-sys",
]
[[package]]
name = "wayland-backend"
version = "0.3.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "673a33c33048a5ade91a6b139580fa174e19fb0d23f396dca9fa15f2e1e49b35"
dependencies = [
"cc",
"downcast-rs",
"rustix 1.0.8",
"smallvec",
"wayland-sys",
]
[[package]]
name = "wayland-client"
version = "0.31.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c66a47e840dc20793f2264eb4b3e4ecb4b75d91c0dd4af04b456128e0bdd449d"
dependencies = [
"bitflags 2.10.0",
"rustix 1.0.8",
"wayland-backend",
"wayland-scanner",
]
[[package]]
name = "wayland-protocols"
version = "0.32.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "efa790ed75fbfd71283bd2521a1cfdc022aabcc28bdcff00851f9e4ae88d9901"
dependencies = [
"bitflags 2.10.0",
"wayland-backend",
"wayland-client",
"wayland-scanner",
]
[[package]]
name = "wayland-protocols-wlr"
version = "0.3.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "efd94963ed43cf9938a090ca4f7da58eb55325ec8200c3848963e98dc25b78ec"
dependencies = [
"bitflags 2.10.0",
"wayland-backend",
"wayland-client",
"wayland-protocols",
"wayland-scanner",
]
[[package]]
name = "wayland-scanner"
version = "0.31.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "54cb1e9dc49da91950bdfd8b848c49330536d9d1fb03d4bfec8cae50caa50ae3"
dependencies = [
"proc-macro2",
"quick-xml 0.37.5",
"quote",
]
[[package]]
name = "wayland-sys"
version = "0.31.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "34949b42822155826b41db8e5d0c1be3a2bd296c747577a43a3e6daefc296142"
dependencies = [
"pkg-config",
]
[[package]]
name = "web-sys"
version = "0.3.77"
@@ -7715,25 +7612,6 @@ dependencies = [
"bitflags 2.10.0",
]
[[package]]
name = "wl-clipboard-rs"
version = "0.9.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8e5ff8d0e60065f549fafd9d6cb626203ea64a798186c80d8e7df4f8af56baeb"
dependencies = [
"libc",
"log",
"os_pipe",
"rustix 0.38.44",
"tempfile",
"thiserror 2.0.17",
"tree_magic_mini",
"wayland-backend",
"wayland-client",
"wayland-protocols",
"wayland-protocols-wlr",
]
[[package]]
name = "writeable"
version = "0.6.2"
@@ -7902,9 +7780,9 @@ dependencies = [
[[package]]
name = "zeroize"
version = "1.8.2"
version = "1.8.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b97154e67e32c85465826e8bcc1c59429aaaf107c1e4a9e53c8d8ccd5eff88d0"
checksum = "ced3678a2879b30306d323f4542626697a464a97c0a07c9aebf7ebca65cd4dde"
dependencies = [
"zeroize_derive",
]

View File

@@ -25,6 +25,7 @@ members = [
"ollama",
"process-hardening",
"protocol",
"protocol-ts",
"rmcp-client",
"responses-api-proxy",
"stdio-to-uds",
@@ -74,6 +75,7 @@ codex-ollama = { path = "ollama" }
codex-otel = { path = "otel" }
codex-process-hardening = { path = "process-hardening" }
codex-protocol = { path = "protocol" }
codex-protocol-ts = { path = "protocol-ts" }
codex-responses-api-proxy = { path = "responses-api-proxy" }
codex-rmcp-client = { path = "rmcp-client" }
codex-stdio-to-uds = { path = "stdio-to-uds" }
@@ -85,7 +87,7 @@ codex-utils-pty = { path = "utils/pty" }
codex-utils-readiness = { path = "utils/readiness" }
codex-utils-string = { path = "utils/string" }
codex-utils-tokenizer = { path = "utils/tokenizer" }
codex-windows-sandbox = { path = "windows-sandbox-rs" }
codex-windows-sandbox = { path = "windows-sandbox" }
core_test_support = { path = "core/tests/common" }
mcp-types = { path = "mcp-types" }
mcp_test_support = { path = "mcp-server/tests/common" }
@@ -94,8 +96,8 @@ mcp_test_support = { path = "mcp-server/tests/common" }
allocative = "0.3.3"
ansi-to-tui = "7.0.0"
anyhow = "1"
arboard = { version = "3", features = ["wayland-data-control"] }
askama = "0.14"
arboard = "3"
askama = "0.12"
assert_cmd = "2"
assert_matches = "1.5.0"
async-channel = "2.3.1"
@@ -160,7 +162,7 @@ ratatui = "0.29.0"
ratatui-macros = "0.6.0"
regex-lite = "0.1.7"
reqwest = "0.12"
rmcp = { version = "0.8.5", default-features = false }
rmcp = { version = "0.8.3", default-features = false }
schemars = "0.8.22"
seccompiler = "0.5.0"
sentry = "0.34.0"
@@ -211,7 +213,7 @@ which = "6"
wildmatch = "2.5.0"
wiremock = "0.6"
zeroize = "1.8.2"
zeroize = "1.8.1"
[workspace.lints]
rust = {}
@@ -254,12 +256,7 @@ unwrap_used = "deny"
# cargo-shear cannot see the platform-specific openssl-sys usage, so we
# silence the false positive here instead of deleting a real dependency.
[workspace.metadata.cargo-shear]
ignored = [
"icu_provider",
"openssl-sys",
"codex-utils-readiness",
"codex-utils-tokenizer",
]
ignored = ["icu_provider", "openssl-sys", "codex-utils-readiness", "codex-utils-tokenizer"]
[profile.release]
lto = "fat"
@@ -279,7 +276,6 @@ opt-level = 0
# Uncomment to debug local changes.
# ratatui = { path = "../../ratatui" }
ratatui = { git = "https://github.com/nornagon/ratatui", branch = "nornagon-v0.29.0-patch" }
crossterm = { git = "https://github.com/nornagon/crossterm", branch = "nornagon/color-query" }
# Uncomment to debug local changes.
# rmcp = { path = "../../rust-sdk/crates/rmcp" }

View File

@@ -58,7 +58,7 @@ To test to see what happens when a command is run under the sandbox provided by
```
# macOS
codex sandbox macos [--full-auto] [--log-denials] [COMMAND]...
codex sandbox macos [--full-auto] [COMMAND]...
# Linux
codex sandbox linux [--full-auto] [COMMAND]...
@@ -67,7 +67,7 @@ codex sandbox linux [--full-auto] [COMMAND]...
codex sandbox windows [--full-auto] [COMMAND]...
# Legacy aliases
codex debug seatbelt [--full-auto] [--log-denials] [COMMAND]...
codex debug seatbelt [--full-auto] [COMMAND]...
codex debug landlock [--full-auto] [COMMAND]...
```

View File

@@ -14,7 +14,6 @@ workspace = true
anyhow = { workspace = true }
clap = { workspace = true, features = ["derive"] }
codex-protocol = { workspace = true }
mcp-types = { workspace = true }
paste = { workspace = true }
schemars = { workspace = true }
serde = { workspace = true, features = ["derive"] }

View File

@@ -2,27 +2,20 @@ use crate::ClientNotification;
use crate::ClientRequest;
use crate::ServerNotification;
use crate::ServerRequest;
use crate::export_client_notification_schemas;
use crate::export_client_param_schemas;
use crate::export_client_response_schemas;
use crate::export_client_responses;
use crate::export_server_notification_schemas;
use crate::export_server_param_schemas;
use crate::export_server_response_schemas;
use crate::export_server_responses;
use anyhow::Context;
use anyhow::Result;
use anyhow::anyhow;
use codex_protocol::parse_command::ParsedCommand;
use codex_protocol::protocol::EventMsg;
use codex_protocol::protocol::FileChange;
use codex_protocol::protocol::SandboxPolicy;
use schemars::JsonSchema;
use schemars::schema::RootSchema;
use schemars::schema_for;
use serde::Serialize;
use serde_json::Map;
use serde_json::Value;
use std::collections::HashMap;
use std::collections::BTreeMap;
use std::collections::HashSet;
use std::ffi::OsStr;
use std::fs;
@@ -35,29 +28,83 @@ use ts_rs::TS;
const HEADER: &str = "// GENERATED CODE! DO NOT MODIFY BY HAND!\n\n";
#[derive(Clone)]
pub struct GeneratedSchema {
namespace: Option<String>,
logical_name: String,
value: Value,
in_v1_dir: bool,
macro_rules! for_each_schema_type {
($macro:ident) => {
$macro!(crate::RequestId);
$macro!(crate::JSONRPCMessage);
$macro!(crate::JSONRPCRequest);
$macro!(crate::JSONRPCNotification);
$macro!(crate::JSONRPCResponse);
$macro!(crate::JSONRPCError);
$macro!(crate::JSONRPCErrorError);
$macro!(crate::AddConversationListenerParams);
$macro!(crate::AddConversationSubscriptionResponse);
$macro!(crate::ApplyPatchApprovalParams);
$macro!(crate::ApplyPatchApprovalResponse);
$macro!(crate::ArchiveConversationParams);
$macro!(crate::ArchiveConversationResponse);
$macro!(crate::AuthMode);
$macro!(crate::AuthStatusChangeNotification);
$macro!(crate::CancelLoginChatGptParams);
$macro!(crate::CancelLoginChatGptResponse);
$macro!(crate::ClientInfo);
$macro!(crate::ClientNotification);
$macro!(crate::ClientRequest);
$macro!(crate::ConversationSummary);
$macro!(crate::ExecCommandApprovalParams);
$macro!(crate::ExecCommandApprovalResponse);
$macro!(crate::ExecOneOffCommandParams);
$macro!(crate::ExecOneOffCommandResponse);
$macro!(crate::FuzzyFileSearchParams);
$macro!(crate::FuzzyFileSearchResponse);
$macro!(crate::FuzzyFileSearchResult);
$macro!(crate::GetAuthStatusParams);
$macro!(crate::GetAuthStatusResponse);
$macro!(crate::GetUserAgentResponse);
$macro!(crate::GetUserSavedConfigResponse);
$macro!(crate::GitDiffToRemoteParams);
$macro!(crate::GitDiffToRemoteResponse);
$macro!(crate::GitSha);
$macro!(crate::InitializeParams);
$macro!(crate::InitializeResponse);
$macro!(crate::InputItem);
$macro!(crate::InterruptConversationParams);
$macro!(crate::InterruptConversationResponse);
$macro!(crate::ListConversationsParams);
$macro!(crate::ListConversationsResponse);
$macro!(crate::LoginApiKeyParams);
$macro!(crate::LoginApiKeyResponse);
$macro!(crate::LoginChatGptCompleteNotification);
$macro!(crate::LoginChatGptResponse);
$macro!(crate::LogoutChatGptParams);
$macro!(crate::LogoutChatGptResponse);
$macro!(crate::NewConversationParams);
$macro!(crate::NewConversationResponse);
$macro!(crate::Profile);
$macro!(crate::RemoveConversationListenerParams);
$macro!(crate::RemoveConversationSubscriptionResponse);
$macro!(crate::ResumeConversationParams);
$macro!(crate::ResumeConversationResponse);
$macro!(crate::SandboxSettings);
$macro!(crate::SendUserMessageParams);
$macro!(crate::SendUserMessageResponse);
$macro!(crate::SendUserTurnParams);
$macro!(crate::SendUserTurnResponse);
$macro!(crate::ServerNotification);
$macro!(crate::ServerRequest);
$macro!(crate::SessionConfiguredNotification);
$macro!(crate::SetDefaultModelParams);
$macro!(crate::SetDefaultModelResponse);
$macro!(crate::Tools);
$macro!(crate::UserInfoResponse);
$macro!(crate::UserSavedConfig);
$macro!(codex_protocol::protocol::EventMsg);
$macro!(codex_protocol::protocol::FileChange);
$macro!(codex_protocol::parse_command::ParsedCommand);
$macro!(codex_protocol::protocol::SandboxPolicy);
};
}
impl GeneratedSchema {
fn namespace(&self) -> Option<&str> {
self.namespace.as_deref()
}
fn logical_name(&self) -> &str {
&self.logical_name
}
fn value(&self) -> &Value {
&self.value
}
}
type JsonSchemaEmitter = fn(&Path) -> Result<GeneratedSchema>;
pub fn generate_types(out_dir: &Path, prettier: Option<&Path>) -> Result<()> {
generate_ts(out_dir, prettier)?;
generate_json(out_dir)?;
@@ -65,9 +112,7 @@ pub fn generate_types(out_dir: &Path, prettier: Option<&Path>) -> Result<()> {
}
pub fn generate_ts(out_dir: &Path, prettier: Option<&Path>) -> Result<()> {
let v2_out_dir = out_dir.join("v2");
ensure_dir(out_dir)?;
ensure_dir(&v2_out_dir)?;
ClientRequest::export_all_to(out_dir)?;
export_client_responses(out_dir)?;
@@ -78,22 +123,17 @@ pub fn generate_ts(out_dir: &Path, prettier: Option<&Path>) -> Result<()> {
ServerNotification::export_all_to(out_dir)?;
generate_index_ts(out_dir)?;
generate_index_ts(&v2_out_dir)?;
// Ensure our header is present on all TS files (root + subdirs like v2/).
let ts_files = ts_files_in_recursive(out_dir)?;
let ts_files = ts_files_in(out_dir)?;
for file in &ts_files {
prepend_header_if_missing(file)?;
}
// Optionally run Prettier on all generated TS files.
if let Some(prettier_bin) = prettier
&& !ts_files.is_empty()
{
let status = Command::new(prettier_bin)
.arg("--write")
.arg("--log-level")
.arg("warn")
.args(ts_files.iter().map(|p| p.as_os_str()))
.status()
.with_context(|| format!("Failed to invoke Prettier at {}", prettier_bin.display()))?;
@@ -107,47 +147,23 @@ pub fn generate_ts(out_dir: &Path, prettier: Option<&Path>) -> Result<()> {
pub fn generate_json(out_dir: &Path) -> Result<()> {
ensure_dir(out_dir)?;
let envelope_emitters: &[JsonSchemaEmitter] = &[
|d| write_json_schema_with_return::<crate::RequestId>(d, "RequestId"),
|d| write_json_schema_with_return::<crate::JSONRPCMessage>(d, "JSONRPCMessage"),
|d| write_json_schema_with_return::<crate::JSONRPCRequest>(d, "JSONRPCRequest"),
|d| write_json_schema_with_return::<crate::JSONRPCNotification>(d, "JSONRPCNotification"),
|d| write_json_schema_with_return::<crate::JSONRPCResponse>(d, "JSONRPCResponse"),
|d| write_json_schema_with_return::<crate::JSONRPCError>(d, "JSONRPCError"),
|d| write_json_schema_with_return::<crate::JSONRPCErrorError>(d, "JSONRPCErrorError"),
|d| write_json_schema_with_return::<crate::ClientRequest>(d, "ClientRequest"),
|d| write_json_schema_with_return::<crate::ServerRequest>(d, "ServerRequest"),
|d| write_json_schema_with_return::<crate::ClientNotification>(d, "ClientNotification"),
|d| write_json_schema_with_return::<crate::ServerNotification>(d, "ServerNotification"),
|d| write_json_schema_with_return::<EventMsg>(d, "EventMsg"),
|d| write_json_schema_with_return::<FileChange>(d, "FileChange"),
|d| write_json_schema_with_return::<crate::protocol::v1::InputItem>(d, "InputItem"),
|d| write_json_schema_with_return::<ParsedCommand>(d, "ParsedCommand"),
|d| write_json_schema_with_return::<SandboxPolicy>(d, "SandboxPolicy"),
];
let mut bundle: BTreeMap<String, RootSchema> = BTreeMap::new();
let mut schemas: Vec<GeneratedSchema> = Vec::new();
for emit in envelope_emitters {
schemas.push(emit(out_dir)?);
macro_rules! add_schema {
($ty:path) => {{
let name = type_basename(stringify!($ty));
let schema = write_json_schema_with_return::<$ty>(out_dir, &name)?;
bundle.insert(name, schema);
}};
}
schemas.extend(export_client_param_schemas(out_dir)?);
schemas.extend(export_client_response_schemas(out_dir)?);
schemas.extend(export_server_param_schemas(out_dir)?);
schemas.extend(export_server_response_schemas(out_dir)?);
schemas.extend(export_client_notification_schemas(out_dir)?);
schemas.extend(export_server_notification_schemas(out_dir)?);
for_each_schema_type!(add_schema);
let bundle = build_schema_bundle(schemas)?;
write_pretty_json(
out_dir.join("codex_app_server_protocol.schemas.json"),
&bundle,
)?;
export_client_response_schemas(out_dir)?;
export_server_response_schemas(out_dir)?;
Ok(())
}
let mut definitions = Map::new();
fn build_schema_bundle(schemas: Vec<GeneratedSchema>) -> Result<Value> {
const SPECIAL_DEFINITIONS: &[&str] = &[
"ClientNotification",
"ClientRequest",
@@ -160,62 +176,22 @@ fn build_schema_bundle(schemas: Vec<GeneratedSchema>) -> Result<Value> {
"ServerRequest",
];
let namespaced_types = collect_namespaced_types(&schemas);
let mut definitions = Map::new();
for (name, schema) in bundle {
let mut schema_value = serde_json::to_value(schema)?;
annotate_schema(&mut schema_value, Some(name.as_str()));
for schema in schemas {
let GeneratedSchema {
namespace,
logical_name,
mut value,
in_v1_dir,
} = schema;
if let Some(ref ns) = namespace {
rewrite_refs_to_namespace(&mut value, ns);
}
let mut forced_namespace_refs: Vec<(String, String)> = Vec::new();
if let Value::Object(ref mut obj) = value
if let Value::Object(ref mut obj) = schema_value
&& let Some(defs) = obj.remove("definitions")
&& let Value::Object(defs_obj) = defs
{
for (def_name, mut def_schema) in defs_obj {
if SPECIAL_DEFINITIONS.contains(&def_name.as_str()) {
continue;
}
annotate_schema(&mut def_schema, Some(def_name.as_str()));
let target_namespace = match namespace {
Some(ref ns) => Some(ns.clone()),
None => namespace_for_definition(&def_name, &namespaced_types)
.cloned()
.filter(|_| !in_v1_dir),
};
if let Some(ref ns) = target_namespace {
if namespace.as_deref() == Some(ns.as_str()) {
rewrite_refs_to_namespace(&mut def_schema, ns);
insert_into_namespace(&mut definitions, ns, def_name.clone(), def_schema)?;
} else if !forced_namespace_refs
.iter()
.any(|(name, existing_ns)| name == &def_name && existing_ns == ns)
{
forced_namespace_refs.push((def_name.clone(), ns.clone()));
}
} else {
if !SPECIAL_DEFINITIONS.contains(&def_name.as_str()) {
annotate_schema(&mut def_schema, Some(def_name.as_str()));
definitions.insert(def_name, def_schema);
}
}
}
for (name, ns) in forced_namespace_refs {
rewrite_named_ref_to_namespace(&mut value, &ns, &name);
}
if let Some(ref ns) = namespace {
insert_into_namespace(&mut definitions, ns, logical_name.clone(), value)?;
} else {
definitions.insert(logical_name, value);
}
definitions.insert(name, schema_value);
}
let mut root = Map::new();
@@ -230,28 +206,15 @@ fn build_schema_bundle(schemas: Vec<GeneratedSchema>) -> Result<Value> {
root.insert("type".to_string(), Value::String("object".into()));
root.insert("definitions".to_string(), Value::Object(definitions));
Ok(Value::Object(root))
write_pretty_json(
out_dir.join("codex_app_server_protocol.schemas.json"),
&Value::Object(root),
)?;
Ok(())
}
fn insert_into_namespace(
definitions: &mut Map<String, Value>,
namespace: &str,
name: String,
schema: Value,
) -> Result<()> {
let entry = definitions
.entry(namespace.to_string())
.or_insert_with(|| Value::Object(Map::new()));
match entry {
Value::Object(map) => {
map.insert(name, schema);
Ok(())
}
_ => Err(anyhow!("expected namespace {namespace} to be an object")),
}
}
fn write_json_schema_with_return<T>(out_dir: &Path, name: &str) -> Result<GeneratedSchema>
fn write_json_schema_with_return<T>(out_dir: &Path, name: &str) -> Result<RootSchema>
where
T: JsonSchema,
{
@@ -259,37 +222,17 @@ where
let schema = schema_for!(T);
let mut schema_value = serde_json::to_value(schema)?;
annotate_schema(&mut schema_value, Some(file_stem));
// If the name looks like a namespaced path (e.g., "v2::Type"), mirror
// the TypeScript layout and write to out_dir/v2/Type.json. Otherwise
// write alongside the legacy files.
let (raw_namespace, logical_name) = split_namespace(file_stem);
let out_path = if let Some(ns) = raw_namespace {
let dir = out_dir.join(ns);
ensure_dir(&dir)?;
dir.join(format!("{logical_name}.json"))
} else {
out_dir.join(format!("{file_stem}.json"))
};
write_pretty_json(out_path, &schema_value)
write_pretty_json(out_dir.join(format!("{file_stem}.json")), &schema_value)
.with_context(|| format!("Failed to write JSON schema for {file_stem}"))?;
let namespace = match raw_namespace {
Some("v1") | None => None,
Some(ns) => Some(ns.to_string()),
};
Ok(GeneratedSchema {
in_v1_dir: raw_namespace == Some("v1"),
namespace,
logical_name: logical_name.to_string(),
value: schema_value,
})
let annotated_schema = serde_json::from_value(schema_value)?;
Ok(annotated_schema)
}
pub(crate) fn write_json_schema<T>(out_dir: &Path, name: &str) -> Result<GeneratedSchema>
pub(crate) fn write_json_schema<T>(out_dir: &Path, name: &str) -> Result<()>
where
T: JsonSchema,
{
write_json_schema_with_return::<T>(out_dir, name)
write_json_schema_with_return::<T>(out_dir, name).map(|_| ())
}
fn write_pretty_json(path: PathBuf, value: &impl Serialize) -> Result<()> {
@@ -298,73 +241,13 @@ fn write_pretty_json(path: PathBuf, value: &impl Serialize) -> Result<()> {
fs::write(&path, json).with_context(|| format!("Failed to write {}", path.display()))?;
Ok(())
}
/// Split a fully-qualified type name like "v2::Type" into its namespace and logical name.
fn split_namespace(name: &str) -> (Option<&str>, &str) {
name.split_once("::")
.map_or((None, name), |(ns, rest)| (Some(ns), rest))
}
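For illustration, a couple of hypothetical assertions showing the splitting behaviour this helper implements and how the result drives the `v2/` output layout described above:

```rust
// Illustrative only; not part of the generator. Names like "v2::Foo" are split so
// their schemas land under out_dir/v2/, while bare names stay with the legacy files.
#[test]
fn split_namespace_examples() {
    assert_eq!(
        split_namespace("v2::ThreadStartParams"),
        (Some("v2"), "ThreadStartParams")
    );
    assert_eq!(split_namespace("EventMsg"), (None, "EventMsg"));
}
```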
/// Recursively rewrite $ref values that point at "#/definitions/..." so that
/// they point to a namespaced location under the bundle.
fn rewrite_refs_to_namespace(value: &mut Value, ns: &str) {
match value {
Value::Object(obj) => {
if let Some(Value::String(r)) = obj.get_mut("$ref")
&& let Some(suffix) = r.strip_prefix("#/definitions/")
{
let prefix = format!("{ns}/");
if !suffix.starts_with(&prefix) {
*r = format!("#/definitions/{ns}/{suffix}");
}
}
for v in obj.values_mut() {
rewrite_refs_to_namespace(v, ns);
}
}
Value::Array(items) => {
for v in items.iter_mut() {
rewrite_refs_to_namespace(v, ns);
}
}
_ => {}
}
}
fn collect_namespaced_types(schemas: &[GeneratedSchema]) -> HashMap<String, String> {
let mut types = HashMap::new();
for schema in schemas {
if let Some(ns) = schema.namespace() {
types
.entry(schema.logical_name().to_string())
.or_insert_with(|| ns.to_string());
if let Some(Value::Object(defs)) = schema.value().get("definitions") {
for key in defs.keys() {
types.entry(key.clone()).or_insert_with(|| ns.to_string());
}
}
if let Some(Value::Object(defs)) = schema.value().get("$defs") {
for key in defs.keys() {
types.entry(key.clone()).or_insert_with(|| ns.to_string());
}
}
}
}
types
}
fn namespace_for_definition<'a>(
name: &str,
types: &'a HashMap<String, String>,
) -> Option<&'a String> {
if let Some(ns) = types.get(name) {
return Some(ns);
}
let trimmed = name.trim_end_matches(|c: char| c.is_ascii_digit());
if trimmed != name {
return types.get(trimmed);
}
None
fn type_basename(type_path: &str) -> String {
type_path
.rsplit_once("::")
.map(|(_, name)| name)
.unwrap_or(type_path)
.trim()
.to_string()
}
fn variant_definition_name(base: &str, variant: &Value) -> Option<String> {
@@ -584,33 +467,6 @@ fn ensure_dir(dir: &Path) -> Result<()> {
.with_context(|| format!("Failed to create output directory {}", dir.display()))
}
fn rewrite_named_ref_to_namespace(value: &mut Value, ns: &str, name: &str) {
let direct = format!("#/definitions/{name}");
let prefixed = format!("{direct}/");
let replacement = format!("#/definitions/{ns}/{name}");
let replacement_prefixed = format!("{replacement}/");
match value {
Value::Object(obj) => {
if let Some(Value::String(reference)) = obj.get_mut("$ref") {
if reference == &direct {
*reference = replacement;
} else if let Some(rest) = reference.strip_prefix(&prefixed) {
*reference = format!("{replacement_prefixed}{rest}");
}
}
for child in obj.values_mut() {
rewrite_named_ref_to_namespace(child, ns, name);
}
}
Value::Array(items) => {
for child in items {
rewrite_named_ref_to_namespace(child, ns, name);
}
}
_ => {}
}
}
fn prepend_header_if_missing(path: &Path) -> Result<()> {
let mut content = String::new();
{
@@ -648,28 +504,6 @@ fn ts_files_in(dir: &Path) -> Result<Vec<PathBuf>> {
Ok(files)
}
fn ts_files_in_recursive(dir: &Path) -> Result<Vec<PathBuf>> {
let mut files = Vec::new();
let mut stack = vec![dir.to_path_buf()];
while let Some(d) = stack.pop() {
for entry in
fs::read_dir(&d).with_context(|| format!("Failed to read dir {}", d.display()))?
{
let entry = entry?;
let path = entry.path();
if path.is_dir() {
stack.push(path);
} else if path.is_file() && path.extension() == Some(OsStr::new("ts")) {
files.push(path);
}
}
}
files.sort();
Ok(files)
}
/// Generate an index.ts file that re-exports all generated types.
/// This allows consumers to import all types from a single file.
fn generate_index_ts(out_dir: &Path) -> Result<PathBuf> {
let mut entries: Vec<String> = Vec::new();
let mut stems: Vec<String> = ts_files_in(out_dir)?
@@ -686,14 +520,6 @@ fn generate_index_ts(out_dir: &Path) -> Result<PathBuf> {
entries.push(format!("export type {{ {name} }} from \"./{name}\";\n"));
}
// If this is the root out_dir and a ./v2 folder exists with TS files,
// expose it as a namespace to avoid symbol collisions at the root.
let v2_dir = out_dir.join("v2");
let has_v2_ts = ts_files_in(&v2_dir).map(|v| !v.is_empty()).unwrap_or(false);
if has_v2_ts {
entries.push("export * as v2 from \"./v2\";\n".to_string());
}
let mut content =
String::with_capacity(HEADER.len() + entries.iter().map(String::len).sum::<usize>());
content.push_str(HEADER);
@@ -720,7 +546,6 @@ mod tests {
#[test]
fn generated_ts_has_no_optional_nullable_fields() -> Result<()> {
// Assert that there are no types of the form "?: T | null" in the generated TS files.
let output_dir = std::env::temp_dir().join(format!("codex_ts_types_{}", Uuid::now_v7()));
fs::create_dir(&output_dir)?;

View File

@@ -1,17 +1,15 @@
use std::collections::HashMap;
use std::path::Path;
use std::path::PathBuf;
use crate::JSONRPCNotification;
use crate::JSONRPCRequest;
use crate::RequestId;
use crate::export::GeneratedSchema;
use crate::export::write_json_schema;
use crate::protocol::v1;
use crate::protocol::v2;
use codex_protocol::ConversationId;
use codex_protocol::parse_command::ParsedCommand;
use codex_protocol::protocol::FileChange;
use codex_protocol::protocol::RateLimitSnapshot;
use codex_protocol::protocol::ReviewDecision;
use codex_protocol::protocol::SandboxCommandAssessment;
use paste::paste;
@@ -46,7 +44,7 @@ macro_rules! client_request_definitions {
(
$(
$(#[$variant_meta:meta])*
$variant:ident $(=> $wire:literal)? {
$variant:ident {
params: $(#[$params_meta:meta])* $params:ty,
response: $response:ty,
}
@@ -58,7 +56,6 @@ macro_rules! client_request_definitions {
pub enum ClientRequest {
$(
$(#[$variant_meta])*
$(#[serde(rename = $wire)] #[ts(rename = $wire)])?
$variant {
#[serde(rename = "id")]
request_id: RequestId,
@@ -77,103 +74,66 @@ macro_rules! client_request_definitions {
Ok(())
}
#[allow(clippy::vec_init_then_push)]
pub fn export_client_response_schemas(
out_dir: &::std::path::Path,
) -> ::anyhow::Result<Vec<GeneratedSchema>> {
let mut schemas = Vec::new();
) -> ::anyhow::Result<()> {
$(
schemas.push(write_json_schema::<$response>(out_dir, stringify!($response))?);
crate::export::write_json_schema::<$response>(out_dir, stringify!($response))?;
)*
Ok(schemas)
}
#[allow(clippy::vec_init_then_push)]
pub fn export_client_param_schemas(
out_dir: &::std::path::Path,
) -> ::anyhow::Result<Vec<GeneratedSchema>> {
let mut schemas = Vec::new();
$(
schemas.push(write_json_schema::<$params>(out_dir, stringify!($params))?);
)*
Ok(schemas)
Ok(())
}
};
}
client_request_definitions! {
Initialize {
params: v1::InitializeParams,
response: v1::InitializeResponse,
},
/// NEW APIs
// Thread lifecycle
ThreadStart => "thread/start" {
params: v2::ThreadStartParams,
response: v2::ThreadStartResponse,
},
ThreadResume => "thread/resume" {
params: v2::ThreadResumeParams,
response: v2::ThreadResumeResponse,
},
ThreadArchive => "thread/archive" {
params: v2::ThreadArchiveParams,
response: v2::ThreadArchiveResponse,
},
ThreadList => "thread/list" {
params: v2::ThreadListParams,
response: v2::ThreadListResponse,
},
ThreadCompact => "thread/compact" {
params: v2::ThreadCompactParams,
response: v2::ThreadCompactResponse,
},
TurnStart => "turn/start" {
params: v2::TurnStartParams,
response: v2::TurnStartResponse,
},
TurnInterrupt => "turn/interrupt" {
params: v2::TurnInterruptParams,
response: v2::TurnInterruptResponse,
#[serde(rename = "model/list")]
#[ts(rename = "model/list")]
ListModels {
params: v2::ListModelsParams,
response: v2::ListModelsResponse,
},
ModelList => "model/list" {
params: v2::ModelListParams,
response: v2::ModelListResponse,
},
LoginAccount => "account/login/start" {
#[serde(rename = "account/login")]
#[ts(rename = "account/login")]
LoginAccount {
params: v2::LoginAccountParams,
response: v2::LoginAccountResponse,
},
CancelLoginAccount => "account/login/cancel" {
params: v2::CancelLoginAccountParams,
response: v2::CancelLoginAccountResponse,
},
LogoutAccount => "account/logout" {
#[serde(rename = "account/logout")]
#[ts(rename = "account/logout")]
LogoutAccount {
params: #[ts(type = "undefined")] #[serde(skip_serializing_if = "Option::is_none")] Option<()>,
response: v2::LogoutAccountResponse,
},
GetAccountRateLimits => "account/rateLimits/read" {
#[serde(rename = "account/rateLimits/read")]
#[ts(rename = "account/rateLimits/read")]
GetAccountRateLimits {
params: #[ts(type = "undefined")] #[serde(skip_serializing_if = "Option::is_none")] Option<()>,
response: v2::GetAccountRateLimitsResponse,
},
FeedbackUpload => "feedback/upload" {
params: v2::FeedbackUploadParams,
response: v2::FeedbackUploadResponse,
#[serde(rename = "feedback/upload")]
#[ts(rename = "feedback/upload")]
UploadFeedback {
params: v2::UploadFeedbackParams,
response: v2::UploadFeedbackResponse,
},
GetAccount => "account/read" {
params: v2::GetAccountParams,
#[serde(rename = "account/read")]
#[ts(rename = "account/read")]
GetAccount {
params: #[ts(type = "undefined")] #[serde(skip_serializing_if = "Option::is_none")] Option<()>,
response: v2::GetAccountResponse,
},
/// DEPRECATED APIs below
Initialize {
params: v1::InitializeParams,
response: v1::InitializeResponse,
},
NewConversation {
params: v1::NewConversationParams,
response: v1::NewConversationResponse,
@@ -228,7 +188,6 @@ client_request_definitions! {
params: #[ts(type = "undefined")] #[serde(skip_serializing_if = "Option::is_none")] Option<()>,
response: v1::LoginChatGptResponse,
},
// DEPRECATED in favor of CancelLoginAccount
CancelLoginChatGpt {
params: v1::CancelLoginChatGptParams,
response: v1::CancelLoginChatGptResponse,
@@ -237,7 +196,6 @@ client_request_definitions! {
params: #[ts(type = "undefined")] #[serde(skip_serializing_if = "Option::is_none")] Option<()>,
response: v1::LogoutChatGptResponse,
},
/// DEPRECATED in favor of GetAccount
GetAuthStatus {
params: v1::GetAuthStatusParams,
response: v1::GetAuthStatusResponse,
@@ -318,101 +276,13 @@ macro_rules! server_request_definitions {
Ok(())
}
#[allow(clippy::vec_init_then_push)]
pub fn export_server_response_schemas(
out_dir: &Path,
) -> ::anyhow::Result<Vec<GeneratedSchema>> {
let mut schemas = Vec::new();
paste! {
$(schemas.push(crate::export::write_json_schema::<[<$variant Response>]>(out_dir, stringify!([<$variant Response>]))?);)*
}
Ok(schemas)
}
#[allow(clippy::vec_init_then_push)]
pub fn export_server_param_schemas(
out_dir: &Path,
) -> ::anyhow::Result<Vec<GeneratedSchema>> {
let mut schemas = Vec::new();
paste! {
$(schemas.push(crate::export::write_json_schema::<[<$variant Params>]>(out_dir, stringify!([<$variant Params>]))?);)*
}
Ok(schemas)
}
};
}
/// Generates `ServerNotification` enum and helpers, including a JSON Schema
/// exporter for each notification.
macro_rules! server_notification_definitions {
(
$(
$(#[$variant_meta:meta])*
$variant:ident $(=> $wire:literal)? ( $payload:ty )
),* $(,)?
) => {
/// Notification sent from the server to the client.
#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema, TS, Display)]
#[serde(tag = "method", content = "params", rename_all = "camelCase")]
#[strum(serialize_all = "camelCase")]
pub enum ServerNotification {
$(
$(#[$variant_meta])*
$(#[serde(rename = $wire)] #[ts(rename = $wire)] #[strum(serialize = $wire)])?
$variant($payload),
)*
}
impl ServerNotification {
pub fn to_params(self) -> Result<serde_json::Value, serde_json::Error> {
match self {
$(Self::$variant(params) => serde_json::to_value(params),)*
}
}
}
impl TryFrom<JSONRPCNotification> for ServerNotification {
type Error = serde_json::Error;
fn try_from(value: JSONRPCNotification) -> Result<Self, Self::Error> {
serde_json::from_value(serde_json::to_value(value)?)
}
}
#[allow(clippy::vec_init_then_push)]
pub fn export_server_notification_schemas(
out_dir: &::std::path::Path,
) -> ::anyhow::Result<Vec<GeneratedSchema>> {
let mut schemas = Vec::new();
$(schemas.push(crate::export::write_json_schema::<$payload>(out_dir, stringify!($payload))?);)*
Ok(schemas)
}
};
}
/// Notifications sent from the client to the server.
macro_rules! client_notification_definitions {
(
$(
$(#[$variant_meta:meta])*
$variant:ident $( ( $payload:ty ) )?
),* $(,)?
) => {
#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema, TS, Display)]
#[serde(tag = "method", content = "params", rename_all = "camelCase")]
#[strum(serialize_all = "camelCase")]
pub enum ClientNotification {
$(
$(#[$variant_meta])*
$variant $( ( $payload ) )?,
)*
}
pub fn export_client_notification_schemas(
_out_dir: &::std::path::Path,
) -> ::anyhow::Result<Vec<GeneratedSchema>> {
let schemas = Vec::new();
$( $(schemas.push(crate::export::write_json_schema::<$payload>(_out_dir, stringify!($payload))?);)? )*
Ok(schemas)
) -> ::anyhow::Result<()> {
paste! {
$(crate::export::write_json_schema::<[<$variant Response>]>(out_dir, stringify!([<$variant Response>]))?;)*
}
Ok(())
}
};
}
@@ -496,33 +366,52 @@ pub struct FuzzyFileSearchResponse {
pub files: Vec<FuzzyFileSearchResult>,
}
server_notification_definitions! {
/// Notification sent from the server to the client.
#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema, TS, Display)]
#[serde(tag = "method", content = "params", rename_all = "camelCase")]
#[strum(serialize_all = "camelCase")]
pub enum ServerNotification {
/// NEW NOTIFICATIONS
ThreadStarted => "thread/started" (v2::ThreadStartedNotification),
TurnStarted => "turn/started" (v2::TurnStartedNotification),
TurnCompleted => "turn/completed" (v2::TurnCompletedNotification),
ItemStarted => "item/started" (v2::ItemStartedNotification),
ItemCompleted => "item/completed" (v2::ItemCompletedNotification),
AgentMessageDelta => "item/agentMessage/delta" (v2::AgentMessageDeltaNotification),
CommandExecutionOutputDelta => "item/commandExecution/outputDelta" (v2::CommandExecutionOutputDeltaNotification),
McpToolCallProgress => "item/mcpToolCall/progress" (v2::McpToolCallProgressNotification),
AccountUpdated => "account/updated" (v2::AccountUpdatedNotification),
AccountRateLimitsUpdated => "account/rateLimits/updated" (v2::AccountRateLimitsUpdatedNotification),
#[serde(rename = "account/login/completed")]
#[ts(rename = "account/login/completed")]
#[strum(serialize = "account/login/completed")]
AccountLoginCompleted(v2::AccountLoginCompletedNotification),
#[serde(rename = "account/rateLimits/updated")]
#[ts(rename = "account/rateLimits/updated")]
#[strum(serialize = "account/rateLimits/updated")]
AccountRateLimitsUpdated(RateLimitSnapshot),
/// DEPRECATED NOTIFICATIONS below
/// Authentication status changed
AuthStatusChange(v1::AuthStatusChangeNotification),
/// Deprecated: use `account/login/completed` instead.
/// ChatGPT login flow completed
LoginChatGptComplete(v1::LoginChatGptCompleteNotification),
/// The special session configured event for a new or resumed conversation.
SessionConfigured(v1::SessionConfiguredNotification),
}
client_notification_definitions! {
impl ServerNotification {
pub fn to_params(self) -> Result<serde_json::Value, serde_json::Error> {
match self {
ServerNotification::AccountRateLimitsUpdated(params) => serde_json::to_value(params),
ServerNotification::AuthStatusChange(params) => serde_json::to_value(params),
ServerNotification::LoginChatGptComplete(params) => serde_json::to_value(params),
ServerNotification::SessionConfigured(params) => serde_json::to_value(params),
}
}
}
impl TryFrom<JSONRPCNotification> for ServerNotification {
type Error = serde_json::Error;
fn try_from(value: JSONRPCNotification) -> Result<Self, Self::Error> {
serde_json::from_value(serde_json::to_value(value)?)
}
}
/// Notification sent from the client to the server.
#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema, TS, Display)]
#[serde(tag = "method", content = "params", rename_all = "camelCase")]
#[strum(serialize_all = "camelCase")]
pub enum ClientNotification {
Initialized,
}
@@ -682,7 +571,7 @@ mod tests {
};
assert_eq!(
json!({
"method": "account/login/start",
"method": "account/login",
"id": 2,
"params": {
"type": "apiKey",
@@ -698,11 +587,11 @@ mod tests {
fn serialize_account_login_chatgpt() -> Result<()> {
let request = ClientRequest::LoginAccount {
request_id: RequestId::Integer(3),
params: v2::LoginAccountParams::Chatgpt,
params: v2::LoginAccountParams::ChatGpt,
};
assert_eq!(
json!({
"method": "account/login/start",
"method": "account/login",
"id": 3,
"params": {
"type": "chatgpt"
@@ -733,17 +622,12 @@ mod tests {
fn serialize_get_account() -> Result<()> {
let request = ClientRequest::GetAccount {
request_id: RequestId::Integer(5),
params: v2::GetAccountParams {
refresh_token: false,
},
params: None,
};
assert_eq!(
json!({
"method": "account/read",
"id": 5,
"params": {
"refreshToken": false
}
}),
serde_json::to_value(&request)?,
);
@@ -752,16 +636,19 @@ mod tests {
#[test]
fn account_serializes_fields_in_camel_case() -> Result<()> {
let api_key = v2::Account::ApiKey {};
let api_key = v2::Account::ApiKey {
api_key: "secret".to_string(),
};
assert_eq!(
json!({
"type": "apiKey",
"apiKey": "secret",
}),
serde_json::to_value(&api_key)?,
);
let chatgpt = v2::Account::Chatgpt {
email: "user@example.com".to_string(),
let chatgpt = v2::Account::ChatGpt {
email: Some("user@example.com".to_string()),
plan_type: PlanType::Plus,
};
assert_eq!(
@@ -778,16 +665,16 @@ mod tests {
#[test]
fn serialize_list_models() -> Result<()> {
let request = ClientRequest::ModelList {
let request = ClientRequest::ListModels {
request_id: RequestId::Integer(6),
params: v2::ModelListParams::default(),
params: v2::ListModelsParams::default(),
};
assert_eq!(
json!({
"method": "model/list",
"id": 6,
"params": {
"limit": null,
"pageSize": null,
"cursor": null
}
}),

View File

@@ -11,7 +11,6 @@ use codex_protocol::models::ResponseItem;
use codex_protocol::protocol::AskForApproval;
use codex_protocol::protocol::EventMsg;
use codex_protocol::protocol::SandboxPolicy;
use codex_protocol::protocol::SessionSource;
use codex_protocol::protocol::TurnAbortReason;
use schemars::JsonSchema;
use serde::Deserialize;
@@ -114,18 +113,6 @@ pub struct ConversationSummary {
pub preview: String,
pub timestamp: Option<String>,
pub model_provider: String,
pub cwd: PathBuf,
pub cli_version: String,
pub source: SessionSource,
pub git_info: Option<ConversationGitInfo>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "snake_case")]
pub struct ConversationGitInfo {
pub sha: Option<String>,
pub branch: Option<String>,
pub origin_url: Option<String>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
@@ -387,9 +374,10 @@ pub enum InputItem {
LocalImage { path: PathBuf },
}
// Deprecated notifications (v1)
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
/// Deprecated in favor of AccountLoginCompletedNotification.
pub struct LoginChatGptCompleteNotification {
#[schemars(with = "String")]
pub login_id: Uuid,
@@ -412,7 +400,6 @@ pub struct SessionConfiguredNotification {
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
/// Deprecated notification. Use AccountUpdatedNotification instead.
pub struct AuthStatusChangeNotification {
pub auth_method: Option<AuthMode>,
}

View File

@@ -1,144 +1,35 @@
use std::collections::HashMap;
use std::path::PathBuf;
use crate::protocol::common::AuthMode;
use codex_protocol::ConversationId;
use codex_protocol::account::PlanType;
use codex_protocol::config_types::ReasoningEffort;
use codex_protocol::config_types::ReasoningSummary;
use codex_protocol::items::AgentMessageContent as CoreAgentMessageContent;
use codex_protocol::items::TurnItem as CoreTurnItem;
use codex_protocol::protocol::RateLimitSnapshot as CoreRateLimitSnapshot;
use codex_protocol::protocol::RateLimitWindow as CoreRateLimitWindow;
use codex_protocol::user_input::UserInput as CoreUserInput;
use mcp_types::ContentBlock as McpContentBlock;
use codex_protocol::protocol::RateLimitSnapshot;
use schemars::JsonSchema;
use serde::Deserialize;
use serde::Serialize;
use serde_json::Value as JsonValue;
use ts_rs::TS;
// Macro to declare a camelCased API v2 enum mirroring a core enum which
// tends to use kebab-case.
macro_rules! v2_enum_from_core {
(
pub enum $Name:ident from $Src:path { $( $Variant:ident ),+ $(,)? }
) => {
#[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq, Eq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub enum $Name { $( $Variant ),+ }
impl $Name {
pub fn to_core(self) -> $Src {
match self { $( $Name::$Variant => <$Src>::$Variant ),+ }
}
}
impl From<$Src> for $Name {
fn from(value: $Src) -> Self {
match value { $( <$Src>::$Variant => $Name::$Variant ),+ }
}
}
};
}
v2_enum_from_core!(
pub enum AskForApproval from codex_protocol::protocol::AskForApproval {
UnlessTrusted, OnFailure, OnRequest, Never
}
);
v2_enum_from_core!(
pub enum SandboxMode from codex_protocol::config_types::SandboxMode {
ReadOnly, WorkspaceWrite, DangerFullAccess
}
);
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, JsonSchema, TS)]
#[serde(tag = "mode", rename_all = "camelCase")]
#[ts(tag = "mode")]
#[ts(export_to = "v2/")]
pub enum SandboxPolicy {
DangerFullAccess,
ReadOnly,
WorkspaceWrite {
#[serde(default)]
writable_roots: Vec<PathBuf>,
#[serde(default)]
network_access: bool,
#[serde(default)]
exclude_tmpdir_env_var: bool,
#[serde(default)]
exclude_slash_tmp: bool,
},
}
impl SandboxPolicy {
pub fn to_core(&self) -> codex_protocol::protocol::SandboxPolicy {
match self {
SandboxPolicy::DangerFullAccess => {
codex_protocol::protocol::SandboxPolicy::DangerFullAccess
}
SandboxPolicy::ReadOnly => codex_protocol::protocol::SandboxPolicy::ReadOnly,
SandboxPolicy::WorkspaceWrite {
writable_roots,
network_access,
exclude_tmpdir_env_var,
exclude_slash_tmp,
} => codex_protocol::protocol::SandboxPolicy::WorkspaceWrite {
writable_roots: writable_roots.clone(),
network_access: *network_access,
exclude_tmpdir_env_var: *exclude_tmpdir_env_var,
exclude_slash_tmp: *exclude_slash_tmp,
},
}
}
}
impl From<codex_protocol::protocol::SandboxPolicy> for SandboxPolicy {
fn from(value: codex_protocol::protocol::SandboxPolicy) -> Self {
match value {
codex_protocol::protocol::SandboxPolicy::DangerFullAccess => {
SandboxPolicy::DangerFullAccess
}
codex_protocol::protocol::SandboxPolicy::ReadOnly => SandboxPolicy::ReadOnly,
codex_protocol::protocol::SandboxPolicy::WorkspaceWrite {
writable_roots,
network_access,
exclude_tmpdir_env_var,
exclude_slash_tmp,
} => SandboxPolicy::WorkspaceWrite {
writable_roots,
network_access,
exclude_tmpdir_env_var,
exclude_slash_tmp,
},
}
}
}
use uuid::Uuid;
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(tag = "type", rename_all = "camelCase")]
#[ts(tag = "type")]
#[ts(export_to = "v2/")]
pub enum Account {
#[serde(rename = "apiKey", rename_all = "camelCase")]
#[ts(rename = "apiKey", rename_all = "camelCase")]
ApiKey {},
ApiKey { api_key: String },
#[serde(rename = "chatgpt", rename_all = "camelCase")]
#[ts(rename = "chatgpt", rename_all = "camelCase")]
Chatgpt { email: String, plan_type: PlanType },
ChatGpt {
email: Option<String>,
plan_type: PlanType,
},
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(tag = "type")]
#[ts(tag = "type")]
#[ts(export_to = "v2/")]
pub enum LoginAccountParams {
#[serde(rename = "apiKey", rename_all = "camelCase")]
#[ts(rename = "apiKey", rename_all = "camelCase")]
#[serde(rename = "apiKey")]
#[ts(rename = "apiKey")]
ApiKey {
#[serde(rename = "apiKey")]
#[ts(rename = "apiKey")]
@@ -146,81 +37,48 @@ pub enum LoginAccountParams {
},
#[serde(rename = "chatgpt")]
#[ts(rename = "chatgpt")]
Chatgpt,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(tag = "type", rename_all = "camelCase")]
#[ts(tag = "type")]
#[ts(export_to = "v2/")]
pub enum LoginAccountResponse {
#[serde(rename = "apiKey", rename_all = "camelCase")]
#[ts(rename = "apiKey", rename_all = "camelCase")]
ApiKey {},
#[serde(rename = "chatgpt", rename_all = "camelCase")]
#[ts(rename = "chatgpt", rename_all = "camelCase")]
Chatgpt {
// Use plain String for identifiers to avoid TS/JSON Schema quirks around uuid-specific types.
// Convert to/from UUIDs at the application layer as needed.
login_id: String,
/// URL the client should open in a browser to initiate the OAuth flow.
auth_url: String,
},
ChatGpt,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct CancelLoginAccountParams {
pub login_id: String,
pub struct LoginAccountResponse {
/// Only set if the login method is ChatGPT.
#[schemars(with = "String")]
pub login_id: Option<Uuid>,
/// URL the client should open in a browser to initiate the OAuth flow.
/// Only set if the login method is ChatGPT.
pub auth_url: Option<String>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct CancelLoginAccountResponse {}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct LogoutAccountResponse {}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct GetAccountRateLimitsResponse {
pub rate_limits: RateLimitSnapshot,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct GetAccountParams {
#[serde(default)]
pub refresh_token: bool,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct GetAccountResponse {
pub account: Option<Account>,
pub requires_openai_auth: bool,
pub account: Account,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct ModelListParams {
pub struct ListModelsParams {
/// Optional page size; defaults to a reasonable server-side value.
pub page_size: Option<usize>,
/// Opaque pagination cursor returned by a previous call.
pub cursor: Option<String>,
/// Optional page size; defaults to a reasonable server-side value.
pub limit: Option<u32>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct Model {
pub id: String,
pub model: String,
@@ -234,7 +92,6 @@ pub struct Model {
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct ReasoningEffortOption {
pub reasoning_effort: ReasoningEffort,
pub description: String,
@@ -242,18 +99,16 @@ pub struct ReasoningEffortOption {
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct ModelListResponse {
pub data: Vec<Model>,
pub struct ListModelsResponse {
pub items: Vec<Model>,
/// Opaque cursor to pass to the next call to continue after the last item.
/// If None, there are no more items to return.
/// if None, there are no more items to return.
pub next_cursor: Option<String>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct FeedbackUploadParams {
pub struct UploadFeedbackParams {
pub classification: String,
pub reason: Option<String>,
pub conversation_id: Option<ConversationId>,
@@ -262,595 +117,6 @@ pub struct FeedbackUploadParams {
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct FeedbackUploadResponse {
pub struct UploadFeedbackResponse {
pub thread_id: String,
}
// === Threads, Turns, and Items ===
// Thread APIs
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct ThreadStartParams {
pub model: Option<String>,
pub model_provider: Option<String>,
pub cwd: Option<String>,
pub approval_policy: Option<AskForApproval>,
pub sandbox: Option<SandboxMode>,
pub config: Option<HashMap<String, serde_json::Value>>,
pub base_instructions: Option<String>,
pub developer_instructions: Option<String>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct ThreadStartResponse {
pub thread: Thread,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct ThreadResumeParams {
pub thread_id: String,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct ThreadResumeResponse {
pub thread: Thread,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct ThreadArchiveParams {
pub thread_id: String,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct ThreadArchiveResponse {}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct ThreadListParams {
/// Opaque pagination cursor returned by a previous call.
pub cursor: Option<String>,
/// Optional page size; defaults to a reasonable server-side value.
pub limit: Option<u32>,
/// Optional provider filter; when set, only sessions recorded under these
/// providers are returned. When present but empty, includes all providers.
pub model_providers: Option<Vec<String>>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct ThreadListResponse {
pub data: Vec<Thread>,
/// Opaque cursor to pass to the next call to continue after the last item.
/// if None, there are no more items to return.
pub next_cursor: Option<String>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct ThreadCompactParams {
pub thread_id: String,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct ThreadCompactResponse {}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct Thread {
pub id: String,
/// Usually the first user message in the thread, if available.
pub preview: String,
pub model_provider: String,
/// Unix timestamp (in seconds) when the thread was created.
pub created_at: i64,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct AccountUpdatedNotification {
pub auth_mode: Option<AuthMode>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct Turn {
pub id: String,
pub items: Vec<ThreadItem>,
pub status: TurnStatus,
pub error: Option<TurnError>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct TurnError {
pub message: String,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub enum TurnStatus {
Completed,
Interrupted,
Failed,
InProgress,
}
// Turn APIs
#[derive(Serialize, Deserialize, Debug, Default, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct TurnStartParams {
pub thread_id: String,
pub input: Vec<UserInput>,
/// Override the working directory for this turn and subsequent turns.
pub cwd: Option<PathBuf>,
/// Override the approval policy for this turn and subsequent turns.
pub approval_policy: Option<AskForApproval>,
/// Override the sandbox policy for this turn and subsequent turns.
pub sandbox_policy: Option<SandboxPolicy>,
/// Override the model for this turn and subsequent turns.
pub model: Option<String>,
/// Override the reasoning effort for this turn and subsequent turns.
pub effort: Option<ReasoningEffort>,
/// Override the reasoning summary for this turn and subsequent turns.
pub summary: Option<ReasoningSummary>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct TurnStartResponse {
pub turn: Turn,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct TurnInterruptParams {
pub thread_id: String,
pub turn_id: String,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct TurnInterruptResponse {}
// User input types
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(tag = "type", rename_all = "camelCase")]
#[ts(tag = "type")]
#[ts(export_to = "v2/")]
pub enum UserInput {
Text { text: String },
Image { url: String },
LocalImage { path: PathBuf },
}
impl UserInput {
pub fn into_core(self) -> CoreUserInput {
match self {
UserInput::Text { text } => CoreUserInput::Text { text },
UserInput::Image { url } => CoreUserInput::Image { image_url: url },
UserInput::LocalImage { path } => CoreUserInput::LocalImage { path },
}
}
}
impl From<CoreUserInput> for UserInput {
fn from(value: CoreUserInput) -> Self {
match value {
CoreUserInput::Text { text } => UserInput::Text { text },
CoreUserInput::Image { image_url } => UserInput::Image { url: image_url },
CoreUserInput::LocalImage { path } => UserInput::LocalImage { path },
_ => unreachable!("unsupported user input variant"),
}
}
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(tag = "type", rename_all = "camelCase")]
#[ts(tag = "type")]
#[ts(export_to = "v2/")]
pub enum ThreadItem {
UserMessage {
id: String,
content: Vec<UserInput>,
},
AgentMessage {
id: String,
text: String,
},
Reasoning {
id: String,
text: String,
},
CommandExecution {
id: String,
command: String,
aggregated_output: String,
exit_code: Option<i32>,
status: CommandExecutionStatus,
duration_ms: Option<i64>,
},
FileChange {
id: String,
changes: Vec<FileUpdateChange>,
status: PatchApplyStatus,
},
McpToolCall {
id: String,
server: String,
tool: String,
status: McpToolCallStatus,
arguments: JsonValue,
result: Option<McpToolCallResult>,
error: Option<McpToolCallError>,
},
WebSearch {
id: String,
query: String,
},
TodoList {
id: String,
items: Vec<TodoItem>,
},
ImageView {
id: String,
path: String,
},
CodeReview {
id: String,
review: String,
},
}
impl From<CoreTurnItem> for ThreadItem {
fn from(value: CoreTurnItem) -> Self {
match value {
CoreTurnItem::UserMessage(user) => ThreadItem::UserMessage {
id: user.id,
content: user.content.into_iter().map(UserInput::from).collect(),
},
CoreTurnItem::AgentMessage(agent) => {
let text = agent
.content
.into_iter()
.map(|entry| match entry {
CoreAgentMessageContent::Text { text } => text,
})
.collect::<String>();
ThreadItem::AgentMessage { id: agent.id, text }
}
CoreTurnItem::Reasoning(reasoning) => {
let text = if !reasoning.summary_text.is_empty() {
reasoning.summary_text.join("\n")
} else {
reasoning.raw_content.join("\n")
};
ThreadItem::Reasoning {
id: reasoning.id,
text,
}
}
CoreTurnItem::WebSearch(search) => ThreadItem::WebSearch {
id: search.id,
query: search.query,
},
}
}
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub enum CommandExecutionStatus {
InProgress,
Completed,
Failed,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct FileUpdateChange {
pub path: String,
pub kind: PatchChangeKind,
pub diff: String,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub enum PatchChangeKind {
Add,
Delete,
Update,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub enum PatchApplyStatus {
Completed,
Failed,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub enum McpToolCallStatus {
InProgress,
Completed,
Failed,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct McpToolCallResult {
pub content: Vec<McpContentBlock>,
pub structured_content: JsonValue,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct McpToolCallError {
pub message: String,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct TodoItem {
pub id: String,
pub text: String,
pub completed: bool,
}
// === Server Notifications ===
// Thread/Turn lifecycle notifications and item progress events
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct ThreadStartedNotification {
pub thread: Thread,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct TurnStartedNotification {
pub turn: Turn,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct Usage {
pub input_tokens: i32,
pub cached_input_tokens: i32,
pub output_tokens: i32,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct TurnCompletedNotification {
pub turn: Turn,
// TODO: should usage be stored on the Turn object, and we return that instead?
pub usage: Usage,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct ItemStartedNotification {
pub item: ThreadItem,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct ItemCompletedNotification {
pub item: ThreadItem,
}
// Item-specific progress notifications
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct AgentMessageDeltaNotification {
pub item_id: String,
pub delta: String,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct CommandExecutionOutputDeltaNotification {
pub item_id: String,
pub delta: String,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct McpToolCallProgressNotification {
pub item_id: String,
pub message: String,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct AccountRateLimitsUpdatedNotification {
pub rate_limits: RateLimitSnapshot,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct RateLimitSnapshot {
pub primary: Option<RateLimitWindow>,
pub secondary: Option<RateLimitWindow>,
}
impl From<CoreRateLimitSnapshot> for RateLimitSnapshot {
fn from(value: CoreRateLimitSnapshot) -> Self {
Self {
primary: value.primary.map(RateLimitWindow::from),
secondary: value.secondary.map(RateLimitWindow::from),
}
}
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct RateLimitWindow {
pub used_percent: i32,
pub window_duration_mins: Option<i64>,
pub resets_at: Option<i64>,
}
impl From<CoreRateLimitWindow> for RateLimitWindow {
fn from(value: CoreRateLimitWindow) -> Self {
Self {
used_percent: value.used_percent.round() as i32,
window_duration_mins: value.window_minutes,
resets_at: value.resets_at,
}
}
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct AccountLoginCompletedNotification {
// Use plain String for identifiers to avoid TS/JSON Schema quirks around uuid-specific types.
// Convert to/from UUIDs at the application layer as needed.
pub login_id: Option<String>,
pub success: bool,
pub error: Option<String>,
}
#[cfg(test)]
mod tests {
use super::*;
use codex_protocol::items::AgentMessageContent;
use codex_protocol::items::AgentMessageItem;
use codex_protocol::items::ReasoningItem;
use codex_protocol::items::TurnItem;
use codex_protocol::items::UserMessageItem;
use codex_protocol::items::WebSearchItem;
use codex_protocol::user_input::UserInput as CoreUserInput;
use pretty_assertions::assert_eq;
use std::path::PathBuf;
#[test]
fn core_turn_item_into_thread_item_converts_supported_variants() {
let user_item = TurnItem::UserMessage(UserMessageItem {
id: "user-1".to_string(),
content: vec![
CoreUserInput::Text {
text: "hello".to_string(),
},
CoreUserInput::Image {
image_url: "https://example.com/image.png".to_string(),
},
CoreUserInput::LocalImage {
path: PathBuf::from("local/image.png"),
},
],
});
assert_eq!(
ThreadItem::from(user_item),
ThreadItem::UserMessage {
id: "user-1".to_string(),
content: vec![
UserInput::Text {
text: "hello".to_string(),
},
UserInput::Image {
url: "https://example.com/image.png".to_string(),
},
UserInput::LocalImage {
path: PathBuf::from("local/image.png"),
},
],
}
);
let agent_item = TurnItem::AgentMessage(AgentMessageItem {
id: "agent-1".to_string(),
content: vec![
AgentMessageContent::Text {
text: "Hello ".to_string(),
},
AgentMessageContent::Text {
text: "world".to_string(),
},
],
});
assert_eq!(
ThreadItem::from(agent_item),
ThreadItem::AgentMessage {
id: "agent-1".to_string(),
text: "Hello world".to_string(),
}
);
let reasoning_item = TurnItem::Reasoning(ReasoningItem {
id: "reasoning-1".to_string(),
summary_text: vec!["line one".to_string(), "line two".to_string()],
raw_content: vec![],
});
assert_eq!(
ThreadItem::from(reasoning_item),
ThreadItem::Reasoning {
id: "reasoning-1".to_string(),
text: "line one\nline two".to_string(),
}
);
let search_item = TurnItem::WebSearch(WebSearchItem {
id: "search-1".to_string(),
query: "docs".to_string(),
});
assert_eq!(
ThreadItem::from(search_item),
ThreadItem::WebSearch {
id: "search-1".to_string(),
query: "docs".to_string(),
}
);
}
}

View File

@@ -1,6 +1,6 @@
# codex-app-server
`codex app-server` is the interface Codex uses to power rich interfaces such as the [Codex VS Code extension](https://marketplace.visualstudio.com/items?itemName=openai.chatgpt). The message schema is currently unstable, but those who wish to build experimental UIs on top of Codex may find it valuable.
`codex app-server` is the harness Codex uses to power rich interfaces such as the [Codex VS Code extension](https://marketplace.visualstudio.com/items?itemName=openai.chatgpt). The message schema is currently unstable, but those who wish to build experimental UIs on top of Codex may find it valuable.
## Protocol
@@ -8,253 +8,8 @@ Similar to [MCP](https://modelcontextprotocol.io/), `codex app-server` supports
## Message Schema
Currently, you can dump a TypeScript version of the schema using `codex app-server generate-ts`, or a JSON Schema bundle via `codex app-server generate-json-schema`. Each output is specific to the version of Codex you used to run the command, so the generated artifacts are guaranteed to match that version.
Currently, you can dump a TypeScript version of the schema using `codex generate-ts`. It is specific to the version of Codex you used to run `generate-ts`, so the two are guaranteed to be compatible.
```
codex app-server generate-ts --out DIR
codex app-server generate-json-schema --out DIR
codex generate-ts --out DIR
```
## Initialization
Clients must send a single `initialize` request before invoking any other method, then acknowledge with an `initialized` notification. The server returns the user agent string it will present to upstream services; subsequent requests issued before initialization receive a `"Not initialized"` error, and repeated `initialize` calls receive an `"Already initialized"` error.
Example:
```json
{ "method": "initialize", "id": 0, "params": {
"clientInfo": { "name": "codex-vscode", "title": "Codex VS Code Extension", "version": "0.1.0" }
} }
{ "id": 0, "result": { "userAgent": "codex-app-server/0.1.0 codex-vscode/0.1.0" } }
{ "method": "initialized" }
```
## Core primitives
We have 3 top level primitives:
- Thread - a conversation between the Codex agent and a user. Each thread contains multiple turns.
- Turn - one turn of the conversation, typically starting with a user message and finishing with an agent message. Each turn contains multiple items.
- Item - represents user inputs and agent outputs as part of the turn, persisted and used as the context for future conversations.
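To make the nesting concrete, below is a minimal sketch of a single turn as it appears on the wire, using the item shapes defined by this protocol (ids and values are illustrative, not real output):

```json
{
  "id": "turn_456",
  "status": "completed",
  "error": null,
  "items": [
    { "type": "userMessage", "id": "item_1", "content": [ { "type": "text", "text": "Summarize this repo" } ] },
    { "type": "webSearch", "id": "item_2", "query": "repository overview" },
    { "type": "agentMessage", "id": "item_3", "text": "This repo contains the Codex CLI and app server." }
  ]
}
```

A thread itself is identified only by its `id` plus metadata (`preview`, `modelProvider`, `createdAt`); its turns and items arrive via notifications rather than being embedded in the thread object.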
## Thread & turn endpoints
The JSON-RPC API exposes dedicated methods for managing Codex conversations. Threads store long-lived conversation metadata, and turns store the per-message exchange (input → Codex output, including streamed items). Use the thread APIs to create, list, or archive sessions, then drive the conversation with turn APIs and notifications.
### Quick reference
- `thread/start` — create a new thread; emits `thread/started` and auto-subscribes you to turn/item events for that thread.
- `thread/resume` — reopen an existing thread by id so subsequent `turn/start` calls append to it.
- `thread/list` — page through stored rollouts; supports cursor-based pagination and optional `modelProviders` filtering.
- `thread/archive` — move a thread's rollout file into the archived directory; returns `{}` on success.
- `turn/start` — add user input to a thread and begin Codex generation; responds with the initial `turn` object and streams `turn/started`, `item/*`, and `turn/completed` notifications.
- `turn/interrupt` — request cancellation of an in-flight turn by `(thread_id, turn_id)`; success is an empty `{}` response and the turn finishes with `status: "interrupted"`.
### 1) Start or resume a thread
Start a fresh thread when you need a new Codex conversation.
```json
{ "method": "thread/start", "id": 10, "params": {
// Optionally set config settings. If not specified, will use the user's
// current config settings.
"model": "gpt-5-codex",
"cwd": "/Users/me/project",
"approvalPolicy": "never",
"sandbox": "workspaceWrite",
} }
{ "id": 10, "result": {
"thread": {
"id": "thr_123",
"preview": "",
"modelProvider": "openai",
"createdAt": 1730910000
}
} }
{ "method": "thread/started", "params": { "thread": { } } }
```
To continue a stored session, call `thread/resume` with the `thread.id` you previously recorded. The response shape matches `thread/start`, and no additional notifications are emitted:
```json
{ "method": "thread/resume", "id": 11, "params": { "threadId": "thr_123" } }
{ "id": 11, "result": { "thread": { "id": "thr_123", } } }
```
### 2) List threads (pagination & filters)
`thread/list` lets you render a history UI. Pass any combination of:
- `cursor` — opaque string from a prior response; omit for the first page.
- `limit` — server defaults to a reasonable page size if unset.
- `modelProviders` — restrict results to specific providers; unset, null, or an empty array will include all providers.
Example:
```json
{ "method": "thread/list", "id": 20, "params": {
"cursor": null,
"limit": 25,
} }
{ "id": 20, "result": {
"data": [
{ "id": "thr_a", "preview": "Create a TUI", "modelProvider": "openai", "createdAt": 1730831111 },
{ "id": "thr_b", "preview": "Fix tests", "modelProvider": "openai", "createdAt": 1730750000 }
],
"nextCursor": "opaque-token-or-null"
} }
```
When `nextCursor` is `null`, you've reached the final page.
### 3) Archive a thread
Use `thread/archive` to move the persisted rollout (stored as a JSONL file on disk) into the archived sessions directory.
```json
{ "method": "thread/archive", "id": 21, "params": { "threadId": "thr_b" } }
{ "id": 21, "result": {} }
```
An archived thread will not appear in future calls to `thread/list`.
### 4) Start a turn (send user input)
Turns attach user input (text or images) to a thread and trigger Codex generation. The `input` field is a list of discriminated unions:
- `{"type":"text","text":"Explain this diff"}`
- `{"type":"image","url":"https://…png"}`
- `{"type":"localImage","path":"/tmp/screenshot.png"}`
You can optionally specify config overrides on the new turn. If specified, these settings become the default for subsequent turns on the same thread.
```json
{ "method": "turn/start", "id": 30, "params": {
"threadId": "thr_123",
"input": [ { "type": "text", "text": "Run tests" } ],
// Below are optional config overrides
"cwd": "/Users/me/project",
"approvalPolicy": "unlessTrusted",
"sandboxPolicy": {
"mode": "workspaceWrite",
"writableRoots": ["/Users/me/project"],
"networkAccess": true
},
"model": "gpt-5-codex",
"effort": "medium",
"summary": "concise"
} }
{ "id": 30, "result": { "turn": {
"id": "turn_456",
"status": "inProgress",
"items": [],
"error": null
} } }
```
### 5) Interrupt an active turn
You can cancel a running Turn with `turn/interrupt`.
```json
{ "method": "turn/interrupt", "id": 31, "params": {
"threadId": "thr_123",
"turnId": "turn_456"
} }
{ "id": 31, "result": {} }
```
The server requests cancellation of any running subprocesses, then emits a `turn/completed` event with `status: "interrupted"`. Rely on the `turn/completed` notification to know when Codex-side cleanup is done.
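For reference, the final notification for an interrupted turn looks roughly like this (the item list and usage numbers depend on how much work completed before the interrupt):

```json
{ "method": "turn/completed", "params": {
  "turn": { "id": "turn_456", "status": "interrupted", "items": [], "error": null },
  "usage": { "inputTokens": 1200, "cachedInputTokens": 0, "outputTokens": 35 }
} }
```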
## Auth endpoints
The JSON-RPC auth/account surface exposes request/response methods plus server-initiated notifications (no `id`). Use these to determine auth state, start or cancel logins, logout, and inspect ChatGPT rate limits.
### Quick reference
- `account/read` — fetch current account info; optionally refresh tokens.
- `account/login/start` — begin login (`apiKey` or `chatgpt`).
- `account/login/completed` (notify) — emitted when a login attempt finishes (success or error).
- `account/login/cancel` — cancel a pending ChatGPT login by `loginId`.
- `account/logout` — sign out; triggers `account/updated`.
- `account/updated` (notify) — emitted whenever auth mode changes (`authMode`: `apikey`, `chatgpt`, or `null`).
- `account/rateLimits/read` — fetch ChatGPT rate limits; updates arrive via `account/rateLimits/updated` (notify).
### 1) Check auth state
Request:
```json
{ "method": "account/read", "id": 1, "params": { "refreshToken": false } }
```
Response examples:
```json
{ "id": 1, "result": { "account": null, "requiresOpenaiAuth": false } } // No OpenAI auth needed (e.g., OSS/local models)
{ "id": 1, "result": { "account": null, "requiresOpenaiAuth": true } } // OpenAI auth required (typical for OpenAI-hosted models)
{ "id": 1, "result": { "account": { "type": "apiKey" }, "requiresOpenaiAuth": true } }
{ "id": 1, "result": { "account": { "type": "chatgpt", "email": "user@example.com", "planType": "pro" }, "requiresOpenaiAuth": true } }
```
Field notes:
- `refreshToken` (bool): set `true` to force a token refresh.
- `requiresOpenaiAuth` reflects the active provider; when `false`, Codex can run without OpenAI credentials.
### 2) Log in with an API key
1. Send:
```json
{ "method": "account/login/start", "id": 2, "params": { "type": "apiKey", "apiKey": "sk-…" } }
```
2. Expect:
```json
{ "id": 2, "result": { "type": "apiKey" } }
```
3. Notifications:
```json
{ "method": "account/login/completed", "params": { "loginId": null, "success": true, "error": null } }
{ "method": "account/updated", "params": { "authMode": "apikey" } }
```
### 3) Log in with ChatGPT (browser flow)
1. Start:
```json
{ "method": "account/login/start", "id": 3, "params": { "type": "chatgpt" } }
{ "id": 3, "result": { "type": "chatgpt", "loginId": "<uuid>", "authUrl": "https://chatgpt.com/…&redirect_uri=http%3A%2F%2Flocalhost%3A<port>%2Fauth%2Fcallback" } }
```
2. Open `authUrl` in a browser; the app-server hosts the local callback.
3. Wait for notifications:
```json
{ "method": "account/login/completed", "params": { "loginId": "<uuid>", "success": true, "error": null } }
{ "method": "account/updated", "params": { "authMode": "chatgpt" } }
```
### 4) Cancel a ChatGPT login
```json
{ "method": "account/login/cancel", "id": 4, "params": { "loginId": "<uuid>" } }
{ "method": "account/login/completed", "params": { "loginId": "<uuid>", "success": false, "error": "…" } }
```
### 5) Logout
```json
{ "method": "account/logout", "id": 5 }
{ "id": 5, "result": {} }
{ "method": "account/updated", "params": { "authMode": null } }
```
### 6) Rate limits (ChatGPT)
```json
{ "method": "account/rateLimits/read", "id": 6 }
{ "id": 6, "result": { "rateLimits": { "primary": { "usedPercent": 25, "windowDurationMins": 15, "resetsAt": 1730947200 }, "secondary": null } } }
{ "method": "account/rateLimits/updated", "params": { "rateLimits": { … } } }
```
Field notes:
- `usedPercent` is the current usage within the OpenAI quota window.
- `windowDurationMins` is the quota window length.
- `resetsAt` is a Unix timestamp (seconds) for the next reset.
### Dev notes
- `codex app-server generate-ts --out <dir>` emits v2 types under `v2/`.
- `codex app-server generate-json-schema --out <dir>` outputs `codex_app_server_protocol.schemas.json`.
- See [“Authentication and authorization” in the config docs](../../docs/config.md#authentication-and-authorization) for configuration knobs.

File diff suppressed because it is too large

View File

@@ -64,79 +64,64 @@ impl MessageProcessor {
pub(crate) async fn process_request(&mut self, request: JSONRPCRequest) {
let request_id = request.id.clone();
let request_json = match serde_json::to_value(&request) {
Ok(request_json) => request_json,
Err(err) => {
let error = JSONRPCErrorError {
code: INVALID_REQUEST_ERROR_CODE,
message: format!("Invalid request: {err}"),
data: None,
};
self.outgoing.send_error(request_id, error).await;
return;
}
};
if let Ok(request_json) = serde_json::to_value(request)
&& let Ok(codex_request) = serde_json::from_value::<ClientRequest>(request_json)
{
match codex_request {
// Handle Initialize internally so CodexMessageProcessor does not have to concern
// itself with the `initialized` bool.
ClientRequest::Initialize { request_id, params } => {
if self.initialized {
let error = JSONRPCErrorError {
code: INVALID_REQUEST_ERROR_CODE,
message: "Already initialized".to_string(),
data: None,
};
self.outgoing.send_error(request_id, error).await;
return;
} else {
let ClientInfo {
name,
title: _title,
version,
} = params.client_info;
let user_agent_suffix = format!("{name}; {version}");
if let Ok(mut suffix) = USER_AGENT_SUFFIX.lock() {
*suffix = Some(user_agent_suffix);
}
let codex_request = match serde_json::from_value::<ClientRequest>(request_json) {
Ok(codex_request) => codex_request,
Err(err) => {
let error = JSONRPCErrorError {
code: INVALID_REQUEST_ERROR_CODE,
message: format!("Invalid request: {err}"),
data: None,
};
self.outgoing.send_error(request_id, error).await;
return;
}
};
let user_agent = get_codex_user_agent();
let response = InitializeResponse { user_agent };
self.outgoing.send_response(request_id, response).await;
match codex_request {
// Handle Initialize internally so CodexMessageProcessor does not have to concern
// itself with the `initialized` bool.
ClientRequest::Initialize { request_id, params } => {
if self.initialized {
let error = JSONRPCErrorError {
code: INVALID_REQUEST_ERROR_CODE,
message: "Already initialized".to_string(),
data: None,
};
self.outgoing.send_error(request_id, error).await;
return;
} else {
let ClientInfo {
name,
title: _title,
version,
} = params.client_info;
let user_agent_suffix = format!("{name}; {version}");
if let Ok(mut suffix) = USER_AGENT_SUFFIX.lock() {
*suffix = Some(user_agent_suffix);
self.initialized = true;
return;
}
}
_ => {
if !self.initialized {
let error = JSONRPCErrorError {
code: INVALID_REQUEST_ERROR_CODE,
message: "Not initialized".to_string(),
data: None,
};
self.outgoing.send_error(request_id, error).await;
return;
}
let user_agent = get_codex_user_agent();
let response = InitializeResponse { user_agent };
self.outgoing.send_response(request_id, response).await;
self.initialized = true;
return;
}
}
_ => {
if !self.initialized {
let error = JSONRPCErrorError {
code: INVALID_REQUEST_ERROR_CODE,
message: "Not initialized".to_string(),
data: None,
};
self.outgoing.send_error(request_id, error).await;
return;
}
}
self.codex_message_processor
.process_request(codex_request)
.await;
} else {
let error = JSONRPCErrorError {
code: INVALID_REQUEST_ERROR_CODE,
message: "Invalid request".to_string(),
data: None,
};
self.outgoing.send_error(request_id, error).await;
}
self.codex_message_processor
.process_request(codex_request)
.await;
}
pub(crate) async fn process_notification(&self, notification: JSONRPCNotification) {

View File

@@ -1,12 +1,11 @@
use codex_app_server_protocol::AuthMode;
use codex_app_server_protocol::Model;
use codex_app_server_protocol::ReasoningEffortOption;
use codex_common::model_presets::ModelPreset;
use codex_common::model_presets::ReasoningEffortPreset;
use codex_common::model_presets::builtin_model_presets;
pub fn supported_models(auth_mode: Option<AuthMode>) -> Vec<Model> {
builtin_model_presets(auth_mode)
pub fn supported_models() -> Vec<Model> {
builtin_model_presets(None)
.into_iter()
.map(model_from_preset)
.collect()

View File

@@ -141,13 +141,9 @@ pub(crate) struct OutgoingError {
#[cfg(test)]
mod tests {
use codex_app_server_protocol::AccountLoginCompletedNotification;
use codex_app_server_protocol::AccountRateLimitsUpdatedNotification;
use codex_app_server_protocol::AccountUpdatedNotification;
use codex_app_server_protocol::AuthMode;
use codex_app_server_protocol::LoginChatGptCompleteNotification;
use codex_app_server_protocol::RateLimitSnapshot;
use codex_app_server_protocol::RateLimitWindow;
use codex_protocol::protocol::RateLimitSnapshot;
use codex_protocol::protocol::RateLimitWindow;
use pretty_assertions::assert_eq;
use serde_json::json;
use uuid::Uuid;
@@ -179,78 +175,28 @@ mod tests {
);
}
#[test]
fn verify_account_login_completed_notification_serialization() {
let notification =
ServerNotification::AccountLoginCompleted(AccountLoginCompletedNotification {
login_id: Some(Uuid::nil().to_string()),
success: true,
error: None,
});
let jsonrpc_notification = OutgoingMessage::AppServerNotification(notification);
assert_eq!(
json!({
"method": "account/login/completed",
"params": {
"loginId": Uuid::nil().to_string(),
"success": true,
"error": null,
},
}),
serde_json::to_value(jsonrpc_notification)
.expect("ensure the notification serializes correctly"),
"ensure the notification serializes correctly"
);
}
#[test]
fn verify_account_rate_limits_notification_serialization() {
let notification =
ServerNotification::AccountRateLimitsUpdated(AccountRateLimitsUpdatedNotification {
rate_limits: RateLimitSnapshot {
primary: Some(RateLimitWindow {
used_percent: 25,
window_duration_mins: Some(15),
resets_at: Some(123),
}),
secondary: None,
},
});
let notification = ServerNotification::AccountRateLimitsUpdated(RateLimitSnapshot {
primary: Some(RateLimitWindow {
used_percent: 25.0,
window_minutes: Some(15),
resets_at: Some(123),
}),
secondary: None,
});
let jsonrpc_notification = OutgoingMessage::AppServerNotification(notification);
assert_eq!(
json!({
"method": "account/rateLimits/updated",
"params": {
"rateLimits": {
"primary": {
"usedPercent": 25,
"windowDurationMins": 15,
"resetsAt": 123
},
"secondary": null
}
},
}),
serde_json::to_value(jsonrpc_notification)
.expect("ensure the notification serializes correctly"),
"ensure the notification serializes correctly"
);
}
#[test]
fn verify_account_updated_notification_serialization() {
let notification = ServerNotification::AccountUpdated(AccountUpdatedNotification {
auth_mode: Some(AuthMode::ApiKey),
});
let jsonrpc_notification = OutgoingMessage::AppServerNotification(notification);
assert_eq!(
json!({
"method": "account/updated",
"params": {
"authMode": "apikey"
"primary": {
"used_percent": 25.0,
"window_minutes": 15,
"resets_at": 123,
},
"secondary": null,
},
}),
serde_json::to_value(jsonrpc_notification)

View File

@@ -13,7 +13,6 @@ base64 = { workspace = true }
chrono = { workspace = true }
codex-app-server-protocol = { workspace = true }
codex-core = { workspace = true }
codex-protocol = { workspace = true }
serde = { workspace = true }
serde_json = { workspace = true }
tokio = { workspace = true, features = [
@@ -22,5 +21,4 @@ tokio = { workspace = true, features = [
"process",
"rt-multi-thread",
] }
uuid = { workspace = true }
wiremock = { workspace = true }

View File

@@ -2,7 +2,6 @@ mod auth_fixtures;
mod mcp_process;
mod mock_model_server;
mod responses;
mod rollout;
pub use auth_fixtures::ChatGptAuthFixture;
pub use auth_fixtures::ChatGptIdTokenClaims;
@@ -11,11 +10,9 @@ pub use auth_fixtures::write_chatgpt_auth;
use codex_app_server_protocol::JSONRPCResponse;
pub use mcp_process::McpProcess;
pub use mock_model_server::create_mock_chat_completions_server;
pub use mock_model_server::create_mock_chat_completions_server_unchecked;
pub use responses::create_apply_patch_sse_response;
pub use responses::create_final_assistant_message_sse_response;
pub use responses::create_shell_sse_response;
pub use rollout::create_fake_rollout;
use serde::de::DeserializeOwned;
pub fn to_response<T: DeserializeOwned>(response: JSONRPCResponse) -> anyhow::Result<T> {

View File

@@ -14,37 +14,30 @@ use anyhow::Context;
use assert_cmd::prelude::*;
use codex_app_server_protocol::AddConversationListenerParams;
use codex_app_server_protocol::ArchiveConversationParams;
use codex_app_server_protocol::CancelLoginAccountParams;
use codex_app_server_protocol::CancelLoginChatGptParams;
use codex_app_server_protocol::ClientInfo;
use codex_app_server_protocol::ClientNotification;
use codex_app_server_protocol::FeedbackUploadParams;
use codex_app_server_protocol::GetAccountParams;
use codex_app_server_protocol::GetAuthStatusParams;
use codex_app_server_protocol::InitializeParams;
use codex_app_server_protocol::InterruptConversationParams;
use codex_app_server_protocol::JSONRPCError;
use codex_app_server_protocol::JSONRPCMessage;
use codex_app_server_protocol::JSONRPCNotification;
use codex_app_server_protocol::JSONRPCRequest;
use codex_app_server_protocol::JSONRPCResponse;
use codex_app_server_protocol::ListConversationsParams;
use codex_app_server_protocol::ListModelsParams;
use codex_app_server_protocol::LoginApiKeyParams;
use codex_app_server_protocol::ModelListParams;
use codex_app_server_protocol::NewConversationParams;
use codex_app_server_protocol::RemoveConversationListenerParams;
use codex_app_server_protocol::RequestId;
use codex_app_server_protocol::ResumeConversationParams;
use codex_app_server_protocol::SendUserMessageParams;
use codex_app_server_protocol::SendUserTurnParams;
use codex_app_server_protocol::ServerRequest;
use codex_app_server_protocol::SetDefaultModelParams;
use codex_app_server_protocol::ThreadArchiveParams;
use codex_app_server_protocol::ThreadListParams;
use codex_app_server_protocol::ThreadResumeParams;
use codex_app_server_protocol::ThreadStartParams;
use codex_app_server_protocol::TurnInterruptParams;
use codex_app_server_protocol::TurnStartParams;
use codex_app_server_protocol::UploadFeedbackParams;
use codex_app_server_protocol::JSONRPCError;
use codex_app_server_protocol::JSONRPCMessage;
use codex_app_server_protocol::JSONRPCNotification;
use codex_app_server_protocol::JSONRPCRequest;
use codex_app_server_protocol::JSONRPCResponse;
use codex_app_server_protocol::RequestId;
use std::process::Command as StdCommand;
use tokio::process::Command;
@@ -250,19 +243,10 @@ impl McpProcess {
self.send_request("account/rateLimits/read", None).await
}
/// Send an `account/read` JSON-RPC request.
pub async fn send_get_account_request(
&mut self,
params: GetAccountParams,
) -> anyhow::Result<i64> {
let params = Some(serde_json::to_value(params)?);
self.send_request("account/read", params).await
}
/// Send a `feedback/upload` JSON-RPC request.
pub async fn send_feedback_upload_request(
pub async fn send_upload_feedback_request(
&mut self,
params: FeedbackUploadParams,
params: UploadFeedbackParams,
) -> anyhow::Result<i64> {
let params = Some(serde_json::to_value(params)?);
self.send_request("feedback/upload", params).await
@@ -291,46 +275,10 @@ impl McpProcess {
self.send_request("listConversations", params).await
}
/// Send a `thread/start` JSON-RPC request.
pub async fn send_thread_start_request(
&mut self,
params: ThreadStartParams,
) -> anyhow::Result<i64> {
let params = Some(serde_json::to_value(params)?);
self.send_request("thread/start", params).await
}
/// Send a `thread/resume` JSON-RPC request.
pub async fn send_thread_resume_request(
&mut self,
params: ThreadResumeParams,
) -> anyhow::Result<i64> {
let params = Some(serde_json::to_value(params)?);
self.send_request("thread/resume", params).await
}
/// Send a `thread/archive` JSON-RPC request.
pub async fn send_thread_archive_request(
&mut self,
params: ThreadArchiveParams,
) -> anyhow::Result<i64> {
let params = Some(serde_json::to_value(params)?);
self.send_request("thread/archive", params).await
}
/// Send a `thread/list` JSON-RPC request.
pub async fn send_thread_list_request(
&mut self,
params: ThreadListParams,
) -> anyhow::Result<i64> {
let params = Some(serde_json::to_value(params)?);
self.send_request("thread/list", params).await
}
/// Send a `model/list` JSON-RPC request.
pub async fn send_list_models_request(
&mut self,
params: ModelListParams,
params: ListModelsParams,
) -> anyhow::Result<i64> {
let params = Some(serde_json::to_value(params)?);
self.send_request("model/list", params).await
@@ -359,24 +307,6 @@ impl McpProcess {
self.send_request("loginChatGpt", None).await
}
/// Send a `turn/start` JSON-RPC request (v2).
pub async fn send_turn_start_request(
&mut self,
params: TurnStartParams,
) -> anyhow::Result<i64> {
let params = Some(serde_json::to_value(params)?);
self.send_request("turn/start", params).await
}
/// Send a `turn/interrupt` JSON-RPC request (v2).
pub async fn send_turn_interrupt_request(
&mut self,
params: TurnInterruptParams,
) -> anyhow::Result<i64> {
let params = Some(serde_json::to_value(params)?);
self.send_request("turn/interrupt", params).await
}
/// Send a `cancelLoginChatGpt` JSON-RPC request.
pub async fn send_cancel_login_chat_gpt_request(
&mut self,
@@ -391,40 +321,6 @@ impl McpProcess {
self.send_request("logoutChatGpt", None).await
}
/// Send an `account/logout` JSON-RPC request.
pub async fn send_logout_account_request(&mut self) -> anyhow::Result<i64> {
self.send_request("account/logout", None).await
}
/// Send an `account/login/start` JSON-RPC request for API key login.
pub async fn send_login_account_api_key_request(
&mut self,
api_key: &str,
) -> anyhow::Result<i64> {
let params = serde_json::json!({
"type": "apiKey",
"apiKey": api_key,
});
self.send_request("account/login/start", Some(params)).await
}
/// Send an `account/login/start` JSON-RPC request for ChatGPT login.
pub async fn send_login_account_chatgpt_request(&mut self) -> anyhow::Result<i64> {
let params = serde_json::json!({
"type": "chatgpt"
});
self.send_request("account/login/start", Some(params)).await
}
/// Send an `account/login/cancel` JSON-RPC request.
pub async fn send_cancel_login_account_request(
&mut self,
params: CancelLoginAccountParams,
) -> anyhow::Result<i64> {
let params = Some(serde_json::to_value(params)?);
self.send_request("account/login/cancel", params).await
}
/// Send a `fuzzyFileSearch` JSON-RPC request.
pub async fn send_fuzzy_file_search_request(
&mut self,

View File

@@ -29,25 +29,6 @@ pub async fn create_mock_chat_completions_server(responses: Vec<String>) -> Mock
server
}
/// Same as `create_mock_chat_completions_server` but does not enforce an
/// expectation on the number of calls.
pub async fn create_mock_chat_completions_server_unchecked(responses: Vec<String>) -> MockServer {
let server = MockServer::start().await;
let seq_responder = SeqResponder {
num_calls: AtomicUsize::new(0),
responses,
};
Mock::given(method("POST"))
.and(path("/v1/chat/completions"))
.respond_with(seq_responder)
.mount(&server)
.await;
server
}
struct SeqResponder {
num_calls: AtomicUsize,
responses: Vec<String>,

View File

@@ -1,82 +0,0 @@
use anyhow::Result;
use codex_protocol::ConversationId;
use codex_protocol::protocol::SessionMeta;
use codex_protocol::protocol::SessionSource;
use serde_json::json;
use std::fs;
use std::path::Path;
use std::path::PathBuf;
use uuid::Uuid;
/// Create a minimal rollout file under `CODEX_HOME/sessions/YYYY/MM/DD/`.
///
/// - `filename_ts` is the filename timestamp component in `YYYY-MM-DDThh-mm-ss` format.
/// - `meta_rfc3339` is the envelope timestamp used in JSON lines.
/// - `preview` is the user message preview text.
/// - `model_provider` optionally sets the provider in the session meta payload.
///
/// Returns the generated conversation/session UUID as a string.
pub fn create_fake_rollout(
codex_home: &Path,
filename_ts: &str,
meta_rfc3339: &str,
preview: &str,
model_provider: Option<&str>,
) -> Result<String> {
let uuid = Uuid::new_v4();
let uuid_str = uuid.to_string();
let conversation_id = ConversationId::from_string(&uuid_str)?;
// sessions/YYYY/MM/DD derived from filename_ts (YYYY-MM-DDThh-mm-ss)
let year = &filename_ts[0..4];
let month = &filename_ts[5..7];
let day = &filename_ts[8..10];
let dir = codex_home.join("sessions").join(year).join(month).join(day);
fs::create_dir_all(&dir)?;
let file_path = dir.join(format!("rollout-{filename_ts}-{uuid}.jsonl"));
// Build JSONL lines
let payload = serde_json::to_value(SessionMeta {
id: conversation_id,
timestamp: meta_rfc3339.to_string(),
cwd: PathBuf::from("/"),
originator: "codex".to_string(),
cli_version: "0.0.0".to_string(),
instructions: None,
source: SessionSource::Cli,
model_provider: model_provider.map(str::to_string),
})?;
let lines = [
json!({
"timestamp": meta_rfc3339,
"type": "session_meta",
"payload": payload
})
.to_string(),
json!({
"timestamp": meta_rfc3339,
"type":"response_item",
"payload": {
"type":"message",
"role":"user",
"content":[{"type":"input_text","text": preview}]
}
})
.to_string(),
json!({
"timestamp": meta_rfc3339,
"type":"event_msg",
"payload": {
"type":"user_message",
"message": preview,
"kind": "plain"
}
})
.to_string(),
];
fs::write(file_path, lines.join("\n") + "\n")?;
Ok(uuid_str)
}

View File

@@ -12,7 +12,7 @@ use std::path::Path;
use tempfile::TempDir;
use tokio::time::timeout;
const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(20);
const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10);
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn archive_conversation_moves_rollout_into_archived_directory() -> Result<()> {

View File

@@ -146,7 +146,7 @@ fn create_config_toml(codex_home: &Path, server_uri: String) -> std::io::Result<
r#"
model = "mock-model"
approval_policy = "never"
sandbox_mode = "read-only"
sandbox_mode = "danger-full-access"
model_provider = "mock_provider"

View File

@@ -1,6 +1,5 @@
use anyhow::Result;
use app_test_support::McpProcess;
use app_test_support::create_fake_rollout;
use app_test_support::to_response;
use codex_app_server_protocol::JSONRPCNotification;
use codex_app_server_protocol::JSONRPCResponse;
@@ -16,8 +15,12 @@ use codex_core::protocol::EventMsg;
use codex_protocol::models::ContentItem;
use codex_protocol::models::ResponseItem;
use pretty_assertions::assert_eq;
use serde_json::json;
use std::fs;
use std::path::Path;
use tempfile::TempDir;
use tokio::time::timeout;
use uuid::Uuid;
const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10);
@@ -354,3 +357,70 @@ async fn test_list_and_resume_conversations() -> Result<()> {
Ok(())
}
fn create_fake_rollout(
codex_home: &Path,
filename_ts: &str,
meta_rfc3339: &str,
preview: &str,
model_provider: Option<&str>,
) -> Result<()> {
let uuid = Uuid::new_v4();
// sessions/YYYY/MM/DD/ derived from filename_ts (YYYY-MM-DDThh-mm-ss)
let year = &filename_ts[0..4];
let month = &filename_ts[5..7];
let day = &filename_ts[8..10];
let dir = codex_home.join("sessions").join(year).join(month).join(day);
fs::create_dir_all(&dir)?;
let file_path = dir.join(format!("rollout-{filename_ts}-{uuid}.jsonl"));
let mut lines = Vec::new();
// Meta line with timestamp (flattened meta in payload for new schema)
let mut payload = json!({
"id": uuid,
"timestamp": meta_rfc3339,
"cwd": "/",
"originator": "codex",
"cli_version": "0.0.0",
"instructions": null,
});
if let Some(provider) = model_provider {
payload["model_provider"] = json!(provider);
}
lines.push(
json!({
"timestamp": meta_rfc3339,
"type": "session_meta",
"payload": payload
})
.to_string(),
);
// Minimal user message entry as a persisted response item (with envelope timestamp)
lines.push(
json!({
"timestamp": meta_rfc3339,
"type":"response_item",
"payload": {
"type":"message",
"role":"user",
"content":[{"type":"input_text","text": preview}]
}
})
.to_string(),
);
// Add a matching user message event line to satisfy filters
lines.push(
json!({
"timestamp": meta_rfc3339,
"type":"event_msg",
"payload": {
"type":"user_message",
"message": preview,
"kind": "plain"
}
})
.to_string(),
);
fs::write(file_path, lines.join("\n") + "\n")?;
Ok(())
}

View File

@@ -7,8 +7,9 @@ mod fuzzy_file_search;
mod interrupt;
mod list_resume;
mod login;
mod model_list;
mod rate_limits;
mod send_message;
mod set_default_model;
mod user_agent;
mod user_info;
mod v2;

View File

@@ -6,9 +6,9 @@ use app_test_support::McpProcess;
use app_test_support::to_response;
use codex_app_server_protocol::JSONRPCError;
use codex_app_server_protocol::JSONRPCResponse;
use codex_app_server_protocol::ListModelsParams;
use codex_app_server_protocol::ListModelsResponse;
use codex_app_server_protocol::Model;
use codex_app_server_protocol::ModelListParams;
use codex_app_server_protocol::ModelListResponse;
use codex_app_server_protocol::ReasoningEffortOption;
use codex_app_server_protocol::RequestId;
use codex_protocol::config_types::ReasoningEffort;
@@ -19,7 +19,7 @@ use tokio::time::timeout;
const DEFAULT_TIMEOUT: Duration = Duration::from_secs(10);
const INVALID_REQUEST_ERROR_CODE: i64 = -32600;
#[tokio::test]
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn list_models_returns_all_models_with_large_limit() -> Result<()> {
let codex_home = TempDir::new()?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
@@ -27,8 +27,8 @@ async fn list_models_returns_all_models_with_large_limit() -> Result<()> {
timeout(DEFAULT_TIMEOUT, mcp.initialize()).await??;
let request_id = mcp
.send_list_models_request(ModelListParams {
limit: Some(100),
.send_list_models_request(ListModelsParams {
page_size: Some(100),
cursor: None,
})
.await?;
@@ -39,17 +39,14 @@ async fn list_models_returns_all_models_with_large_limit() -> Result<()> {
)
.await??;
let ModelListResponse {
data: items,
next_cursor,
} = to_response::<ModelListResponse>(response)?;
let ListModelsResponse { items, next_cursor } = to_response::<ListModelsResponse>(response)?;
let expected_models = vec![
Model {
id: "gpt-5-codex".to_string(),
model: "gpt-5-codex".to_string(),
display_name: "gpt-5-codex".to_string(),
description: "Optimized for codex.".to_string(),
description: "Optimized for coding tasks with many tools.".to_string(),
supported_reasoning_efforts: vec![
ReasoningEffortOption {
reasoning_effort: ReasoningEffort::Low,
@@ -106,7 +103,7 @@ async fn list_models_returns_all_models_with_large_limit() -> Result<()> {
Ok(())
}
#[tokio::test]
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn list_models_pagination_works() -> Result<()> {
let codex_home = TempDir::new()?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
@@ -114,8 +111,8 @@ async fn list_models_pagination_works() -> Result<()> {
timeout(DEFAULT_TIMEOUT, mcp.initialize()).await??;
let first_request = mcp
.send_list_models_request(ModelListParams {
limit: Some(1),
.send_list_models_request(ListModelsParams {
page_size: Some(1),
cursor: None,
})
.await?;
@@ -126,18 +123,18 @@ async fn list_models_pagination_works() -> Result<()> {
)
.await??;
let ModelListResponse {
data: first_items,
let ListModelsResponse {
items: first_items,
next_cursor: first_cursor,
} = to_response::<ModelListResponse>(first_response)?;
} = to_response::<ListModelsResponse>(first_response)?;
assert_eq!(first_items.len(), 1);
assert_eq!(first_items[0].id, "gpt-5-codex");
let next_cursor = first_cursor.ok_or_else(|| anyhow!("cursor for second page"))?;
let second_request = mcp
.send_list_models_request(ModelListParams {
limit: Some(1),
.send_list_models_request(ListModelsParams {
page_size: Some(1),
cursor: Some(next_cursor.clone()),
})
.await?;
@@ -148,10 +145,10 @@ async fn list_models_pagination_works() -> Result<()> {
)
.await??;
let ModelListResponse {
data: second_items,
let ListModelsResponse {
items: second_items,
next_cursor: second_cursor,
} = to_response::<ModelListResponse>(second_response)?;
} = to_response::<ListModelsResponse>(second_response)?;
assert_eq!(second_items.len(), 1);
assert_eq!(second_items[0].id, "gpt-5");
@@ -159,7 +156,7 @@ async fn list_models_pagination_works() -> Result<()> {
Ok(())
}
#[tokio::test]
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn list_models_rejects_invalid_cursor() -> Result<()> {
let codex_home = TempDir::new()?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
@@ -167,8 +164,8 @@ async fn list_models_rejects_invalid_cursor() -> Result<()> {
timeout(DEFAULT_TIMEOUT, mcp.initialize()).await??;
let request_id = mcp
.send_list_models_request(ModelListParams {
limit: None,
.send_list_models_request(ListModelsParams {
page_size: None,
cursor: Some("invalid".to_string()),
})
.await?;

View File

@@ -7,10 +7,10 @@ use codex_app_server_protocol::GetAccountRateLimitsResponse;
use codex_app_server_protocol::JSONRPCError;
use codex_app_server_protocol::JSONRPCResponse;
use codex_app_server_protocol::LoginApiKeyParams;
use codex_app_server_protocol::RateLimitSnapshot;
use codex_app_server_protocol::RateLimitWindow;
use codex_app_server_protocol::RequestId;
use codex_core::auth::AuthCredentialsStoreMode;
use codex_protocol::protocol::RateLimitSnapshot;
use codex_protocol::protocol::RateLimitWindow;
use pretty_assertions::assert_eq;
use serde_json::json;
use std::path::Path;
@@ -26,7 +26,7 @@ use wiremock::matchers::path;
const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10);
const INVALID_REQUEST_ERROR_CODE: i64 = -32600;
#[tokio::test]
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn get_account_rate_limits_requires_auth() -> Result<()> {
let codex_home = TempDir::new()?;
@@ -51,7 +51,7 @@ async fn get_account_rate_limits_requires_auth() -> Result<()> {
Ok(())
}
#[tokio::test]
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn get_account_rate_limits_requires_chatgpt_auth() -> Result<()> {
let codex_home = TempDir::new()?;
@@ -78,7 +78,7 @@ async fn get_account_rate_limits_requires_chatgpt_auth() -> Result<()> {
Ok(())
}
#[tokio::test]
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn get_account_rate_limits_returns_snapshot() -> Result<()> {
let codex_home = TempDir::new()?;
write_chatgpt_auth(
@@ -143,13 +143,13 @@ async fn get_account_rate_limits_returns_snapshot() -> Result<()> {
let expected = GetAccountRateLimitsResponse {
rate_limits: RateLimitSnapshot {
primary: Some(RateLimitWindow {
used_percent: 42,
window_duration_mins: Some(60),
used_percent: 42.0,
window_minutes: Some(60),
resets_at: Some(primary_reset_timestamp),
}),
secondary: Some(RateLimitWindow {
used_percent: 5,
window_duration_mins: Some(1440),
used_percent: 5.0,
window_minutes: Some(1440),
resets_at: Some(secondary_reset_timestamp),
}),
},

View File

@@ -1,492 +0,0 @@
use anyhow::Result;
use anyhow::bail;
use app_test_support::McpProcess;
use app_test_support::to_response;
use app_test_support::ChatGptAuthFixture;
use app_test_support::write_chatgpt_auth;
use codex_app_server_protocol::Account;
use codex_app_server_protocol::AuthMode;
use codex_app_server_protocol::CancelLoginAccountParams;
use codex_app_server_protocol::CancelLoginAccountResponse;
use codex_app_server_protocol::GetAccountParams;
use codex_app_server_protocol::GetAccountResponse;
use codex_app_server_protocol::JSONRPCError;
use codex_app_server_protocol::JSONRPCResponse;
use codex_app_server_protocol::LoginAccountResponse;
use codex_app_server_protocol::LogoutAccountResponse;
use codex_app_server_protocol::RequestId;
use codex_app_server_protocol::ServerNotification;
use codex_core::auth::AuthCredentialsStoreMode;
use codex_login::login_with_api_key;
use codex_protocol::account::PlanType as AccountPlanType;
use pretty_assertions::assert_eq;
use serial_test::serial;
use std::path::Path;
use std::time::Duration;
use tempfile::TempDir;
use tokio::time::timeout;
const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10);
// Helper to create a minimal config.toml for the app server
#[derive(Default)]
struct CreateConfigTomlParams {
forced_method: Option<String>,
forced_workspace_id: Option<String>,
requires_openai_auth: Option<bool>,
}
fn create_config_toml(codex_home: &Path, params: CreateConfigTomlParams) -> std::io::Result<()> {
let config_toml = codex_home.join("config.toml");
let forced_line = if let Some(method) = params.forced_method {
format!("forced_login_method = \"{method}\"\n")
} else {
String::new()
};
let forced_workspace_line = if let Some(ws) = params.forced_workspace_id {
format!("forced_chatgpt_workspace_id = \"{ws}\"\n")
} else {
String::new()
};
let requires_line = match params.requires_openai_auth {
Some(true) => "requires_openai_auth = true\n".to_string(),
Some(false) => String::new(),
None => String::new(),
};
let contents = format!(
r#"
model = "mock-model"
approval_policy = "never"
sandbox_mode = "danger-full-access"
{forced_line}
{forced_workspace_line}
model_provider = "mock_provider"
[model_providers.mock_provider]
name = "Mock provider for test"
base_url = "http://127.0.0.1:0/v1"
wire_api = "chat"
request_max_retries = 0
stream_max_retries = 0
{requires_line}
"#
);
std::fs::write(config_toml, contents)
}
#[tokio::test]
async fn logout_account_removes_auth_and_notifies() -> Result<()> {
let codex_home = TempDir::new()?;
create_config_toml(codex_home.path(), CreateConfigTomlParams::default())?;
login_with_api_key(
codex_home.path(),
"sk-test-key",
AuthCredentialsStoreMode::File,
)?;
assert!(codex_home.path().join("auth.json").exists());
let mut mcp = McpProcess::new_with_env(codex_home.path(), &[("OPENAI_API_KEY", None)]).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let id = mcp.send_logout_account_request().await?;
let resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(id)),
)
.await??;
let _ok: LogoutAccountResponse = to_response(resp)?;
let note = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("account/updated"),
)
.await??;
let parsed: ServerNotification = note.try_into()?;
let ServerNotification::AccountUpdated(payload) = parsed else {
bail!("unexpected notification: {parsed:?}");
};
assert!(
payload.auth_mode.is_none(),
"auth_method should be None after logout"
);
assert!(
!codex_home.path().join("auth.json").exists(),
"auth.json should be deleted"
);
let get_id = mcp
.send_get_account_request(GetAccountParams {
refresh_token: false,
})
.await?;
let get_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(get_id)),
)
.await??;
let account: GetAccountResponse = to_response(get_resp)?;
assert_eq!(account.account, None);
Ok(())
}
#[tokio::test]
async fn login_account_api_key_succeeds_and_notifies() -> Result<()> {
let codex_home = TempDir::new()?;
create_config_toml(codex_home.path(), CreateConfigTomlParams::default())?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let req_id = mcp
.send_login_account_api_key_request("sk-test-key")
.await?;
let resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(req_id)),
)
.await??;
let login: LoginAccountResponse = to_response(resp)?;
assert_eq!(login, LoginAccountResponse::ApiKey {});
let note = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("account/login/completed"),
)
.await??;
let parsed: ServerNotification = note.try_into()?;
let ServerNotification::AccountLoginCompleted(payload) = parsed else {
bail!("unexpected notification: {parsed:?}");
};
pretty_assertions::assert_eq!(payload.login_id, None);
pretty_assertions::assert_eq!(payload.success, true);
pretty_assertions::assert_eq!(payload.error, None);
let note = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("account/updated"),
)
.await??;
let parsed: ServerNotification = note.try_into()?;
let ServerNotification::AccountUpdated(payload) = parsed else {
bail!("unexpected notification: {parsed:?}");
};
pretty_assertions::assert_eq!(payload.auth_mode, Some(AuthMode::ApiKey));
assert!(codex_home.path().join("auth.json").exists());
Ok(())
}
#[tokio::test]
async fn login_account_api_key_rejected_when_forced_chatgpt() -> Result<()> {
let codex_home = TempDir::new()?;
create_config_toml(
codex_home.path(),
CreateConfigTomlParams {
forced_method: Some("chatgpt".to_string()),
..Default::default()
},
)?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let request_id = mcp
.send_login_account_api_key_request("sk-test-key")
.await?;
let err: JSONRPCError = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_error_message(RequestId::Integer(request_id)),
)
.await??;
assert_eq!(
err.error.message,
"API key login is disabled. Use ChatGPT login instead."
);
Ok(())
}
#[tokio::test]
async fn login_account_chatgpt_rejected_when_forced_api() -> Result<()> {
let codex_home = TempDir::new()?;
create_config_toml(
codex_home.path(),
CreateConfigTomlParams {
forced_method: Some("api".to_string()),
..Default::default()
},
)?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let request_id = mcp.send_login_account_chatgpt_request().await?;
let err: JSONRPCError = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_error_message(RequestId::Integer(request_id)),
)
.await??;
assert_eq!(
err.error.message,
"ChatGPT login is disabled. Use API key login instead."
);
Ok(())
}
#[tokio::test]
// Serialize tests that launch the login server since it binds to a fixed port.
#[serial(login_port)]
async fn login_account_chatgpt_start() -> Result<()> {
let codex_home = TempDir::new()?;
create_config_toml(codex_home.path(), CreateConfigTomlParams::default())?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let request_id = mcp.send_login_account_chatgpt_request().await?;
let resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(request_id)),
)
.await??;
let login: LoginAccountResponse = to_response(resp)?;
let LoginAccountResponse::Chatgpt { login_id, auth_url } = login else {
bail!("unexpected login response: {login:?}");
};
assert!(
auth_url.contains("redirect_uri=http%3A%2F%2Flocalhost"),
"auth_url should contain a redirect_uri to localhost"
);
let cancel_id = mcp
.send_cancel_login_account_request(CancelLoginAccountParams {
login_id: login_id.clone(),
})
.await?;
let cancel_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(cancel_id)),
)
.await??;
let _ok: CancelLoginAccountResponse = to_response(cancel_resp)?;
let note = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("account/login/completed"),
)
.await??;
let parsed: ServerNotification = note.try_into()?;
let ServerNotification::AccountLoginCompleted(payload) = parsed else {
bail!("unexpected notification: {parsed:?}");
};
pretty_assertions::assert_eq!(payload.login_id, Some(login_id));
pretty_assertions::assert_eq!(payload.success, false);
assert!(
payload.error.is_some(),
"expected a non-empty error on cancel"
);
let maybe_updated = timeout(
Duration::from_millis(500),
mcp.read_stream_until_notification_message("account/updated"),
)
.await;
assert!(
maybe_updated.is_err(),
"account/updated should not be emitted when login is cancelled"
);
Ok(())
}
#[tokio::test]
// Serialize tests that launch the login server since it binds to a fixed port.
#[serial(login_port)]
async fn login_account_chatgpt_includes_forced_workspace_query_param() -> Result<()> {
let codex_home = TempDir::new()?;
create_config_toml(
codex_home.path(),
CreateConfigTomlParams {
forced_workspace_id: Some("ws-forced".to_string()),
..Default::default()
},
)?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let request_id = mcp.send_login_account_chatgpt_request().await?;
let resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(request_id)),
)
.await??;
let login: LoginAccountResponse = to_response(resp)?;
let LoginAccountResponse::Chatgpt { auth_url, .. } = login else {
bail!("unexpected login response: {login:?}");
};
assert!(
auth_url.contains("allowed_workspace_id=ws-forced"),
"auth URL should include forced workspace"
);
Ok(())
}
#[tokio::test]
async fn get_account_no_auth() -> Result<()> {
let codex_home = TempDir::new()?;
create_config_toml(
codex_home.path(),
CreateConfigTomlParams {
requires_openai_auth: Some(true),
..Default::default()
},
)?;
let mut mcp = McpProcess::new_with_env(codex_home.path(), &[("OPENAI_API_KEY", None)]).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let params = GetAccountParams {
refresh_token: false,
};
let request_id = mcp.send_get_account_request(params).await?;
let resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(request_id)),
)
.await??;
let account: GetAccountResponse = to_response(resp)?;
assert_eq!(account.account, None, "expected no account");
assert_eq!(account.requires_openai_auth, true);
Ok(())
}
#[tokio::test]
async fn get_account_with_api_key() -> Result<()> {
let codex_home = TempDir::new()?;
create_config_toml(
codex_home.path(),
CreateConfigTomlParams {
requires_openai_auth: Some(true),
..Default::default()
},
)?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let req_id = mcp
.send_login_account_api_key_request("sk-test-key")
.await?;
let resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(req_id)),
)
.await??;
let _login_ok = to_response::<LoginAccountResponse>(resp)?;
let params = GetAccountParams {
refresh_token: false,
};
let request_id = mcp.send_get_account_request(params).await?;
let resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(request_id)),
)
.await??;
let received: GetAccountResponse = to_response(resp)?;
let expected = GetAccountResponse {
account: Some(Account::ApiKey {}),
requires_openai_auth: true,
};
assert_eq!(received, expected);
Ok(())
}
#[tokio::test]
async fn get_account_when_auth_not_required() -> Result<()> {
let codex_home = TempDir::new()?;
create_config_toml(
codex_home.path(),
CreateConfigTomlParams {
requires_openai_auth: Some(false),
..Default::default()
},
)?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let params = GetAccountParams {
refresh_token: false,
};
let request_id = mcp.send_get_account_request(params).await?;
let resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(request_id)),
)
.await??;
let received: GetAccountResponse = to_response(resp)?;
let expected = GetAccountResponse {
account: None,
requires_openai_auth: false,
};
assert_eq!(received, expected);
Ok(())
}
#[tokio::test]
async fn get_account_with_chatgpt() -> Result<()> {
let codex_home = TempDir::new()?;
create_config_toml(
codex_home.path(),
CreateConfigTomlParams {
requires_openai_auth: Some(true),
..Default::default()
},
)?;
write_chatgpt_auth(
codex_home.path(),
ChatGptAuthFixture::new("access-chatgpt")
.email("user@example.com")
.plan_type("pro"),
AuthCredentialsStoreMode::File,
)?;
let mut mcp = McpProcess::new_with_env(codex_home.path(), &[("OPENAI_API_KEY", None)]).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let params = GetAccountParams {
refresh_token: false,
};
let request_id = mcp.send_get_account_request(params).await?;
let resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(request_id)),
)
.await??;
let received: GetAccountResponse = to_response(resp)?;
let expected = GetAccountResponse {
account: Some(Account::Chatgpt {
email: "user@example.com".to_string(),
plan_type: AccountPlanType::Pro,
}),
requires_openai_auth: true,
};
assert_eq!(received, expected);
Ok(())
}

View File

@@ -1,9 +0,0 @@
mod account;
mod model_list;
mod rate_limits;
mod thread_archive;
mod thread_list;
mod thread_resume;
mod thread_start;
mod turn_interrupt;
mod turn_start;

View File

@@ -1,93 +0,0 @@
use anyhow::Result;
use app_test_support::McpProcess;
use app_test_support::to_response;
use codex_app_server_protocol::JSONRPCResponse;
use codex_app_server_protocol::RequestId;
use codex_app_server_protocol::ThreadArchiveParams;
use codex_app_server_protocol::ThreadArchiveResponse;
use codex_app_server_protocol::ThreadStartParams;
use codex_app_server_protocol::ThreadStartResponse;
use codex_core::ARCHIVED_SESSIONS_SUBDIR;
use codex_core::find_conversation_path_by_id_str;
use std::path::Path;
use tempfile::TempDir;
use tokio::time::timeout;
const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10);
#[tokio::test]
async fn thread_archive_moves_rollout_into_archived_directory() -> Result<()> {
let codex_home = TempDir::new()?;
create_config_toml(codex_home.path())?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
// Start a thread.
let start_id = mcp
.send_thread_start_request(ThreadStartParams {
model: Some("mock-model".to_string()),
..Default::default()
})
.await?;
let start_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(start_id)),
)
.await??;
let ThreadStartResponse { thread } = to_response::<ThreadStartResponse>(start_resp)?;
assert!(!thread.id.is_empty());
// Locate the rollout path recorded for this thread id.
let rollout_path = find_conversation_path_by_id_str(codex_home.path(), &thread.id)
.await?
.expect("expected rollout path for thread id to exist");
assert!(
rollout_path.exists(),
"expected {} to exist",
rollout_path.display()
);
// Archive the thread.
let archive_id = mcp
.send_thread_archive_request(ThreadArchiveParams {
thread_id: thread.id.clone(),
})
.await?;
let archive_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(archive_id)),
)
.await??;
let _: ThreadArchiveResponse = to_response::<ThreadArchiveResponse>(archive_resp)?;
// Verify file moved.
let archived_directory = codex_home.path().join(ARCHIVED_SESSIONS_SUBDIR);
// The archived file keeps the original filename (rollout-...-<id>.jsonl).
let archived_rollout_path =
archived_directory.join(rollout_path.file_name().expect("rollout file name"));
assert!(
!rollout_path.exists(),
"expected rollout path {} to be moved",
rollout_path.display()
);
assert!(
archived_rollout_path.exists(),
"expected archived rollout path {} to exist",
archived_rollout_path.display()
);
Ok(())
}
fn create_config_toml(codex_home: &Path) -> std::io::Result<()> {
let config_toml = codex_home.join("config.toml");
std::fs::write(config_toml, config_contents())
}
fn config_contents() -> &'static str {
r#"model = "mock-model"
approval_policy = "never"
sandbox_mode = "read-only"
"#
}

View File

@@ -1,220 +0,0 @@
use anyhow::Result;
use app_test_support::McpProcess;
use app_test_support::create_fake_rollout;
use app_test_support::to_response;
use codex_app_server_protocol::JSONRPCResponse;
use codex_app_server_protocol::RequestId;
use codex_app_server_protocol::ThreadListParams;
use codex_app_server_protocol::ThreadListResponse;
use serde_json::json;
use tempfile::TempDir;
use tokio::time::timeout;
use uuid::Uuid;
const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10);
#[tokio::test]
async fn thread_list_basic_empty() -> Result<()> {
let codex_home = TempDir::new()?;
create_minimal_config(codex_home.path())?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
// List threads in an empty CODEX_HOME; should return an empty page with nextCursor: null.
let list_id = mcp
.send_thread_list_request(ThreadListParams {
cursor: None,
limit: Some(10),
model_providers: None,
})
.await?;
let list_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(list_id)),
)
.await??;
let ThreadListResponse { data, next_cursor } = to_response::<ThreadListResponse>(list_resp)?;
assert!(data.is_empty());
assert_eq!(next_cursor, None);
Ok(())
}
// Minimal config.toml for listing.
fn create_minimal_config(codex_home: &std::path::Path) -> std::io::Result<()> {
let config_toml = codex_home.join("config.toml");
std::fs::write(
config_toml,
r#"
model = "mock-model"
approval_policy = "never"
"#,
)
}
#[tokio::test]
async fn thread_list_pagination_next_cursor_none_on_last_page() -> Result<()> {
let codex_home = TempDir::new()?;
create_minimal_config(codex_home.path())?;
// Create three rollouts so we can paginate with limit=2.
let _a = create_fake_rollout(
codex_home.path(),
"2025-01-02T12-00-00",
"2025-01-02T12:00:00Z",
"Hello",
Some("mock_provider"),
)?;
let _b = create_fake_rollout(
codex_home.path(),
"2025-01-01T13-00-00",
"2025-01-01T13:00:00Z",
"Hello",
Some("mock_provider"),
)?;
let _c = create_fake_rollout(
codex_home.path(),
"2025-01-01T12-00-00",
"2025-01-01T12:00:00Z",
"Hello",
Some("mock_provider"),
)?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
// Page 1: limit 2 → expect next_cursor Some.
let page1_id = mcp
.send_thread_list_request(ThreadListParams {
cursor: None,
limit: Some(2),
model_providers: Some(vec!["mock_provider".to_string()]),
})
.await?;
let page1_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(page1_id)),
)
.await??;
let ThreadListResponse {
data: data1,
next_cursor: cursor1,
} = to_response::<ThreadListResponse>(page1_resp)?;
assert_eq!(data1.len(), 2);
for thread in &data1 {
assert_eq!(thread.preview, "Hello");
assert_eq!(thread.model_provider, "mock_provider");
assert!(thread.created_at > 0);
}
let cursor1 = cursor1.expect("expected nextCursor on first page");
// Page 2: with cursor → expect next_cursor None when no more results.
let page2_id = mcp
.send_thread_list_request(ThreadListParams {
cursor: Some(cursor1),
limit: Some(2),
model_providers: Some(vec!["mock_provider".to_string()]),
})
.await?;
let page2_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(page2_id)),
)
.await??;
let ThreadListResponse {
data: data2,
next_cursor: cursor2,
} = to_response::<ThreadListResponse>(page2_resp)?;
assert!(data2.len() <= 2);
for thread in &data2 {
assert_eq!(thread.preview, "Hello");
assert_eq!(thread.model_provider, "mock_provider");
assert!(thread.created_at > 0);
}
assert_eq!(cursor2, None, "expected nextCursor to be null on last page");
Ok(())
}
#[tokio::test]
async fn thread_list_respects_provider_filter() -> Result<()> {
let codex_home = TempDir::new()?;
create_minimal_config(codex_home.path())?;
// Create rollouts under two providers.
let _a = create_fake_rollout(
codex_home.path(),
"2025-01-02T10-00-00",
"2025-01-02T10:00:00Z",
"X",
Some("mock_provider"),
)?; // mock_provider
// one with a different provider
let uuid = Uuid::new_v4();
let dir = codex_home
.path()
.join("sessions")
.join("2025")
.join("01")
.join("02");
std::fs::create_dir_all(&dir)?;
let file_path = dir.join(format!("rollout-2025-01-02T11-00-00-{uuid}.jsonl"));
let lines = [
json!({
"timestamp": "2025-01-02T11:00:00Z",
"type": "session_meta",
"payload": {
"id": uuid,
"timestamp": "2025-01-02T11:00:00Z",
"cwd": "/",
"originator": "codex",
"cli_version": "0.0.0",
"instructions": null,
"source": "vscode",
"model_provider": "other_provider"
}
})
.to_string(),
json!({
"timestamp": "2025-01-02T11:00:00Z",
"type":"response_item",
"payload": {"type":"message","role":"user","content":[{"type":"input_text","text":"X"}]}
})
.to_string(),
json!({
"timestamp": "2025-01-02T11:00:00Z",
"type":"event_msg",
"payload": {"type":"user_message","message":"X","kind":"plain"}
})
.to_string(),
];
std::fs::write(file_path, lines.join("\n") + "\n")?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
// Filter to only other_provider; expect 1 item, nextCursor None.
let list_id = mcp
.send_thread_list_request(ThreadListParams {
cursor: None,
limit: Some(10),
model_providers: Some(vec!["other_provider".to_string()]),
})
.await?;
let resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(list_id)),
)
.await??;
let ThreadListResponse { data, next_cursor } = to_response::<ThreadListResponse>(resp)?;
assert_eq!(data.len(), 1);
assert_eq!(next_cursor, None);
let thread = &data[0];
assert_eq!(thread.preview, "X");
assert_eq!(thread.model_provider, "other_provider");
let expected_ts = chrono::DateTime::parse_from_rfc3339("2025-01-02T11:00:00Z")?.timestamp();
assert_eq!(thread.created_at, expected_ts);
Ok(())
}
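
Aside, not part of the diff: the pagination contract these tests exercise — request a page, then follow `next_cursor` until it comes back `None` — can be condensed into a small helper. This is only a sketch assembled from the calls shown in the tests above; the method names and response fields come from that test code, while the hardcoded page size and the omission of timeouts are assumptions for brevity.

```rust
use anyhow::Result;
use app_test_support::McpProcess;
use app_test_support::to_response;
use codex_app_server_protocol::JSONRPCResponse;
use codex_app_server_protocol::RequestId;
use codex_app_server_protocol::ThreadListParams;
use codex_app_server_protocol::ThreadListResponse;

// Sketch: walk every page of thread/list and report how many threads were seen.
// The real tests wrap each await in a timeout; that is omitted here.
async fn count_all_threads(mcp: &mut McpProcess) -> Result<usize> {
    let mut cursor = None;
    let mut total = 0;
    loop {
        let req_id = mcp
            .send_thread_list_request(ThreadListParams {
                cursor: cursor.take(),
                limit: Some(10),
                model_providers: None,
            })
            .await?;
        let resp: JSONRPCResponse = mcp
            .read_stream_until_response_message(RequestId::Integer(req_id))
            .await?;
        let ThreadListResponse { data, next_cursor } = to_response::<ThreadListResponse>(resp)?;
        total += data.len();
        match next_cursor {
            Some(next) => cursor = Some(next),
            None => break,
        }
    }
    Ok(total)
}
```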

View File

@@ -1,79 +0,0 @@
use anyhow::Result;
use app_test_support::McpProcess;
use app_test_support::create_mock_chat_completions_server;
use app_test_support::to_response;
use codex_app_server_protocol::JSONRPCResponse;
use codex_app_server_protocol::RequestId;
use codex_app_server_protocol::ThreadResumeParams;
use codex_app_server_protocol::ThreadResumeResponse;
use codex_app_server_protocol::ThreadStartParams;
use codex_app_server_protocol::ThreadStartResponse;
use tempfile::TempDir;
use tokio::time::timeout;
const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10);
#[tokio::test]
async fn thread_resume_returns_existing_thread() -> Result<()> {
let server = create_mock_chat_completions_server(vec![]).await;
let codex_home = TempDir::new()?;
create_config_toml(codex_home.path(), &server.uri())?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
// Start a thread.
let start_id = mcp
.send_thread_start_request(ThreadStartParams {
model: Some("gpt-5-codex".to_string()),
..Default::default()
})
.await?;
let start_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(start_id)),
)
.await??;
let ThreadStartResponse { thread } = to_response::<ThreadStartResponse>(start_resp)?;
// Resume it via v2 API.
let resume_id = mcp
.send_thread_resume_request(ThreadResumeParams {
thread_id: thread.id.clone(),
})
.await?;
let resume_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(resume_id)),
)
.await??;
let ThreadResumeResponse { thread: resumed } =
to_response::<ThreadResumeResponse>(resume_resp)?;
assert_eq!(resumed, thread);
Ok(())
}
// Helper to create a config.toml pointing at the mock model server.
fn create_config_toml(codex_home: &std::path::Path, server_uri: &str) -> std::io::Result<()> {
let config_toml = codex_home.join("config.toml");
std::fs::write(
config_toml,
format!(
r#"
model = "mock-model"
approval_policy = "never"
sandbox_mode = "read-only"
model_provider = "mock_provider"
[model_providers.mock_provider]
name = "Mock provider for test"
base_url = "{server_uri}/v1"
wire_api = "chat"
request_max_retries = 0
stream_max_retries = 0
"#
),
)
}

View File

@@ -1,90 +0,0 @@
use anyhow::Result;
use app_test_support::McpProcess;
use app_test_support::create_mock_chat_completions_server;
use app_test_support::to_response;
use codex_app_server_protocol::JSONRPCNotification;
use codex_app_server_protocol::JSONRPCResponse;
use codex_app_server_protocol::RequestId;
use codex_app_server_protocol::ThreadStartParams;
use codex_app_server_protocol::ThreadStartResponse;
use codex_app_server_protocol::ThreadStartedNotification;
use std::path::Path;
use tempfile::TempDir;
use tokio::time::timeout;
const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10);
#[tokio::test]
async fn thread_start_creates_thread_and_emits_started() -> Result<()> {
// Provide a mock server and config so model wiring is valid.
let server = create_mock_chat_completions_server(vec![]).await;
let codex_home = TempDir::new()?;
create_config_toml(codex_home.path(), &server.uri())?;
// Start server and initialize.
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
// Start a v2 thread with an explicit model override.
let req_id = mcp
.send_thread_start_request(ThreadStartParams {
model: Some("gpt-5".to_string()),
..Default::default()
})
.await?;
// Expect a proper JSON-RPC response with a thread id.
let resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(req_id)),
)
.await??;
let ThreadStartResponse { thread } = to_response::<ThreadStartResponse>(resp)?;
assert!(!thread.id.is_empty(), "thread id should not be empty");
assert!(
thread.preview.is_empty(),
"new threads should start with an empty preview"
);
assert_eq!(thread.model_provider, "mock_provider");
assert!(
thread.created_at > 0,
"created_at should be a positive UNIX timestamp"
);
// A corresponding thread/started notification should arrive.
let notif: JSONRPCNotification = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("thread/started"),
)
.await??;
let started: ThreadStartedNotification =
serde_json::from_value(notif.params.expect("params must be present"))?;
assert_eq!(started.thread, thread);
Ok(())
}
// Helper to create a config.toml pointing at the mock model server.
fn create_config_toml(codex_home: &Path, server_uri: &str) -> std::io::Result<()> {
let config_toml = codex_home.join("config.toml");
std::fs::write(
config_toml,
format!(
r#"
model = "mock-model"
approval_policy = "never"
sandbox_mode = "read-only"
model_provider = "mock_provider"
[model_providers.mock_provider]
name = "Mock provider for test"
base_url = "{server_uri}/v1"
wire_api = "chat"
request_max_retries = 0
stream_max_retries = 0
"#
),
)
}

View File

@@ -1,128 +0,0 @@
#![cfg(unix)]
use anyhow::Result;
use app_test_support::McpProcess;
use app_test_support::create_mock_chat_completions_server;
use app_test_support::create_shell_sse_response;
use app_test_support::to_response;
use codex_app_server_protocol::JSONRPCResponse;
use codex_app_server_protocol::RequestId;
use codex_app_server_protocol::ThreadStartParams;
use codex_app_server_protocol::ThreadStartResponse;
use codex_app_server_protocol::TurnInterruptParams;
use codex_app_server_protocol::TurnInterruptResponse;
use codex_app_server_protocol::TurnStartParams;
use codex_app_server_protocol::TurnStartResponse;
use codex_app_server_protocol::UserInput as V2UserInput;
use tempfile::TempDir;
use tokio::time::timeout;
const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10);
#[tokio::test]
async fn turn_interrupt_aborts_running_turn() -> Result<()> {
// Use a portable sleep command to keep the turn running.
#[cfg(target_os = "windows")]
let shell_command = vec![
"powershell".to_string(),
"-Command".to_string(),
"Start-Sleep -Seconds 10".to_string(),
];
#[cfg(not(target_os = "windows"))]
let shell_command = vec!["sleep".to_string(), "10".to_string()];
let tmp = TempDir::new()?;
let codex_home = tmp.path().join("codex_home");
std::fs::create_dir(&codex_home)?;
let working_directory = tmp.path().join("workdir");
std::fs::create_dir(&working_directory)?;
// Mock server: long-running shell command then (after abort) nothing else needed.
let server = create_mock_chat_completions_server(vec![create_shell_sse_response(
shell_command.clone(),
Some(&working_directory),
Some(10_000),
"call_sleep",
)?])
.await;
create_config_toml(&codex_home, &server.uri())?;
let mut mcp = McpProcess::new(&codex_home).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
// Start a v2 thread and capture its id.
let thread_req = mcp
.send_thread_start_request(ThreadStartParams {
model: Some("mock-model".to_string()),
..Default::default()
})
.await?;
let thread_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(thread_req)),
)
.await??;
let ThreadStartResponse { thread } = to_response::<ThreadStartResponse>(thread_resp)?;
// Start a turn that triggers a long-running command.
let turn_req = mcp
.send_turn_start_request(TurnStartParams {
thread_id: thread.id.clone(),
input: vec![V2UserInput::Text {
text: "run sleep".to_string(),
}],
cwd: Some(working_directory.clone()),
..Default::default()
})
.await?;
let turn_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(turn_req)),
)
.await??;
let TurnStartResponse { turn } = to_response::<TurnStartResponse>(turn_resp)?;
// Give the command a brief moment to start.
tokio::time::sleep(std::time::Duration::from_secs(1)).await;
// Interrupt the in-progress turn by id (v2 API).
let interrupt_id = mcp
.send_turn_interrupt_request(TurnInterruptParams {
thread_id: thread.id,
turn_id: turn.id,
})
.await?;
let interrupt_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(interrupt_id)),
)
.await??;
let _resp: TurnInterruptResponse = to_response::<TurnInterruptResponse>(interrupt_resp)?;
// No fields to assert on; successful deserialization confirms proper response shape.
Ok(())
}
// Helper to create a config.toml pointing at the mock model server.
fn create_config_toml(codex_home: &std::path::Path, server_uri: &str) -> std::io::Result<()> {
let config_toml = codex_home.join("config.toml");
std::fs::write(
config_toml,
format!(
r#"
model = "mock-model"
approval_policy = "never"
sandbox_mode = "workspace-write"
model_provider = "mock_provider"
[model_providers.mock_provider]
name = "Mock provider for test"
base_url = "{server_uri}/v1"
wire_api = "chat"
request_max_retries = 0
stream_max_retries = 0
"#
),
)
}

View File

@@ -1,486 +0,0 @@
use anyhow::Result;
use app_test_support::McpProcess;
use app_test_support::create_final_assistant_message_sse_response;
use app_test_support::create_mock_chat_completions_server;
use app_test_support::create_mock_chat_completions_server_unchecked;
use app_test_support::create_shell_sse_response;
use app_test_support::to_response;
use codex_app_server_protocol::JSONRPCNotification;
use codex_app_server_protocol::JSONRPCResponse;
use codex_app_server_protocol::RequestId;
use codex_app_server_protocol::ServerRequest;
use codex_app_server_protocol::ThreadStartParams;
use codex_app_server_protocol::ThreadStartResponse;
use codex_app_server_protocol::TurnStartParams;
use codex_app_server_protocol::TurnStartResponse;
use codex_app_server_protocol::TurnStartedNotification;
use codex_app_server_protocol::UserInput as V2UserInput;
use codex_core::protocol_config_types::ReasoningEffort;
use codex_core::protocol_config_types::ReasoningSummary;
use codex_protocol::parse_command::ParsedCommand;
use codex_protocol::protocol::Event;
use codex_protocol::protocol::EventMsg;
use core_test_support::skip_if_no_network;
use pretty_assertions::assert_eq;
use std::path::Path;
use tempfile::TempDir;
use tokio::time::timeout;
const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10);
#[tokio::test]
async fn turn_start_emits_notifications_and_accepts_model_override() -> Result<()> {
// Provide a mock server and config so model wiring is valid.
// Three Codex turns hit the mock model (session start + two turn/start calls).
let responses = vec![
create_final_assistant_message_sse_response("Done")?,
create_final_assistant_message_sse_response("Done")?,
create_final_assistant_message_sse_response("Done")?,
];
let server = create_mock_chat_completions_server_unchecked(responses).await;
let codex_home = TempDir::new()?;
create_config_toml(codex_home.path(), &server.uri(), "never")?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
// Start a thread (v2) and capture its id.
let thread_req = mcp
.send_thread_start_request(ThreadStartParams {
model: Some("mock-model".to_string()),
..Default::default()
})
.await?;
let thread_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(thread_req)),
)
.await??;
let ThreadStartResponse { thread } = to_response::<ThreadStartResponse>(thread_resp)?;
// Start a turn with only input and thread_id set (no overrides).
let turn_req = mcp
.send_turn_start_request(TurnStartParams {
thread_id: thread.id.clone(),
input: vec![V2UserInput::Text {
text: "Hello".to_string(),
}],
..Default::default()
})
.await?;
let turn_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(turn_req)),
)
.await??;
let TurnStartResponse { turn } = to_response::<TurnStartResponse>(turn_resp)?;
assert!(!turn.id.is_empty());
// Expect a turn/started notification.
let notif: JSONRPCNotification = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("turn/started"),
)
.await??;
let started: TurnStartedNotification =
serde_json::from_value(notif.params.expect("params must be present"))?;
assert_eq!(
started.turn.status,
codex_app_server_protocol::TurnStatus::InProgress
);
// Send a second turn that exercises the overrides path: change the model.
let turn_req2 = mcp
.send_turn_start_request(TurnStartParams {
thread_id: thread.id.clone(),
input: vec![V2UserInput::Text {
text: "Second".to_string(),
}],
model: Some("mock-model-override".to_string()),
..Default::default()
})
.await?;
let turn_resp2: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(turn_req2)),
)
.await??;
let TurnStartResponse { turn: turn2 } = to_response::<TurnStartResponse>(turn_resp2)?;
assert!(!turn2.id.is_empty());
// Ensure the second turn has a different id than the first.
assert_ne!(turn.id, turn2.id);
// Expect a second turn/started notification as well.
let _notif2: JSONRPCNotification = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("turn/started"),
)
.await??;
// And we should ultimately get a task_complete without having to add a
// legacy conversation listener explicitly (auto-attached by thread/start).
let _task_complete: JSONRPCNotification = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("codex/event/task_complete"),
)
.await??;
Ok(())
}
#[tokio::test]
async fn turn_start_accepts_local_image_input() -> Result<()> {
// Two Codex turns hit the mock model (session start + turn/start).
let responses = vec![
create_final_assistant_message_sse_response("Done")?,
create_final_assistant_message_sse_response("Done")?,
];
// Use the unchecked variant because the request payload includes a LocalImage
// which the strict matcher does not currently cover.
let server = create_mock_chat_completions_server_unchecked(responses).await;
let codex_home = TempDir::new()?;
create_config_toml(codex_home.path(), &server.uri(), "never")?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let thread_req = mcp
.send_thread_start_request(ThreadStartParams {
model: Some("mock-model".to_string()),
..Default::default()
})
.await?;
let thread_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(thread_req)),
)
.await??;
let ThreadStartResponse { thread } = to_response::<ThreadStartResponse>(thread_resp)?;
let image_path = codex_home.path().join("image.png");
// No need to actually write the file; we just exercise the input path.
let turn_req = mcp
.send_turn_start_request(TurnStartParams {
thread_id: thread.id.clone(),
input: vec![V2UserInput::LocalImage { path: image_path }],
..Default::default()
})
.await?;
let turn_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(turn_req)),
)
.await??;
let TurnStartResponse { turn } = to_response::<TurnStartResponse>(turn_resp)?;
assert!(!turn.id.is_empty());
// This test only validates that turn/start responds and returns a turn.
Ok(())
}
#[tokio::test]
async fn turn_start_exec_approval_toggle_v2() -> Result<()> {
skip_if_no_network!(Ok(()));
let tmp = TempDir::new()?;
let codex_home = tmp.path().to_path_buf();
// Mock server: first turn requests a shell call (elicitation), then completes.
// Second turn same, but we'll set approval_policy=never to avoid elicitation.
let responses = vec![
create_shell_sse_response(
vec![
"python3".to_string(),
"-c".to_string(),
"print(42)".to_string(),
],
None,
Some(5000),
"call1",
)?,
create_final_assistant_message_sse_response("done 1")?,
create_shell_sse_response(
vec![
"python3".to_string(),
"-c".to_string(),
"print(42)".to_string(),
],
None,
Some(5000),
"call2",
)?,
create_final_assistant_message_sse_response("done 2")?,
];
let server = create_mock_chat_completions_server(responses).await;
// Default approval is untrusted to force elicitation on first turn.
create_config_toml(codex_home.as_path(), &server.uri(), "untrusted")?;
let mut mcp = McpProcess::new(codex_home.as_path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
// thread/start
let start_id = mcp
.send_thread_start_request(ThreadStartParams {
model: Some("mock-model".to_string()),
..Default::default()
})
.await?;
let start_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(start_id)),
)
.await??;
let ThreadStartResponse { thread } = to_response::<ThreadStartResponse>(start_resp)?;
// turn/start — expect ExecCommandApproval request from server
let first_turn_id = mcp
.send_turn_start_request(TurnStartParams {
thread_id: thread.id.clone(),
input: vec![V2UserInput::Text {
text: "run python".to_string(),
}],
..Default::default()
})
.await?;
// Acknowledge RPC
timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(first_turn_id)),
)
.await??;
// Receive elicitation
let server_req = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_request_message(),
)
.await??;
let ServerRequest::ExecCommandApproval { request_id, params } = server_req else {
panic!("expected ExecCommandApproval request");
};
assert_eq!(params.call_id, "call1");
assert_eq!(
params.parsed_cmd,
vec![ParsedCommand::Unknown {
cmd: "python3 -c 'print(42)'".to_string()
}]
);
// Approve and wait for task completion
mcp.send_response(
request_id,
serde_json::json!({ "decision": codex_core::protocol::ReviewDecision::Approved }),
)
.await?;
timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("codex/event/task_complete"),
)
.await??;
// Second turn with approval_policy=never should not elicit approval
let second_turn_id = mcp
.send_turn_start_request(TurnStartParams {
thread_id: thread.id.clone(),
input: vec![V2UserInput::Text {
text: "run python again".to_string(),
}],
approval_policy: Some(codex_app_server_protocol::AskForApproval::Never),
sandbox_policy: Some(codex_app_server_protocol::SandboxPolicy::DangerFullAccess),
model: Some("mock-model".to_string()),
effort: Some(ReasoningEffort::Medium),
summary: Some(ReasoningSummary::Auto),
..Default::default()
})
.await?;
timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(second_turn_id)),
)
.await??;
// Ensure we do NOT receive an ExecCommandApproval request before task completes
timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("codex/event/task_complete"),
)
.await??;
Ok(())
}
#[tokio::test]
async fn turn_start_updates_sandbox_and_cwd_between_turns_v2() -> Result<()> {
// When returning Result from a test, pass an Ok(()) to the skip macro
// so the early return type matches. The no-arg form returns unit.
skip_if_no_network!(Ok(()));
let tmp = TempDir::new()?;
let codex_home = tmp.path().join("codex_home");
std::fs::create_dir(&codex_home)?;
let workspace_root = tmp.path().join("workspace");
std::fs::create_dir(&workspace_root)?;
let first_cwd = workspace_root.join("turn1");
let second_cwd = workspace_root.join("turn2");
std::fs::create_dir(&first_cwd)?;
std::fs::create_dir(&second_cwd)?;
let responses = vec![
create_shell_sse_response(
vec![
"bash".to_string(),
"-lc".to_string(),
"echo first turn".to_string(),
],
None,
Some(5000),
"call-first",
)?,
create_final_assistant_message_sse_response("done first")?,
create_shell_sse_response(
vec![
"bash".to_string(),
"-lc".to_string(),
"echo second turn".to_string(),
],
None,
Some(5000),
"call-second",
)?,
create_final_assistant_message_sse_response("done second")?,
];
let server = create_mock_chat_completions_server(responses).await;
create_config_toml(&codex_home, &server.uri(), "untrusted")?;
let mut mcp = McpProcess::new(&codex_home).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
// thread/start
let start_id = mcp
.send_thread_start_request(ThreadStartParams {
model: Some("mock-model".to_string()),
..Default::default()
})
.await?;
let start_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(start_id)),
)
.await??;
let ThreadStartResponse { thread } = to_response::<ThreadStartResponse>(start_resp)?;
// first turn with workspace-write sandbox and first_cwd
let first_turn = mcp
.send_turn_start_request(TurnStartParams {
thread_id: thread.id.clone(),
input: vec![V2UserInput::Text {
text: "first turn".to_string(),
}],
cwd: Some(first_cwd.clone()),
approval_policy: Some(codex_app_server_protocol::AskForApproval::Never),
sandbox_policy: Some(codex_app_server_protocol::SandboxPolicy::WorkspaceWrite {
writable_roots: vec![first_cwd.clone()],
network_access: false,
exclude_tmpdir_env_var: false,
exclude_slash_tmp: false,
}),
model: Some("mock-model".to_string()),
effort: Some(ReasoningEffort::Medium),
summary: Some(ReasoningSummary::Auto),
})
.await?;
timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(first_turn)),
)
.await??;
timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("codex/event/task_complete"),
)
.await??;
// second turn with workspace-write and second_cwd, ensure exec begins in second_cwd
let second_turn = mcp
.send_turn_start_request(TurnStartParams {
thread_id: thread.id.clone(),
input: vec![V2UserInput::Text {
text: "second turn".to_string(),
}],
cwd: Some(second_cwd.clone()),
approval_policy: Some(codex_app_server_protocol::AskForApproval::Never),
sandbox_policy: Some(codex_app_server_protocol::SandboxPolicy::DangerFullAccess),
model: Some("mock-model".to_string()),
effort: Some(ReasoningEffort::Medium),
summary: Some(ReasoningSummary::Auto),
})
.await?;
timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(second_turn)),
)
.await??;
let exec_begin_notification = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("codex/event/exec_command_begin"),
)
.await??;
let params = exec_begin_notification
.params
.clone()
.expect("exec_command_begin params");
let event: Event = serde_json::from_value(params).expect("deserialize exec begin event");
let exec_begin = match event.msg {
EventMsg::ExecCommandBegin(exec_begin) => exec_begin,
other => panic!("expected ExecCommandBegin event, got {other:?}"),
};
assert_eq!(exec_begin.cwd, second_cwd);
assert_eq!(
exec_begin.command,
vec![
"bash".to_string(),
"-lc".to_string(),
"echo second turn".to_string()
]
);
timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("codex/event/task_complete"),
)
.await??;
Ok(())
}
// Helper to create a config.toml pointing at the mock model server.
fn create_config_toml(
codex_home: &Path,
server_uri: &str,
approval_policy: &str,
) -> std::io::Result<()> {
let config_toml = codex_home.join("config.toml");
std::fs::write(
config_toml,
format!(
r#"
model = "mock-model"
approval_policy = "{approval_policy}"
sandbox_mode = "read-only"
model_provider = "mock_provider"
[model_providers.mock_provider]
name = "Mock provider for test"
base_url = "{server_uri}/v1"
wire_api = "chat"
request_max_retries = 0
stream_max_retries = 0
"#
),
)
}
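
Aside, not part of the diff: the comment in `turn_start_updates_sandbox_and_cwd_between_turns_v2` notes that `skip_if_no_network!` has two forms. A minimal illustration of what that comment describes, with placeholder test bodies, might look like the following; only the macro import and the two invocation forms are taken from the file above, everything else is an assumption.

```rust
use core_test_support::skip_if_no_network;

#[tokio::test]
async fn unit_returning_test() {
    // No argument: the macro's early return is `()`, matching the unit return type.
    skip_if_no_network!();
}

#[tokio::test]
async fn result_returning_test() -> anyhow::Result<()> {
    // With `Ok(())`: the early return matches the test's `Result` return type.
    skip_if_no_network!(Ok(()));
    Ok(())
}
```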

View File

@@ -288,7 +288,7 @@ pub fn maybe_parse_apply_patch_verified(argv: &[String], cwd: &Path) -> MaybeApp
path,
ApplyPatchFileChange::Update {
unified_diff,
move_path: move_path.map(|p| effective_cwd.join(p)),
move_path: move_path.map(|p| cwd.join(p)),
new_content: contents,
},
);
@@ -1603,53 +1603,6 @@ g
);
}
#[test]
fn test_apply_patch_resolves_move_path_with_effective_cwd() {
let session_dir = tempdir().unwrap();
let worktree_rel = "alt";
let worktree_dir = session_dir.path().join(worktree_rel);
fs::create_dir_all(&worktree_dir).unwrap();
let source_name = "old.txt";
let dest_name = "renamed.txt";
let source_path = worktree_dir.join(source_name);
fs::write(&source_path, "before\n").unwrap();
let patch = wrap_patch(&format!(
r#"*** Update File: {source_name}
*** Move to: {dest_name}
@@
-before
+after"#
));
let shell_script = format!("cd {worktree_rel} && apply_patch <<'PATCH'\n{patch}\nPATCH");
let argv = vec!["bash".into(), "-lc".into(), shell_script];
let result = maybe_parse_apply_patch_verified(&argv, session_dir.path());
let action = match result {
MaybeApplyPatchVerified::Body(action) => action,
other => panic!("expected verified body, got {other:?}"),
};
assert_eq!(action.cwd, worktree_dir);
let change = action
.changes()
.get(&worktree_dir.join(source_name))
.expect("source file change present");
match change {
ApplyPatchFileChange::Update { move_path, .. } => {
assert_eq!(
move_path.as_deref(),
Some(worktree_dir.join(dest_name).as_path())
);
}
other => panic!("expected update change, got {other:?}"),
}
}
#[test]
fn test_apply_patch_fails_on_write_error() {
let dir = tempdir().unwrap();

View File

@@ -11,7 +11,32 @@ const LINUX_SANDBOX_ARG0: &str = "codex-linux-sandbox";
const APPLY_PATCH_ARG0: &str = "apply_patch";
const MISSPELLED_APPLY_PATCH_ARG0: &str = "applypatch";
pub fn arg0_dispatch() -> Option<TempDir> {
/// While we want to deploy the Codex CLI as a single executable for simplicity,
/// we also want to expose some of its functionality as distinct CLIs, so we use
/// the "arg0 trick" to determine which CLI to dispatch. This effectively allows
/// us to simulate deploying multiple executables as a single binary on Mac and
/// Linux (but not Windows).
///
/// When the current executable is invoked through the hard-link or alias named
/// `codex-linux-sandbox` we *directly* execute
/// [`codex_linux_sandbox::run_main`] (which never returns). Otherwise we:
///
/// 1. Load `.env` values from `~/.codex/.env` before creating any threads.
/// 2. Construct a Tokio multi-thread runtime.
/// 3. Derive the path to the current executable (so children can re-invoke the
/// sandbox) when running on Linux.
/// 4. Execute the provided async `main_fn` inside that runtime, forwarding any
/// error. Note that `main_fn` receives `codex_linux_sandbox_exe:
/// Option<PathBuf>`, as an argument, which is generally needed as part of
/// constructing [`codex_core::config::Config`].
///
/// This function should be used to wrap any `main()` function in binary crates
/// in this workspace that depends on these helper CLIs.
pub fn arg0_dispatch_or_else<F, Fut>(main_fn: F) -> anyhow::Result<()>
where
F: FnOnce(Option<PathBuf>) -> Fut,
Fut: Future<Output = anyhow::Result<()>>,
{
// Determine if we were invoked via the special alias.
let mut args = std::env::args_os();
let argv0 = args.next().unwrap_or_default();
@@ -51,7 +76,10 @@ pub fn arg0_dispatch() -> Option<TempDir> {
// before creating any threads/the Tokio runtime.
load_dotenv();
match prepend_path_entry_for_codex_aliases() {
// Retain the TempDir so it exists for the lifetime of the invocation of
// this executable. Admittedly, we could invoke `keep()` on it, but it
// would be nice to avoid leaving temporary directories behind, if possible.
let _path_entry = match prepend_path_entry_for_apply_patch() {
Ok(path_entry) => Some(path_entry),
Err(err) => {
// It is possible that Codex will proceed successfully even if
@@ -59,39 +87,7 @@ pub fn arg0_dispatch() -> Option<TempDir> {
eprintln!("WARNING: proceeding, even though we could not update PATH: {err}");
None
}
}
}
/// While we want to deploy the Codex CLI as a single executable for simplicity,
/// we also want to expose some of its functionality as distinct CLIs, so we use
/// the "arg0 trick" to determine which CLI to dispatch. This effectively allows
/// us to simulate deploying multiple executables as a single binary on Mac and
/// Linux (but not Windows).
///
/// When the current executable is invoked through the hard-link or alias named
/// `codex-linux-sandbox` we *directly* execute
/// [`codex_linux_sandbox::run_main`] (which never returns). Otherwise we:
///
/// 1. Load `.env` values from `~/.codex/.env` before creating any threads.
/// 2. Construct a Tokio multi-thread runtime.
/// 3. Derive the path to the current executable (so children can re-invoke the
/// sandbox) when running on Linux.
/// 4. Execute the provided async `main_fn` inside that runtime, forwarding any
/// error. Note that `main_fn` receives `codex_linux_sandbox_exe:
/// Option<PathBuf>`, as an argument, which is generally needed as part of
/// constructing [`codex_core::config::Config`].
///
/// This function should be used to wrap any `main()` function in binary crates
/// in this workspace that depends on these helper CLIs.
pub fn arg0_dispatch_or_else<F, Fut>(main_fn: F) -> anyhow::Result<()>
where
F: FnOnce(Option<PathBuf>) -> Fut,
Fut: Future<Output = anyhow::Result<()>>,
{
// Retain the TempDir so it exists for the lifetime of the invocation of
// this executable. Admittedly, we could invoke `keep()` on it, but it
// would be nice to avoid leaving temporary directories behind, if possible.
let _path_entry = arg0_dispatch();
};
// Regular invocation: create a Tokio runtime and execute the provided
// async entry-point.
@@ -148,16 +144,11 @@ where
///
/// IMPORTANT: This function modifies the PATH environment variable, so it MUST
/// be called before multiple threads are spawned.
pub fn prepend_path_entry_for_codex_aliases() -> std::io::Result<TempDir> {
fn prepend_path_entry_for_apply_patch() -> std::io::Result<TempDir> {
let temp_dir = TempDir::new()?;
let path = temp_dir.path();
for filename in &[
APPLY_PATCH_ARG0,
MISSPELLED_APPLY_PATCH_ARG0,
#[cfg(target_os = "linux")]
LINUX_SANDBOX_ARG0,
] {
for filename in &[APPLY_PATCH_ARG0, MISSPELLED_APPLY_PATCH_ARG0] {
let exe = std::env::current_exe()?;
#[cfg(unix)]
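
Aside, not part of the diff: the doc comment on `arg0_dispatch_or_else` says it should wrap `main()` in binary crates that depend on the helper CLIs. A sketch of that wrapping pattern, assuming only the signature shown above — the `codex_arg0` module path and the `run` helper are placeholders, not names confirmed by the diff:

```rust
use std::path::PathBuf;

fn main() -> anyhow::Result<()> {
    // `main_fn` receives the optional path to the current executable so children
    // can re-invoke the Linux sandbox, as described in the doc comment above.
    codex_arg0::arg0_dispatch_or_else(|codex_linux_sandbox_exe: Option<PathBuf>| async move {
        run(codex_linux_sandbox_exe).await
    })
}

async fn run(_codex_linux_sandbox_exe: Option<PathBuf>) -> anyhow::Result<()> {
    // A real binary would build its Config here and run its async entry point.
    Ok(())
}
```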

View File

@@ -30,17 +30,15 @@ codex-login = { workspace = true }
codex-mcp-server = { workspace = true }
codex-process-hardening = { workspace = true }
codex-protocol = { workspace = true }
codex-protocol-ts = { workspace = true }
codex-responses-api-proxy = { workspace = true }
codex-rmcp-client = { workspace = true }
codex-stdio-to-uds = { workspace = true }
codex-tui = { workspace = true }
ctor = { workspace = true }
libc = { workspace = true }
owo-colors = { workspace = true }
regex-lite = { workspace = true}
serde_json = { workspace = true }
supports-color = { workspace = true }
toml = { workspace = true }
tokio = { workspace = true, features = [
"io-std",
"macros",
@@ -48,7 +46,6 @@ tokio = { workspace = true, features = [
"rt-multi-thread",
"signal",
] }
tracing = { workspace = true }
[target.'cfg(target_os = "windows")'.dependencies]
codex_windows_sandbox = { package = "codex-windows-sandbox", path = "../windows-sandbox-rs" }

View File

@@ -1,8 +1,3 @@
#[cfg(target_os = "macos")]
mod pid_tracker;
#[cfg(target_os = "macos")]
mod seatbelt;
use std::path::PathBuf;
use codex_common::CliConfigOverrides;
@@ -10,7 +5,6 @@ use codex_core::config::Config;
use codex_core::config::ConfigOverrides;
use codex_core::exec_env::create_env;
use codex_core::landlock::spawn_command_under_linux_sandbox;
#[cfg(target_os = "macos")]
use codex_core::seatbelt::spawn_command_under_seatbelt;
use codex_core::spawn::StdioPolicy;
use codex_protocol::config_types::SandboxMode;
@@ -20,17 +14,12 @@ use crate::SeatbeltCommand;
use crate::WindowsCommand;
use crate::exit_status::handle_exit_status;
#[cfg(target_os = "macos")]
use seatbelt::DenialLogger;
#[cfg(target_os = "macos")]
pub async fn run_command_under_seatbelt(
command: SeatbeltCommand,
codex_linux_sandbox_exe: Option<PathBuf>,
) -> anyhow::Result<()> {
let SeatbeltCommand {
full_auto,
log_denials,
config_overrides,
command,
} = command;
@@ -40,19 +29,10 @@ pub async fn run_command_under_seatbelt(
config_overrides,
codex_linux_sandbox_exe,
SandboxType::Seatbelt,
log_denials,
)
.await
}
#[cfg(not(target_os = "macos"))]
pub async fn run_command_under_seatbelt(
_command: SeatbeltCommand,
_codex_linux_sandbox_exe: Option<PathBuf>,
) -> anyhow::Result<()> {
anyhow::bail!("Seatbelt sandbox is only available on macOS");
}
pub async fn run_command_under_landlock(
command: LandlockCommand,
codex_linux_sandbox_exe: Option<PathBuf>,
@@ -68,7 +48,6 @@ pub async fn run_command_under_landlock(
config_overrides,
codex_linux_sandbox_exe,
SandboxType::Landlock,
false,
)
.await
}
@@ -88,13 +67,11 @@ pub async fn run_command_under_windows(
config_overrides,
codex_linux_sandbox_exe,
SandboxType::Windows,
false,
)
.await
}
enum SandboxType {
#[cfg(target_os = "macos")]
Seatbelt,
Landlock,
Windows,
@@ -106,7 +83,6 @@ async fn run_command_under_sandbox(
config_overrides: CliConfigOverrides,
codex_linux_sandbox_exe: Option<PathBuf>,
sandbox_type: SandboxType,
log_denials: bool,
) -> anyhow::Result<()> {
let sandbox_mode = create_sandbox_mode(full_auto);
let config = Config::load_with_cli_overrides(
@@ -148,9 +124,6 @@ async fn run_command_under_sandbox(
let cwd_clone = cwd.clone();
let env_map = env.clone();
let command_vec = command.clone();
let base_dir = config.codex_home.clone();
// Preflight audit is invoked elsewhere at the appropriate times.
let res = tokio::task::spawn_blocking(move || {
run_windows_sandbox_capture(
policy_str,
@@ -159,7 +132,6 @@ async fn run_command_under_sandbox(
&cwd_clone,
env_map,
None,
Some(base_dir.as_path()),
)
})
.await;
@@ -193,13 +165,7 @@ async fn run_command_under_sandbox(
}
}
#[cfg(target_os = "macos")]
let mut denial_logger = log_denials.then(DenialLogger::new).flatten();
#[cfg(not(target_os = "macos"))]
let _ = log_denials;
let mut child = match sandbox_type {
#[cfg(target_os = "macos")]
SandboxType::Seatbelt => {
spawn_command_under_seatbelt(
command,
@@ -231,27 +197,8 @@ async fn run_command_under_sandbox(
unreachable!("Windows sandbox should have been handled above");
}
};
#[cfg(target_os = "macos")]
if let Some(denial_logger) = &mut denial_logger {
denial_logger.on_child_spawn(&child);
}
let status = child.wait().await?;
#[cfg(target_os = "macos")]
if let Some(denial_logger) = denial_logger {
let denials = denial_logger.finish().await;
eprintln!("\n=== Sandbox denials ===");
if denials.is_empty() {
eprintln!("None found.");
} else {
for seatbelt::SandboxDenial { name, capability } in denials {
eprintln!("({name}) {capability}");
}
}
}
handle_exit_status(status);
}

View File

@@ -1,372 +0,0 @@
use std::collections::HashSet;
use tokio::task::JoinHandle;
use tracing::warn;
/// Tracks the (recursive) descendants of a process by using `kqueue` to watch for fork events and
/// `proc_listchildpids` to list a process's children.
pub(crate) struct PidTracker {
kq: libc::c_int,
handle: JoinHandle<HashSet<i32>>,
}
impl PidTracker {
pub(crate) fn new(root_pid: i32) -> Option<Self> {
if root_pid <= 0 {
return None;
}
let kq = unsafe { libc::kqueue() };
let handle = tokio::task::spawn_blocking(move || track_descendants(kq, root_pid));
Some(Self { kq, handle })
}
pub(crate) async fn stop(self) -> HashSet<i32> {
trigger_stop_event(self.kq);
self.handle.await.unwrap_or_default()
}
}
unsafe extern "C" {
fn proc_listchildpids(
ppid: libc::c_int,
buffer: *mut libc::c_void,
buffersize: libc::c_int,
) -> libc::c_int;
}
/// Wrap proc_listchildpids.
fn list_child_pids(parent: i32) -> Vec<i32> {
unsafe {
let mut capacity: usize = 16;
loop {
let mut buf: Vec<i32> = vec![0; capacity];
let count = proc_listchildpids(
parent as libc::c_int,
buf.as_mut_ptr() as *mut libc::c_void,
(buf.len() * std::mem::size_of::<i32>()) as libc::c_int,
);
if count <= 0 {
return Vec::new();
}
let returned = count as usize;
if returned < capacity {
buf.truncate(returned);
return buf;
}
capacity = capacity.saturating_mul(2).max(returned + 16);
}
}
}
fn pid_is_alive(pid: i32) -> bool {
if pid <= 0 {
return false;
}
let res = unsafe { libc::kill(pid as libc::pid_t, 0) };
if res == 0 {
true
} else {
matches!(
std::io::Error::last_os_error().raw_os_error(),
Some(libc::EPERM)
)
}
}
enum WatchPidError {
ProcessGone,
Other(std::io::Error),
}
/// Add `pid` to the watch list in `kq`.
fn watch_pid(kq: libc::c_int, pid: i32) -> Result<(), WatchPidError> {
if pid <= 0 {
return Err(WatchPidError::ProcessGone);
}
let kev = libc::kevent {
ident: pid as libc::uintptr_t,
filter: libc::EVFILT_PROC,
flags: libc::EV_ADD | libc::EV_CLEAR,
fflags: libc::NOTE_FORK | libc::NOTE_EXEC | libc::NOTE_EXIT,
data: 0,
udata: std::ptr::null_mut(),
};
let res = unsafe { libc::kevent(kq, &kev, 1, std::ptr::null_mut(), 0, std::ptr::null()) };
if res < 0 {
let err = std::io::Error::last_os_error();
if err.raw_os_error() == Some(libc::ESRCH) {
Err(WatchPidError::ProcessGone)
} else {
Err(WatchPidError::Other(err))
}
} else {
Ok(())
}
}
fn watch_children(
kq: libc::c_int,
parent: i32,
seen: &mut HashSet<i32>,
active: &mut HashSet<i32>,
) {
for child_pid in list_child_pids(parent) {
add_pid_watch(kq, child_pid, seen, active);
}
}
/// Watch `pid` and its children, updating `seen` and `active` sets.
fn add_pid_watch(kq: libc::c_int, pid: i32, seen: &mut HashSet<i32>, active: &mut HashSet<i32>) {
if pid <= 0 {
return;
}
let newly_seen = seen.insert(pid);
let mut should_recurse = newly_seen;
if active.insert(pid) {
match watch_pid(kq, pid) {
Ok(()) => {
should_recurse = true;
}
Err(WatchPidError::ProcessGone) => {
active.remove(&pid);
return;
}
Err(WatchPidError::Other(err)) => {
warn!("failed to watch pid {pid}: {err}");
active.remove(&pid);
return;
}
}
}
if should_recurse {
watch_children(kq, pid, seen, active);
}
}
const STOP_IDENT: libc::uintptr_t = 1;
fn register_stop_event(kq: libc::c_int) -> bool {
let kev = libc::kevent {
ident: STOP_IDENT,
filter: libc::EVFILT_USER,
flags: libc::EV_ADD | libc::EV_CLEAR,
fflags: 0,
data: 0,
udata: std::ptr::null_mut(),
};
let res = unsafe { libc::kevent(kq, &kev, 1, std::ptr::null_mut(), 0, std::ptr::null()) };
res >= 0
}
fn trigger_stop_event(kq: libc::c_int) {
if kq < 0 {
return;
}
let kev = libc::kevent {
ident: STOP_IDENT,
filter: libc::EVFILT_USER,
flags: 0,
fflags: libc::NOTE_TRIGGER,
data: 0,
udata: std::ptr::null_mut(),
};
let _ = unsafe { libc::kevent(kq, &kev, 1, std::ptr::null_mut(), 0, std::ptr::null()) };
}
/// Put all of the above together to track all the descendants of `root_pid`.
fn track_descendants(kq: libc::c_int, root_pid: i32) -> HashSet<i32> {
if kq < 0 {
let mut seen = HashSet::new();
seen.insert(root_pid);
return seen;
}
if !register_stop_event(kq) {
let mut seen = HashSet::new();
seen.insert(root_pid);
let _ = unsafe { libc::close(kq) };
return seen;
}
let mut seen: HashSet<i32> = HashSet::new();
let mut active: HashSet<i32> = HashSet::new();
add_pid_watch(kq, root_pid, &mut seen, &mut active);
const EVENTS_CAP: usize = 32;
let mut events: [libc::kevent; EVENTS_CAP] =
unsafe { std::mem::MaybeUninit::zeroed().assume_init() };
let mut stop_requested = false;
loop {
if active.is_empty() {
if !pid_is_alive(root_pid) {
break;
}
add_pid_watch(kq, root_pid, &mut seen, &mut active);
if active.is_empty() {
continue;
}
}
let nev = unsafe {
libc::kevent(
kq,
std::ptr::null::<libc::kevent>(),
0,
events.as_mut_ptr(),
EVENTS_CAP as libc::c_int,
std::ptr::null(),
)
};
if nev < 0 {
let err = std::io::Error::last_os_error();
if err.kind() == std::io::ErrorKind::Interrupted {
continue;
}
break;
}
if nev == 0 {
continue;
}
for ev in events.iter().take(nev as usize) {
let pid = ev.ident as i32;
if ev.filter == libc::EVFILT_USER && ev.ident == STOP_IDENT {
stop_requested = true;
break;
}
if (ev.flags & libc::EV_ERROR) != 0 {
if ev.data == libc::ESRCH as isize {
active.remove(&pid);
}
continue;
}
if (ev.fflags & libc::NOTE_FORK) != 0 {
watch_children(kq, pid, &mut seen, &mut active);
}
if (ev.fflags & libc::NOTE_EXIT) != 0 {
active.remove(&pid);
}
}
if stop_requested {
break;
}
}
let _ = unsafe { libc::close(kq) };
seen
}
#[cfg(test)]
mod tests {
use super::*;
use std::process::Command;
use std::process::Stdio;
use std::time::Duration;
#[test]
fn pid_is_alive_detects_current_process() {
let pid = std::process::id() as i32;
assert!(pid_is_alive(pid));
}
#[cfg(target_os = "macos")]
#[test]
fn list_child_pids_includes_spawned_child() {
let mut child = Command::new("/bin/sleep")
.arg("5")
.stdin(Stdio::null())
.spawn()
.expect("failed to spawn child process");
let child_pid = child.id() as i32;
let parent_pid = std::process::id() as i32;
let mut found = false;
for _ in 0..100 {
if list_child_pids(parent_pid).contains(&child_pid) {
found = true;
break;
}
std::thread::sleep(Duration::from_millis(10));
}
let _ = child.kill();
let _ = child.wait();
assert!(found, "expected to find child pid {child_pid} in list");
}
#[cfg(target_os = "macos")]
#[tokio::test]
async fn pid_tracker_collects_spawned_children() {
let tracker = PidTracker::new(std::process::id() as i32).expect("failed to create tracker");
let mut child = Command::new("/bin/sleep")
.arg("0.1")
.stdin(Stdio::null())
.spawn()
.expect("failed to spawn child process");
let child_pid = child.id() as i32;
let parent_pid = std::process::id() as i32;
let _ = child.wait();
let seen = tracker.stop().await;
assert!(
seen.contains(&parent_pid),
"expected tracker to include parent pid {parent_pid}"
);
assert!(
seen.contains(&child_pid),
"expected tracker to include child pid {child_pid}"
);
}
#[cfg(target_os = "macos")]
#[tokio::test]
async fn pid_tracker_collects_bash_subshell_descendants() {
let tracker = PidTracker::new(std::process::id() as i32).expect("failed to create tracker");
let child = Command::new("/bin/bash")
.arg("-c")
.arg("(sleep 0.1 & echo $!; wait)")
.stdin(Stdio::null())
.stdout(Stdio::piped())
.stderr(Stdio::null())
.spawn()
.expect("failed to spawn bash");
let output = child.wait_with_output().unwrap().stdout;
let subshell_pid = String::from_utf8_lossy(&output)
.trim()
.parse::<i32>()
.expect("failed to parse subshell pid");
let seen = tracker.stop().await;
assert!(
seen.contains(&subshell_pid),
"expected tracker to include subshell pid {subshell_pid}"
);
}
}

View File

@@ -1,114 +0,0 @@
use std::collections::HashSet;
use tokio::io::AsyncBufReadExt;
use tokio::process::Child;
use tokio::task::JoinHandle;
use super::pid_tracker::PidTracker;
pub struct SandboxDenial {
pub name: String,
pub capability: String,
}
pub struct DenialLogger {
log_stream: Child,
pid_tracker: Option<PidTracker>,
log_reader: Option<JoinHandle<Vec<u8>>>,
}
impl DenialLogger {
pub(crate) fn new() -> Option<Self> {
let mut log_stream = start_log_stream()?;
let stdout = log_stream.stdout.take()?;
let log_reader = tokio::spawn(async move {
let mut reader = tokio::io::BufReader::new(stdout);
let mut logs = Vec::new();
let mut chunk = Vec::new();
loop {
match reader.read_until(b'\n', &mut chunk).await {
Ok(0) | Err(_) => break,
Ok(_) => {
logs.extend_from_slice(&chunk);
chunk.clear();
}
}
}
logs
});
Some(Self {
log_stream,
pid_tracker: None,
log_reader: Some(log_reader),
})
}
pub(crate) fn on_child_spawn(&mut self, child: &Child) {
if let Some(root_pid) = child.id() {
self.pid_tracker = PidTracker::new(root_pid as i32);
}
}
pub(crate) async fn finish(mut self) -> Vec<SandboxDenial> {
let pid_set = match self.pid_tracker {
Some(tracker) => tracker.stop().await,
None => Default::default(),
};
if pid_set.is_empty() {
return Vec::new();
}
let _ = self.log_stream.kill().await;
let _ = self.log_stream.wait().await;
let logs_bytes = match self.log_reader.take() {
Some(handle) => handle.await.unwrap_or_default(),
None => Vec::new(),
};
let logs = String::from_utf8_lossy(&logs_bytes);
let mut seen: HashSet<(String, String)> = HashSet::new();
let mut denials: Vec<SandboxDenial> = Vec::new();
for line in logs.lines() {
if let Ok(json) = serde_json::from_str::<serde_json::Value>(line)
&& let Some(msg) = json.get("eventMessage").and_then(|v| v.as_str())
&& let Some((pid, name, capability)) = parse_message(msg)
&& pid_set.contains(&pid)
&& seen.insert((name.clone(), capability.clone()))
{
denials.push(SandboxDenial { name, capability });
}
}
denials
}
}
fn start_log_stream() -> Option<Child> {
use std::process::Stdio;
const PREDICATE: &str = r#"(((processID == 0) AND (senderImagePath CONTAINS "/Sandbox")) OR (subsystem == "com.apple.sandbox.reporting"))"#;
tokio::process::Command::new("log")
.args(["stream", "--style", "ndjson", "--predicate", PREDICATE])
.stdin(Stdio::null())
.stdout(Stdio::piped())
.stderr(Stdio::null())
.kill_on_drop(true)
.spawn()
.ok()
}
fn parse_message(msg: &str) -> Option<(i32, String, String)> {
// Example message:
// Sandbox: processname(1234) deny(1) capability-name args...
static RE: std::sync::OnceLock<regex_lite::Regex> = std::sync::OnceLock::new();
let re = RE.get_or_init(|| {
#[expect(clippy::unwrap_used)]
regex_lite::Regex::new(r"^Sandbox:\s*(.+?)\((\d+)\)\s+deny\(.*?\)\s*(.+)$").unwrap()
});
let (_, [name, pid_str, capability]) = re.captures(msg)?.extract();
let pid = pid_str.trim().parse::<i32>().ok()?;
Some((pid, name.to_string(), capability.to_string()))
}

View File

@@ -11,10 +11,6 @@ pub struct SeatbeltCommand {
#[arg(long = "full-auto", default_value_t = false)]
pub full_auto: bool,
/// While the command runs, capture macOS sandbox denials via `log stream` and print them after exit
#[arg(long = "log-denials", default_value_t = false)]
pub log_denials: bool,
#[clap(skip)]
pub config_overrides: CliConfigOverrides,

View File

@@ -1,4 +1,3 @@
use clap::Args;
use clap::CommandFactory;
use clap::Parser;
use clap_complete::Shell;
@@ -21,17 +20,14 @@ use codex_exec::Cli as ExecCli;
use codex_responses_api_proxy::Args as ResponsesApiProxyArgs;
use codex_tui::AppExitInfo;
use codex_tui::Cli as TuiCli;
use codex_tui::update_action::UpdateAction;
use codex_tui::updates::UpdateAction;
use owo_colors::OwoColorize;
use std::path::PathBuf;
use supports_color::Stream;
mod mcp_cmd;
#[cfg(not(windows))]
mod wsl_paths;
use crate::mcp_cmd::McpCli;
use codex_core::config::Config;
use codex_core::config::ConfigOverrides;
use codex_core::features::is_known_feature_key;
@@ -83,8 +79,8 @@ enum Subcommand {
/// [experimental] Run the Codex MCP server (stdio transport).
McpServer,
/// [experimental] Run the app server or related tooling.
AppServer(AppServerCommand),
/// [experimental] Run the app server.
AppServer,
/// Generate shell completion scripts.
Completion(CompletionCommand),
@@ -100,6 +96,9 @@ enum Subcommand {
/// Resume a previous interactive session (picker by default; use --last to continue the most recent).
Resume(ResumeCommand),
/// Internal: generate TypeScript protocol bindings.
#[clap(hide = true)]
GenerateTs(GenerateTsCommand),
/// [EXPERIMENTAL] Browse tasks from Codex Cloud and apply changes locally.
#[clap(name = "cloud", alias = "cloud-tasks")]
Cloud(CloudTasksCli),
@@ -206,22 +205,6 @@ struct LogoutCommand {
}
#[derive(Debug, Parser)]
struct AppServerCommand {
/// Omit to run the app server; specify a subcommand for tooling.
#[command(subcommand)]
subcommand: Option<AppServerSubcommand>,
}
#[derive(Debug, clap::Subcommand)]
enum AppServerSubcommand {
/// [experimental] Generate TypeScript bindings for the app server protocol.
GenerateTs(GenerateTsCommand),
/// [experimental] Generate JSON Schema for the app server protocol.
GenerateJsonSchema(GenerateJsonSchemaCommand),
}
#[derive(Debug, Args)]
struct GenerateTsCommand {
/// Output directory where .ts files will be written
#[arg(short = 'o', long = "out", value_name = "DIR")]
@@ -232,13 +215,6 @@ struct GenerateTsCommand {
prettier: Option<PathBuf>,
}
#[derive(Debug, Args)]
struct GenerateJsonSchemaCommand {
/// Output directory where the schema bundle will be written
#[arg(short = 'o', long = "out", value_name = "DIR")]
out_dir: PathBuf,
}
#[derive(Debug, Parser)]
struct StdioToUdsCommand {
/// Path to the Unix domain socket to connect to.
@@ -291,30 +267,10 @@ fn handle_app_exit(exit_info: AppExitInfo) -> anyhow::Result<()> {
/// Run the update action and print the result.
fn run_update_action(action: UpdateAction) -> anyhow::Result<()> {
println!();
let (cmd, args) = action.command_args();
let cmd_str = action.command_str();
println!("Updating Codex via `{cmd_str}`...");
let status = {
#[cfg(windows)]
{
// On Windows, run via cmd.exe so .CMD/.BAT are correctly resolved (PATHEXT semantics).
std::process::Command::new("cmd")
.args(["/C", &cmd_str])
.status()?
}
#[cfg(not(windows))]
{
let (cmd, args) = action.command_args();
let command_path = crate::wsl_paths::normalize_for_wsl(cmd);
let normalized_args: Vec<String> = args
.iter()
.map(crate::wsl_paths::normalize_for_wsl)
.collect();
std::process::Command::new(&command_path)
.args(&normalized_args)
.status()?
}
};
let status = std::process::Command::new(cmd).args(args).status()?;
if !status.success() {
anyhow::bail!("`{cmd_str}` failed with status {status}");
}
@@ -431,20 +387,9 @@ async fn cli_main(codex_linux_sandbox_exe: Option<PathBuf>) -> anyhow::Result<()
prepend_config_flags(&mut mcp_cli.config_overrides, root_config_overrides.clone());
mcp_cli.run().await?;
}
Some(Subcommand::AppServer(app_server_cli)) => match app_server_cli.subcommand {
None => {
codex_app_server::run_main(codex_linux_sandbox_exe, root_config_overrides).await?;
}
Some(AppServerSubcommand::GenerateTs(gen_cli)) => {
codex_app_server_protocol::generate_ts(
&gen_cli.out_dir,
gen_cli.prettier.as_deref(),
)?;
}
Some(AppServerSubcommand::GenerateJsonSchema(gen_cli)) => {
codex_app_server_protocol::generate_json(&gen_cli.out_dir)?;
}
},
Some(Subcommand::AppServer) => {
codex_app_server::run_main(codex_linux_sandbox_exe, root_config_overrides).await?;
}
Some(Subcommand::Resume(ResumeCommand {
session_id,
last,
@@ -559,24 +504,21 @@ async fn cli_main(codex_linux_sandbox_exe: Option<PathBuf>) -> anyhow::Result<()
tokio::task::spawn_blocking(move || codex_stdio_to_uds::run(socket_path.as_path()))
.await??;
}
Some(Subcommand::GenerateTs(gen_cli)) => {
codex_protocol_ts::generate_ts(&gen_cli.out_dir, gen_cli.prettier.as_deref())?;
}
Some(Subcommand::Features(FeaturesCli { sub })) => match sub {
FeaturesSubcommand::List => {
// Respect root-level `-c` overrides plus top-level flags like `--profile`.
let mut cli_kv_overrides = root_config_overrides
let cli_kv_overrides = root_config_overrides
.parse_overrides()
.map_err(anyhow::Error::msg)?;
// Honor `--search` via the new feature toggle.
if interactive.web_search {
cli_kv_overrides.push((
"features.web_search_request".to_string(),
toml::Value::Boolean(true),
));
}
// Thread through relevant top-level flags (at minimum, `--profile`).
// Also honor `--search` since it maps to a feature toggle.
let overrides = ConfigOverrides {
config_profile: interactive.config_profile.clone(),
tools_web_search_request: interactive.web_search.then_some(true),
..Default::default()
};

View File

@@ -353,9 +353,7 @@ async fn run_login(config_overrides: &CliConfigOverrides, login_args: LoginArgs)
.context("failed to load configuration")?;
if !config.features.enabled(Feature::RmcpClient) {
bail!(
"OAuth login is only supported when [features].rmcp_client is true in config.toml. See https://github.com/openai/codex/blob/main/docs/config.md#feature-flags for details."
);
bail!("OAuth login is only supported when [feature].rmcp_client is true in config.toml.");
}
let LoginArgs { name, scopes } = login_args;

View File

@@ -1,76 +0,0 @@
use std::ffi::OsStr;
/// WSL-specific path helpers used by the updater logic.
///
/// See https://github.com/openai/codex/issues/6086.
pub fn is_wsl() -> bool {
#[cfg(target_os = "linux")]
{
if std::env::var_os("WSL_DISTRO_NAME").is_some() {
return true;
}
match std::fs::read_to_string("/proc/version") {
Ok(version) => version.to_lowercase().contains("microsoft"),
Err(_) => false,
}
}
#[cfg(not(target_os = "linux"))]
{
false
}
}
/// Convert a Windows absolute path (`C:\foo\bar` or `C:/foo/bar`) to a WSL mount path (`/mnt/c/foo/bar`).
/// Returns `None` if the input does not look like a Windows drive path.
pub fn win_path_to_wsl(path: &str) -> Option<String> {
let bytes = path.as_bytes();
if bytes.len() < 3
|| bytes[1] != b':'
|| !(bytes[2] == b'\\' || bytes[2] == b'/')
|| !bytes[0].is_ascii_alphabetic()
{
return None;
}
let drive = (bytes[0] as char).to_ascii_lowercase();
let tail = path[3..].replace('\\', "/");
if tail.is_empty() {
return Some(format!("/mnt/{drive}"));
}
Some(format!("/mnt/{drive}/{tail}"))
}
/// If under WSL and given a Windows-style path, return the equivalent `/mnt/<drive>/…` path.
/// Otherwise returns the input unchanged.
pub fn normalize_for_wsl<P: AsRef<OsStr>>(path: P) -> String {
let value = path.as_ref().to_string_lossy().to_string();
if !is_wsl() {
return value;
}
if let Some(mapped) = win_path_to_wsl(&value) {
return mapped;
}
value
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn win_to_wsl_basic() {
assert_eq!(
win_path_to_wsl(r"C:\Temp\codex.zip").as_deref(),
Some("/mnt/c/Temp/codex.zip")
);
assert_eq!(
win_path_to_wsl("D:/Work/codex.tgz").as_deref(),
Some("/mnt/d/Work/codex.tgz")
);
assert!(win_path_to_wsl("/home/user/codex").is_none());
}
#[test]
fn normalize_is_noop_on_unix_paths() {
assert_eq!(normalize_for_wsl("/home/u/x"), "/home/u/x");
}
}

View File

@@ -8,7 +8,6 @@ pub mod util;
pub use cli::Cli;
use anyhow::anyhow;
use codex_login::AuthManager;
use std::io::IsTerminal;
use std::io::Read;
use std::path::PathBuf;
@@ -57,8 +56,20 @@ async fn init_backend(user_agent_suffix: &str) -> anyhow::Result<BackendContext>
};
append_error_log(format!("startup: base_url={base_url} path_style={style}"));
let auth_manager = util::load_auth_manager().await;
let auth = match auth_manager.as_ref().and_then(AuthManager::auth) {
let auth = match codex_core::config::find_codex_home()
.ok()
.map(|home| {
let store_mode = codex_core::config::Config::load_from_base_config_with_overrides(
codex_core::config::ConfigToml::default(),
codex_core::config::ConfigOverrides::default(),
home.clone(),
)
.map(|cfg| cfg.cli_auth_credentials_store_mode)
.unwrap_or_default();
codex_login::AuthManager::new(home, false, store_mode)
})
.and_then(|am| am.auth())
{
Some(auth) => auth,
None => {
eprintln!(
@@ -1033,7 +1044,7 @@ pub async fn run_main(cli: Cli, _codex_linux_sandbox_exe: Option<PathBuf>) -> an
// Close task modal/pending apply if present before opening env modal
app.diff_overlay = None;
app.env_modal = Some(app::EnvModalState { query: String::new(), selected: 0 });
// Cache environments while the modal is open to avoid repeated fetches.
// Cache environments until user explicitly refreshes with 'r' inside the modal.
let should_fetch = app.environments.is_empty();
if should_fetch {
app.env_loading = true;
@@ -1104,7 +1115,7 @@ pub async fn run_main(cli: Cli, _codex_linux_sandbox_exe: Option<PathBuf>) -> an
let _ = tx.send(evt);
});
} else {
app.status = "No environment selected".to_string();
app.status = "No environment selected (press 'e' to choose)".to_string();
}
}
needs_redraw = true;
@@ -1302,6 +1313,18 @@ pub async fn run_main(cli: Cli, _codex_linux_sandbox_exe: Option<PathBuf>) -> an
// Environment modal key handling
match key.code {
KeyCode::Esc => { app.env_modal = None; needs_redraw = true; }
KeyCode::Char('r') | KeyCode::Char('R') => {
// Trigger refresh of environments
app.env_loading = true; app.env_error = None; needs_redraw = true;
let _ = frame_tx.send(Instant::now() + Duration::from_millis(100));
let tx = tx.clone();
tokio::spawn(async move {
let base_url = crate::util::normalize_base_url(&std::env::var("CODEX_CLOUD_TASKS_BASE_URL").unwrap_or_else(|_| "https://chatgpt.com/backend-api".to_string()));
let headers = crate::util::build_chatgpt_headers().await;
let res = crate::env_detect::list_environments(&base_url, &headers).await;
let _ = tx.send(app::AppEvent::EnvironmentsLoaded(res));
});
}
KeyCode::Char(ch) if !key.modifiers.contains(KeyModifiers::CONTROL) && !key.modifiers.contains(KeyModifiers::ALT) => {
if let Some(m) = app.env_modal.as_mut() { m.query.push(ch); }
needs_redraw = true;
@@ -1408,7 +1431,7 @@ pub async fn run_main(cli: Cli, _codex_linux_sandbox_exe: Option<PathBuf>) -> an
}
KeyCode::Char('o') | KeyCode::Char('O') => {
app.env_modal = Some(app::EnvModalState { query: String::new(), selected: 0 });
// Cache environments while the modal is open to avoid repeated fetches.
// Cache environments until user explicitly refreshes with 'r' inside the modal.
let should_fetch = app.environments.is_empty();
if should_fetch { app.env_loading = true; app.env_error = None; }
needs_redraw = true;

View File

@@ -945,7 +945,9 @@ pub fn draw_env_modal(frame: &mut Frame, area: Rect, app: &mut App) {
// Subheader with usage hints (dim cyan)
let subheader = Paragraph::new(Line::from(
"Type to search, Enter select, Esc cancel".cyan().dim(),
"Type to search, Enter select, Esc cancel; r refresh"
.cyan()
.dim(),
))
.wrap(Wrap { trim: true });
frame.render_widget(subheader, rows[0]);

View File

@@ -2,10 +2,6 @@ use base64::Engine as _;
use chrono::Utc;
use reqwest::header::HeaderMap;
use codex_core::config::Config;
use codex_core::config::ConfigOverrides;
use codex_login::AuthManager;
pub fn set_user_agent_suffix(suffix: &str) {
if let Ok(mut guard) = codex_core::default_client::USER_AGENT_SUFFIX.lock() {
guard.replace(suffix.to_string());
@@ -58,18 +54,6 @@ pub fn extract_chatgpt_account_id(token: &str) -> Option<String> {
.map(str::to_string)
}
pub async fn load_auth_manager() -> Option<AuthManager> {
// TODO: pass in cli overrides once cloud tasks properly support them.
let config = Config::load_with_cli_overrides(Vec::new(), ConfigOverrides::default())
.await
.ok()?;
Some(AuthManager::new(
config.codex_home,
false,
config.cli_auth_credentials_store_mode,
))
}
/// Build headers for ChatGPT-backed requests: `User-Agent`, optional `Authorization`,
/// and optional `ChatGPT-Account-Id`.
pub async fn build_chatgpt_headers() -> HeaderMap {
@@ -85,22 +69,31 @@ pub async fn build_chatgpt_headers() -> HeaderMap {
USER_AGENT,
HeaderValue::from_str(&ua).unwrap_or(HeaderValue::from_static("codex-cli")),
);
if let Some(am) = load_auth_manager().await
&& let Some(auth) = am.auth()
&& let Ok(tok) = auth.get_token().await
&& !tok.is_empty()
{
let v = format!("Bearer {tok}");
if let Ok(hv) = HeaderValue::from_str(&v) {
headers.insert(AUTHORIZATION, hv);
}
if let Some(acc) = auth
.get_account_id()
.or_else(|| extract_chatgpt_account_id(&tok))
&& let Ok(name) = HeaderName::from_bytes(b"ChatGPT-Account-Id")
&& let Ok(hv) = HeaderValue::from_str(&acc)
if let Ok(home) = codex_core::config::find_codex_home() {
let store_mode = codex_core::config::Config::load_from_base_config_with_overrides(
codex_core::config::ConfigToml::default(),
codex_core::config::ConfigOverrides::default(),
home.clone(),
)
.map(|cfg| cfg.cli_auth_credentials_store_mode)
.unwrap_or_default();
let am = codex_login::AuthManager::new(home, false, store_mode);
if let Some(auth) = am.auth()
&& let Ok(tok) = auth.get_token().await
&& !tok.is_empty()
{
headers.insert(name, hv);
let v = format!("Bearer {tok}");
if let Ok(hv) = HeaderValue::from_str(&v) {
headers.insert(AUTHORIZATION, hv);
}
if let Some(acc) = auth
.get_account_id()
.or_else(|| extract_chatgpt_account_id(&tok))
&& let Ok(name) = HeaderName::from_bytes(b"ChatGPT-Account-Id")
&& let Ok(hv) = HeaderValue::from_str(&acc)
{
headers.insert(name, hv);
}
}
}
headers

View File

@@ -19,8 +19,8 @@ use toml::Value;
pub struct CliConfigOverrides {
/// Override a configuration value that would otherwise be loaded from
/// `~/.codex/config.toml`. Use a dotted path (`foo.bar.baz`) to override
/// nested values. The `value` portion is parsed as TOML. If it fails to
/// parse as TOML, the raw string is used as a literal.
/// nested values. The `value` portion is parsed as JSON. If it fails to
/// parse as JSON, the raw string is used as a literal.
///
/// Examples:
/// - `-c model="o3"`
@@ -59,7 +59,7 @@ impl CliConfigOverrides {
return Err(format!("Empty key in override: {s}"));
}
// Attempt to parse as TOML. If that fails, treat it as a raw
// Attempt to parse as JSON. If that fails, treat it as a raw
// string. This allows convenient usage such as
// `-c model=o3` without the quotes.
let value: Value = match parse_toml_value(value_str) {
@@ -151,15 +151,6 @@ mod tests {
assert_eq!(v.as_integer(), Some(42));
}
#[test]
fn parses_bool() {
let true_literal = parse_toml_value("true").expect("parse");
assert_eq!(true_literal.as_bool(), Some(true));
let false_literal = parse_toml_value("false").expect("parse");
assert_eq!(false_literal.as_bool(), Some(false));
}
#[test]
fn fails_on_unquoted_string() {
assert!(parse_toml_value("hello").is_err());

View File

@@ -34,7 +34,7 @@ const PRESETS: &[ModelPreset] = &[
id: "gpt-5-codex",
model: "gpt-5-codex",
display_name: "gpt-5-codex",
description: "Optimized for codex.",
description: "Optimized for coding tasks with many tools.",
default_reasoning_effort: ReasoningEffort::Medium,
supported_reasoning_efforts: &[
ReasoningEffortPreset {
@@ -52,24 +52,6 @@ const PRESETS: &[ModelPreset] = &[
],
is_default: true,
},
ModelPreset {
id: "gpt-5-codex-mini",
model: "gpt-5-codex-mini",
display_name: "gpt-5-codex-mini",
description: "Optimized for codex. Cheaper, faster, but less capable.",
default_reasoning_effort: ReasoningEffort::Medium,
supported_reasoning_efforts: &[
ReasoningEffortPreset {
effort: ReasoningEffort::Medium,
description: "Dynamically adjusts reasoning based on the task",
},
ReasoningEffortPreset {
effort: ReasoningEffort::High,
description: "Maximizes reasoning depth for complex or ambiguous problems",
},
],
is_default: false,
},
ModelPreset {
id: "gpt-5",
model: "gpt-5",
@@ -98,13 +80,8 @@ const PRESETS: &[ModelPreset] = &[
},
];
pub fn builtin_model_presets(auth_mode: Option<AuthMode>) -> Vec<ModelPreset> {
let allow_codex_mini = matches!(auth_mode, Some(AuthMode::ChatGPT));
PRESETS
.iter()
.filter(|preset| allow_codex_mini || preset.id != "gpt-5-codex-mini")
.copied()
.collect()
pub fn builtin_model_presets(_auth_mode: Option<AuthMode>) -> Vec<ModelPreset> {
PRESETS.to_vec()
}
#[cfg(test)]

View File

@@ -32,7 +32,6 @@ codex-utils-pty = { workspace = true }
codex-utils-readiness = { workspace = true }
codex-utils-string = { workspace = true }
codex-utils-tokenizer = { workspace = true }
codex-windows-sandbox = { package = "codex-windows-sandbox", path = "../windows-sandbox-rs" }
dirs = { workspace = true }
dunce = { workspace = true }
env-flags = { workspace = true }
@@ -81,9 +80,10 @@ toml_edit = { workspace = true }
tracing = { workspace = true, features = ["log"] }
tree-sitter = { workspace = true }
tree-sitter-bash = { workspace = true }
uuid = { workspace = true, features = ["serde", "v4", "v5"] }
uuid = { workspace = true, features = ["serde", "v4"] }
which = { workspace = true }
wildmatch = { workspace = true }
codex_windows_sandbox = { package = "codex-windows-sandbox", path = "../windows-sandbox-rs" }
[target.'cfg(target_os = "linux")'.dependencies]
@@ -104,9 +104,7 @@ openssl-sys = { workspace = true, features = ["vendored"] }
[dev-dependencies]
assert_cmd = { workspace = true }
assert_matches = { workspace = true }
codex-arg0 = { workspace = true }
core_test_support = { workspace = true }
ctor = { workspace = true }
escargot = { workspace = true }
image = { workspace = true, features = ["jpeg", "png"] }
maplit = { workspace = true }

View File

@@ -16,7 +16,6 @@ You are Codex, based on GPT-5. You are running as a coding agent in the Codex CL
* If asked to make a commit or code edits and there are unrelated changes to your work or changes that you didn't make in those files, don't revert those changes.
* If the changes are in files you've touched recently, you should read carefully and understand how you can work with the changes rather than reverting them.
* If the changes are in unrelated files, just ignore them and don't revert them.
- Do not amend a commit unless explicitly requested to do so.
- While you are working, you might notice unexpected changes that you didn't make. If this happens, STOP IMMEDIATELY and ask the user how they would like to proceed.
- **NEVER** use destructive commands like `git reset --hard` or `git checkout --` unless specifically requested or approved by the user.

View File

@@ -1,14 +1,12 @@
mod storage;
use chrono::Utc;
use reqwest::StatusCode;
use serde::Deserialize;
use serde::Serialize;
#[cfg(test)]
use serial_test::serial;
use std::env;
use std::fmt::Debug;
use std::io::ErrorKind;
use std::path::Path;
use std::path::PathBuf;
use std::sync::Arc;
@@ -24,16 +22,10 @@ use crate::auth::storage::AuthStorageBackend;
use crate::auth::storage::create_auth_storage;
use crate::config::Config;
use crate::default_client::CodexHttpClient;
use crate::error::RefreshTokenFailedError;
use crate::error::RefreshTokenFailedReason;
use crate::token_data::KnownPlan as InternalKnownPlan;
use crate::token_data::PlanType as InternalPlanType;
use crate::token_data::PlanType;
use crate::token_data::TokenData;
use crate::token_data::parse_id_token;
use crate::util::try_parse_error_message;
use codex_protocol::account::PlanType as AccountPlanType;
use serde_json::Value;
use thiserror::Error;
#[derive(Debug, Clone)]
pub struct CodexAuth {
@@ -54,54 +46,18 @@ impl PartialEq for CodexAuth {
// TODO(pakrym): use token exp field to check for expiration instead
const TOKEN_REFRESH_INTERVAL: i64 = 8;
const REFRESH_TOKEN_EXPIRED_MESSAGE: &str = "Your access token could not be refreshed because your refresh token has expired. Please log out and sign in again.";
const REFRESH_TOKEN_REUSED_MESSAGE: &str = "Your access token could not be refreshed because your refresh token was already used. Please log out and sign in again.";
const REFRESH_TOKEN_INVALIDATED_MESSAGE: &str = "Your access token could not be refreshed because your refresh token was revoked. Please log out and sign in again.";
const REFRESH_TOKEN_UNKNOWN_MESSAGE: &str =
"Your access token could not be refreshed. Please log out and sign in again.";
const REFRESH_TOKEN_URL: &str = "https://auth.openai.com/oauth/token";
pub const REFRESH_TOKEN_URL_OVERRIDE_ENV_VAR: &str = "CODEX_REFRESH_TOKEN_URL_OVERRIDE";
#[derive(Debug, Error)]
pub enum RefreshTokenError {
#[error("{0}")]
Permanent(#[from] RefreshTokenFailedError),
#[error(transparent)]
Transient(#[from] std::io::Error),
}
impl RefreshTokenError {
pub fn failed_reason(&self) -> Option<RefreshTokenFailedReason> {
match self {
Self::Permanent(error) => Some(error.reason),
Self::Transient(_) => None,
}
}
fn other_with_message(message: impl Into<String>) -> Self {
Self::Transient(std::io::Error::other(message.into()))
}
}
impl From<RefreshTokenError> for std::io::Error {
fn from(err: RefreshTokenError) -> Self {
match err {
RefreshTokenError::Permanent(failed) => std::io::Error::other(failed),
RefreshTokenError::Transient(inner) => inner,
}
}
}
impl CodexAuth {
pub async fn refresh_token(&self) -> Result<String, RefreshTokenError> {
pub async fn refresh_token(&self) -> Result<String, std::io::Error> {
tracing::info!("Refreshing token");
let token_data = self.get_current_token_data().ok_or_else(|| {
RefreshTokenError::Transient(std::io::Error::other("Token data is not available."))
})?;
let token_data = self
.get_current_token_data()
.ok_or(std::io::Error::other("Token data is not available."))?;
let token = token_data.refresh_token;
let refresh_response = try_refresh_token(token, &self.client).await?;
let refresh_response = try_refresh_token(token, &self.client)
.await
.map_err(std::io::Error::other)?;
let updated = update_tokens(
&self.storage,
@@ -109,8 +65,7 @@ impl CodexAuth {
refresh_response.access_token,
refresh_response.refresh_token,
)
.await
.map_err(RefreshTokenError::from)?;
.await?;
if let Ok(mut auth_lock) = self.auth_dot_json.lock() {
*auth_lock = Some(updated.clone());
@@ -119,7 +74,7 @@ impl CodexAuth {
let access = match updated.tokens {
Some(t) => t.access_token,
None => {
return Err(RefreshTokenError::other_with_message(
return Err(std::io::Error::other(
"Token data is not available after refresh.",
));
}
@@ -144,21 +99,15 @@ impl CodexAuth {
..
}) => {
if last_refresh < Utc::now() - chrono::Duration::days(TOKEN_REFRESH_INTERVAL) {
let refresh_result = tokio::time::timeout(
let refresh_response = tokio::time::timeout(
Duration::from_secs(60),
try_refresh_token(tokens.refresh_token.clone(), &self.client),
)
.await;
let refresh_response = match refresh_result {
Ok(Ok(response)) => response,
Ok(Err(err)) => return Err(err.into()),
Err(_) => {
return Err(std::io::Error::new(
ErrorKind::TimedOut,
"timed out while refreshing OpenAI API key",
));
}
};
.await
.map_err(|_| {
std::io::Error::other("timed out while refreshing OpenAI API key")
})?
.map_err(std::io::Error::other)?;
let updated_auth_dot_json = update_tokens(
&self.storage,
@@ -204,34 +153,7 @@ impl CodexAuth {
self.get_current_token_data().and_then(|t| t.id_token.email)
}
/// Account-facing plan classification derived from the current token.
/// Returns a high-level `AccountPlanType` (e.g., Free/Plus/Pro/Team/…)
/// mapped from the ID token's internal plan value. Prefer this when you
/// need to make UI or product decisions based on the user's subscription.
pub fn account_plan_type(&self) -> Option<AccountPlanType> {
let map_known = |kp: &InternalKnownPlan| match kp {
InternalKnownPlan::Free => AccountPlanType::Free,
InternalKnownPlan::Plus => AccountPlanType::Plus,
InternalKnownPlan::Pro => AccountPlanType::Pro,
InternalKnownPlan::Team => AccountPlanType::Team,
InternalKnownPlan::Business => AccountPlanType::Business,
InternalKnownPlan::Enterprise => AccountPlanType::Enterprise,
InternalKnownPlan::Edu => AccountPlanType::Edu,
};
self.get_current_token_data()
.and_then(|t| t.id_token.chatgpt_plan_type)
.map(|pt| match pt {
InternalPlanType::Known(k) => map_known(&k),
InternalPlanType::Unknown(_) => AccountPlanType::Unknown,
})
}
/// Raw internal plan value from the ID token.
/// Exposes the underlying `token_data::PlanType` without mapping it to the
/// public `AccountPlanType`. Use this when downstream code needs to inspect
/// internal/unknown plan strings exactly as issued in the token.
pub(crate) fn get_plan_type(&self) -> Option<InternalPlanType> {
pub(crate) fn get_plan_type(&self) -> Option<PlanType> {
self.get_current_token_data()
.and_then(|t| t.id_token.chatgpt_plan_type)
}
@@ -503,7 +425,7 @@ async fn update_tokens(
async fn try_refresh_token(
refresh_token: String,
client: &CodexHttpClient,
) -> Result<RefreshResponse, RefreshTokenError> {
) -> std::io::Result<RefreshResponse> {
let refresh_request = RefreshRequest {
client_id: CLIENT_ID,
grant_type: "refresh_token",
@@ -511,93 +433,30 @@ async fn try_refresh_token(
scope: "openid profile email",
};
let endpoint = refresh_token_endpoint();
// Use shared client factory to include standard headers
let response = client
.post(endpoint.as_str())
.post("https://auth.openai.com/oauth/token")
.header("Content-Type", "application/json")
.json(&refresh_request)
.send()
.await
.map_err(|err| RefreshTokenError::Transient(std::io::Error::other(err)))?;
.map_err(std::io::Error::other)?;
let status = response.status();
if status.is_success() {
if response.status().is_success() {
let refresh_response = response
.json::<RefreshResponse>()
.await
.map_err(|err| RefreshTokenError::Transient(std::io::Error::other(err)))?;
.map_err(std::io::Error::other)?;
Ok(refresh_response)
} else {
let body = response.text().await.unwrap_or_default();
if status == StatusCode::UNAUTHORIZED {
let failed = classify_refresh_token_failure(&body);
Err(RefreshTokenError::Permanent(failed))
} else {
let message = try_parse_error_message(&body);
Err(RefreshTokenError::Transient(std::io::Error::other(
format!("Failed to refresh token: {status}: {message}"),
)))
}
Err(std::io::Error::other(format!(
"Failed to refresh token: {}: {}",
response.status(),
try_parse_error_message(&response.text().await.unwrap_or_default()),
)))
}
}
fn classify_refresh_token_failure(body: &str) -> RefreshTokenFailedError {
let code = extract_refresh_token_error_code(body);
let normalized_code = code.as_deref().map(str::to_ascii_lowercase);
let reason = match normalized_code.as_deref() {
Some("refresh_token_expired") => RefreshTokenFailedReason::Expired,
Some("refresh_token_reused") => RefreshTokenFailedReason::Exhausted,
Some("refresh_token_invalidated") => RefreshTokenFailedReason::Revoked,
_ => RefreshTokenFailedReason::Other,
};
if reason == RefreshTokenFailedReason::Other {
tracing::warn!(
backend_code = normalized_code.as_deref(),
backend_body = body,
"Encountered unknown 401 response while refreshing token"
);
}
let message = match reason {
RefreshTokenFailedReason::Expired => REFRESH_TOKEN_EXPIRED_MESSAGE.to_string(),
RefreshTokenFailedReason::Exhausted => REFRESH_TOKEN_REUSED_MESSAGE.to_string(),
RefreshTokenFailedReason::Revoked => REFRESH_TOKEN_INVALIDATED_MESSAGE.to_string(),
RefreshTokenFailedReason::Other => REFRESH_TOKEN_UNKNOWN_MESSAGE.to_string(),
};
RefreshTokenFailedError::new(reason, message)
}
fn extract_refresh_token_error_code(body: &str) -> Option<String> {
if body.trim().is_empty() {
return None;
}
let Value::Object(map) = serde_json::from_str::<Value>(body).ok()? else {
return None;
};
if let Some(error_value) = map.get("error") {
match error_value {
Value::Object(obj) => {
if let Some(code) = obj.get("code").and_then(Value::as_str) {
return Some(code.to_string());
}
}
Value::String(code) => {
return Some(code.to_string());
}
_ => {}
}
}
map.get("code").and_then(Value::as_str).map(str::to_string)
}
#[derive(Serialize)]
struct RefreshRequest {
client_id: &'static str,
@@ -616,11 +475,6 @@ struct RefreshResponse {
// Shared constant for token refresh (client id used for oauth token refresh flow)
pub const CLIENT_ID: &str = "app_EMoamEEZ73f0CkXaXp7hrann";
fn refresh_token_endpoint() -> String {
std::env::var(REFRESH_TOKEN_URL_OVERRIDE_ENV_VAR)
.unwrap_or_else(|_| REFRESH_TOKEN_URL.to_string())
}
use std::sync::RwLock;
/// Internal cached auth state.
@@ -638,9 +492,8 @@ mod tests {
use crate::config::ConfigOverrides;
use crate::config::ConfigToml;
use crate::token_data::IdTokenInfo;
use crate::token_data::KnownPlan as InternalKnownPlan;
use crate::token_data::PlanType as InternalPlanType;
use codex_protocol::account::PlanType as AccountPlanType;
use crate::token_data::KnownPlan;
use crate::token_data::PlanType;
use base64::Engine;
use codex_protocol::config_types::ForcedLoginMethod;
@@ -757,7 +610,7 @@ mod tests {
tokens: Some(TokenData {
id_token: IdTokenInfo {
email: Some("user@example.com".to_string()),
chatgpt_plan_type: Some(InternalPlanType::Known(InternalKnownPlan::Pro)),
chatgpt_plan_type: Some(PlanType::Known(KnownPlan::Pro)),
chatgpt_account_id: None,
raw_jwt: fake_jwt,
},
@@ -1011,54 +864,6 @@ mod tests {
.contains("ChatGPT login is required, but an API key is currently being used.")
);
}
#[test]
fn plan_type_maps_known_plan() {
let codex_home = tempdir().unwrap();
let _jwt = write_auth_file(
AuthFileParams {
openai_api_key: None,
chatgpt_plan_type: "pro".to_string(),
chatgpt_account_id: None,
},
codex_home.path(),
)
.expect("failed to write auth file");
let auth = super::load_auth(codex_home.path(), false, AuthCredentialsStoreMode::File)
.expect("load auth")
.expect("auth available");
pretty_assertions::assert_eq!(auth.account_plan_type(), Some(AccountPlanType::Pro));
pretty_assertions::assert_eq!(
auth.get_plan_type(),
Some(InternalPlanType::Known(InternalKnownPlan::Pro))
);
}
#[test]
fn plan_type_maps_unknown_to_unknown() {
let codex_home = tempdir().unwrap();
let _jwt = write_auth_file(
AuthFileParams {
openai_api_key: None,
chatgpt_plan_type: "mystery-tier".to_string(),
chatgpt_account_id: None,
},
codex_home.path(),
)
.expect("failed to write auth file");
let auth = super::load_auth(codex_home.path(), false, AuthCredentialsStoreMode::File)
.expect("load auth")
.expect("auth available");
pretty_assertions::assert_eq!(auth.account_plan_type(), Some(AccountPlanType::Unknown));
pretty_assertions::assert_eq!(
auth.get_plan_type(),
Some(InternalPlanType::Unknown("mystery-tier".to_string()))
);
}
}
/// Central manager providing a single source of truth for auth.json derived
@@ -1160,9 +965,7 @@ impl AuthManager {
/// Attempt to refresh the current auth token (if any). On success, reload
/// the auth state from disk so other components observe refreshed token.
/// If the token refresh fails in a permanent (nontransient) way, logs out
/// to clear invalid auth state.
pub async fn refresh_token(&self) -> Result<Option<String>, RefreshTokenError> {
pub async fn refresh_token(&self) -> std::io::Result<Option<String>> {
let auth = match self.auth() {
Some(a) => a,
None => return Ok(None),
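The 401-response classification earlier in this file keys off an error code embedded in a JSON body; a rough standalone sketch of that extraction (assuming `serde_json`, with an invented body) might look like:

// --- illustrative only: roughly mirrors the error-code extraction shown above ---
fn error_code_from_body(body: &str) -> Option<String> {
    let value: serde_json::Value = serde_json::from_str(body).ok()?;
    // Accept {"error":{"code":"..."}}, {"error":"..."}, or a top-level {"code":"..."}.
    match value.get("error") {
        Some(serde_json::Value::Object(obj)) => obj
            .get("code")
            .and_then(serde_json::Value::as_str)
            .map(str::to_string),
        Some(serde_json::Value::String(code)) => Some(code.clone()),
        _ => value
            .get("code")
            .and_then(serde_json::Value::as_str)
            .map(str::to_string),
    }
}

fn main() {
    let body = r#"{"error":{"code":"refresh_token_expired"}}"#;
    // Prints: Some("refresh_token_expired")
    println!("{:?}", error_code_from_body(body));
}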

View File

@@ -88,33 +88,17 @@ pub fn try_parse_word_only_commands_sequence(tree: &Tree, src: &str) -> Option<V
Some(commands)
}
pub fn is_well_known_sh_shell(shell: &str) -> bool {
if shell == "bash" || shell == "zsh" {
return true;
}
let shell_name = std::path::Path::new(shell)
.file_name()
.and_then(|s| s.to_str())
.unwrap_or(shell);
matches!(shell_name, "bash" | "zsh")
}
pub fn extract_bash_command(command: &[String]) -> Option<(&str, &str)> {
let [shell, flag, script] = command else {
return None;
};
if !matches!(flag.as_str(), "-lc" | "-c") || !is_well_known_sh_shell(shell) {
return None;
}
Some((shell, script))
}
/// Returns the sequence of plain commands within a `bash -lc "..."` or
/// `zsh -lc "..."` invocation when the script only contains word-only commands
/// joined by safe operators.
pub fn parse_shell_lc_plain_commands(command: &[String]) -> Option<Vec<Vec<String>>> {
let (_, script) = extract_bash_command(command)?;
let [shell, flag, script] = command else {
return None;
};
if flag != "-lc" || !(shell == "bash" || shell == "zsh") {
return None;
}
let tree = try_parse_shell(script)?;
try_parse_word_only_commands_sequence(&tree, script)

View File

@@ -31,7 +31,6 @@ use tracing::warn;
use crate::AuthManager;
use crate::auth::CodexAuth;
use crate::auth::RefreshTokenError;
use crate::chat_completions::AggregateStreamExt;
use crate::chat_completions::stream_chat_completions;
use crate::client_common::Prompt;
@@ -390,17 +389,12 @@ impl ModelClient {
&& let Some(manager) = auth_manager.as_ref()
&& let Some(auth) = auth.as_ref()
&& auth.mode == AuthMode::ChatGPT
&& let Err(err) = manager.refresh_token().await
{
let stream_error = match err {
RefreshTokenError::Permanent(failed) => {
StreamAttemptError::Fatal(CodexErr::RefreshTokenFailed(failed))
}
RefreshTokenError::Transient(other) => {
StreamAttemptError::RetryableTransportError(CodexErr::Io(other))
}
};
return Err(stream_error);
manager.refresh_token().await.map_err(|err| {
StreamAttemptError::Fatal(CodexErr::Fatal(format!(
"Failed to refresh ChatGPT credentials: {err}"
)))
})?;
}
// The OpenAI Responses endpoint returns structured JSON bodies even for 4xx/5xx
@@ -447,8 +441,6 @@ impl ModelClient {
return Err(StreamAttemptError::Fatal(codex_err));
} else if error.r#type.as_deref() == Some("usage_not_included") {
return Err(StreamAttemptError::Fatal(CodexErr::UsageNotIncluded));
} else if is_quota_exceeded_error(&error) {
return Err(StreamAttemptError::Fatal(CodexErr::QuotaExceeded));
}
}
}
@@ -846,8 +838,6 @@ async fn process_sse<S>(
Ok(error) => {
if is_context_window_error(&error) {
response_error = Some(CodexErr::ContextWindowExceeded);
} else if is_quota_exceeded_error(&error) {
response_error = Some(CodexErr::QuotaExceeded);
} else {
let delay = try_parse_retry_after(&error);
let message = error.message.clone().unwrap_or_default();
@@ -941,10 +931,8 @@ async fn stream_from_fixture(
fn rate_limit_regex() -> &'static Regex {
static RE: OnceLock<Regex> = OnceLock::new();
// Match both OpenAI-style messages like "Please try again in 1.898s"
// and Azure OpenAI-style messages like "Try again in 35 seconds".
#[expect(clippy::unwrap_used)]
RE.get_or_init(|| Regex::new(r"(?i)try again in\s*(\d+(?:\.\d+)?)\s*(s|ms|seconds?)").unwrap())
RE.get_or_init(|| Regex::new(r"Please try again in (\d+(?:\.\d+)?)(s|ms)").unwrap())
}
fn try_parse_retry_after(err: &Error) -> Option<Duration> {
@@ -952,8 +940,7 @@ fn try_parse_retry_after(err: &Error) -> Option<Duration> {
return None;
}
// parse retry hints like "try again in 1.898s" or
// "Try again in 35 seconds" using regex
// parse the Please try again in 1.898s format using regex
let re = rate_limit_regex();
if let Some(message) = &err.message
&& let Some(captures) = re.captures(message)
@@ -963,9 +950,9 @@ fn try_parse_retry_after(err: &Error) -> Option<Duration> {
if let (Some(value), Some(unit)) = (seconds, unit) {
let value = value.as_str().parse::<f64>().ok()?;
let unit = unit.as_str().to_ascii_lowercase();
let unit = unit.as_str();
if unit == "s" || unit.starts_with("second") {
if unit == "s" {
return Some(Duration::from_secs_f64(value));
} else if unit == "ms" {
return Some(Duration::from_millis(value as u64));
@@ -979,10 +966,6 @@ fn is_context_window_error(error: &Error) -> bool {
error.code.as_deref() == Some("context_length_exceeded")
}
fn is_quota_exceeded_error(error: &Error) -> bool {
error.code.as_deref() == Some("insufficient_quota")
}
#[cfg(test)]
mod tests {
use super::*;
@@ -1315,41 +1298,6 @@ mod tests {
}
}
#[tokio::test]
async fn quota_exceeded_error_is_fatal() {
let raw_error = r#"{"type":"response.failed","sequence_number":3,"response":{"id":"resp_fatal_quota","object":"response","created_at":1759771626,"status":"failed","background":false,"error":{"code":"insufficient_quota","message":"You exceeded your current quota, please check your plan and billing details. For more information on this error, read the docs: https://platform.openai.com/docs/guides/error-codes/api-errors."},"incomplete_details":null}}"#;
let sse1 = format!("event: response.failed\ndata: {raw_error}\n\n");
let provider = ModelProviderInfo {
name: "test".to_string(),
base_url: Some("https://test.com".to_string()),
env_key: Some("TEST_API_KEY".to_string()),
env_key_instructions: None,
experimental_bearer_token: None,
wire_api: WireApi::Responses,
query_params: None,
http_headers: None,
env_http_headers: None,
request_max_retries: Some(0),
stream_max_retries: Some(0),
stream_idle_timeout_ms: Some(1000),
requires_openai_auth: false,
};
let otel_event_manager = otel_event_manager();
let events = collect_events(&[sse1.as_bytes()], provider, otel_event_manager).await;
assert_eq!(events.len(), 1);
match &events[0] {
Err(err @ CodexErr::QuotaExceeded) => {
assert_eq!(err.to_string(), CodexErr::QuotaExceeded.to_string());
}
other => panic!("unexpected quota exceeded event: {other:?}"),
}
}
// ────────────────────────────
// Table-driven test from `main`
// ────────────────────────────
@@ -1479,19 +1427,6 @@ mod tests {
assert_eq!(delay, Some(Duration::from_secs_f64(1.898)));
}
#[test]
fn test_try_parse_retry_after_azure() {
let err = Error {
r#type: None,
message: Some("Rate limit exceeded. Try again in 35 seconds.".to_string()),
code: Some("rate_limit_exceeded".to_string()),
plan_type: None,
resets_at: None,
};
let delay = try_parse_retry_after(&err);
assert_eq!(delay, Some(Duration::from_secs(35)));
}
#[test]
fn error_response_deserializes_schema_known_plan_type_and_serializes_back() {
use crate::token_data::KnownPlan;

View File

@@ -6,7 +6,6 @@ use std::sync::atomic::AtomicU64;
use crate::AuthManager;
use crate::client_common::REVIEW_PROMPT;
use crate::compact;
use crate::features::Feature;
use crate::function_tool::FunctionCallError;
use crate::mcp::auth::McpAuthStatusEntry;
@@ -59,7 +58,7 @@ use crate::client_common::ResponseEvent;
use crate::config::Config;
use crate::config::types::McpServerTransportConfig;
use crate::config::types::ShellEnvironmentPolicy;
use crate::context_manager::ContextManager;
use crate::conversation_history::ConversationHistory;
use crate::environment_context::EnvironmentContext;
use crate::error::CodexErr;
use crate::error::Result as CodexResult;
@@ -67,8 +66,6 @@ use crate::error::Result as CodexResult;
use crate::exec::StreamOutput;
// Removed: legacy executor wiring replaced by ToolOrchestrator flows.
// legacy normalize_exec_result no longer used after orchestrator migration
use crate::compact::build_compacted_history;
use crate::compact::collect_user_messages;
use crate::mcp::auth::compute_auth_statuses;
use crate::mcp_connection_manager::McpConnectionManager;
use crate::model_family::find_family_for_model;
@@ -97,7 +94,6 @@ use crate::protocol::Submission;
use crate::protocol::TokenCountEvent;
use crate::protocol::TokenUsage;
use crate::protocol::TurnDiffEvent;
use crate::protocol::WarningEvent;
use crate::rollout::RolloutRecorder;
use crate::rollout::RolloutRecorderParams;
use crate::shell;
@@ -133,6 +129,10 @@ use codex_protocol::user_input::UserInput;
use codex_utils_readiness::Readiness;
use codex_utils_readiness::ReadinessFlag;
pub mod compact;
use self::compact::build_compacted_history;
use self::compact::collect_user_messages;
/// The high-level interface to the Codex system.
/// It operates as a queue pair where you send submissions and receive events.
pub struct Codex {
@@ -553,7 +553,7 @@ impl Session {
None
} else {
Some(format!(
"Enable it with `--enable {canonical}` or `[features].{canonical}` in config.toml. See https://github.com/openai/codex/blob/main/docs/config.md#feature-flags for details."
"You can either enable it using the CLI with `--enable {canonical}` or through the config.toml file with `[features].{canonical}`"
))
};
post_session_configured_events.push(Event {
@@ -675,34 +675,6 @@ impl Session {
let rollout_items = conversation_history.get_rollout_items();
let persist = matches!(conversation_history, InitialHistory::Forked(_));
// If resuming, warn when the last recorded model differs from the current one.
if let InitialHistory::Resumed(_) = conversation_history
&& let Some(prev) = rollout_items.iter().rev().find_map(|it| {
if let RolloutItem::TurnContext(ctx) = it {
Some(ctx.model.as_str())
} else {
None
}
})
{
let curr = turn_context.client.get_model();
if prev != curr {
warn!(
"resuming session with different model: previous={prev}, current={curr}"
);
self.send_event(
&turn_context,
EventMsg::Warning(WarningEvent {
message: format!(
"This session was recorded with model `{prev}` but is resuming with `{curr}`. \
Consider switching back to `{prev}` as it may affect Codex performance."
),
}),
)
.await;
}
}
// Always add response items to conversation history
let reconstructed_history =
self.reconstruct_history_from_rollout(&turn_context, &rollout_items);
@@ -973,7 +945,7 @@ impl Session {
turn_context: &TurnContext,
rollout_items: &[RolloutItem],
) -> Vec<ResponseItem> {
let mut history = ContextManager::new();
let mut history = ConversationHistory::new();
for item in rollout_items {
match item {
RolloutItem::ResponseItem(response_item) => {
@@ -996,7 +968,7 @@ impl Session {
}
/// Append ResponseItems to the in-memory conversation history only.
pub(crate) async fn record_into_history(&self, items: &[ResponseItem]) {
async fn record_into_history(&self, items: &[ResponseItem]) {
let mut state = self.state.lock().await;
state.record_items(items.iter());
}
@@ -1048,7 +1020,7 @@ impl Session {
items
}
pub(crate) async fn persist_rollout_items(&self, items: &[RolloutItem]) {
async fn persist_rollout_items(&self, items: &[RolloutItem]) {
let recorder = {
let guard = self.services.rollout.lock().await;
guard.clone()
@@ -1060,12 +1032,12 @@ impl Session {
}
}
pub(crate) async fn clone_history(&self) -> ContextManager {
pub(crate) async fn clone_history(&self) -> ConversationHistory {
let state = self.state.lock().await;
state.clone_history()
}
pub(crate) async fn update_token_usage_info(
async fn update_token_usage_info(
&self,
turn_context: &TurnContext,
token_usage: Option<&TokenUsage>,
@@ -1082,7 +1054,7 @@ impl Session {
self.send_token_count_event(turn_context).await;
}
pub(crate) async fn update_rate_limits(
async fn update_rate_limits(
&self,
turn_context: &TurnContext,
new_rate_limits: RateLimitSnapshot,
@@ -1103,7 +1075,7 @@ impl Session {
self.send_event(turn_context, event).await;
}
pub(crate) async fn set_total_tokens_full(&self, turn_context: &TurnContext) {
async fn set_total_tokens_full(&self, turn_context: &TurnContext) {
let context_window = turn_context.client.get_model_context_window();
if let Some(context_window) = context_window {
{
@@ -1146,11 +1118,7 @@ impl Session {
self.send_event(turn_context, event).await;
}
pub(crate) async fn notify_stream_error(
&self,
turn_context: &TurnContext,
message: impl Into<String>,
) {
async fn notify_stream_error(&self, turn_context: &TurnContext, message: impl Into<String>) {
let event = EventMsg::StreamError(StreamErrorEvent {
message: message.into(),
});
@@ -1675,7 +1643,8 @@ async fn spawn_review_thread(
let mut review_features = config.features.clone();
review_features
.disable(crate::features::Feature::WebSearchRequest)
.disable(crate::features::Feature::ViewImageTool);
.disable(crate::features::Feature::ViewImageTool)
.disable(crate::features::Feature::StreamableShell);
let tools_config = ToolsConfig::new(&ToolsConfigParams {
model_family: &review_model_family,
features: &review_features,
@@ -1804,14 +1773,19 @@ pub(crate) async fn run_task(
sess.clone_history().await.get_history_for_prompt()
};
let turn_input_messages = turn_input
let turn_input_messages: Vec<String> = turn_input
.iter()
.filter_map(|item| match parse_turn_item(item) {
Some(TurnItem::UserMessage(user_message)) => Some(user_message),
.filter_map(|item| match item {
ResponseItem::Message { content, .. } => Some(content),
_ => None,
})
.map(|user_message| user_message.message())
.collect::<Vec<String>>();
.flat_map(|content| {
content.iter().filter_map(|item| match item {
ContentItem::OutputText { text } => Some(text.clone()),
_ => None,
})
})
.collect();
match run_turn(
Arc::clone(&sess),
Arc::clone(&turn_context),
@@ -1959,8 +1933,6 @@ async fn run_turn(
return Err(CodexErr::UsageLimitReached(e));
}
Err(CodexErr::UsageNotIncluded) => return Err(CodexErr::UsageNotIncluded),
Err(e @ CodexErr::QuotaExceeded) => return Err(e),
Err(e @ CodexErr::RefreshTokenFailed(_)) => return Err(e),
Err(e) => {
// Use the configured provider-specific stream retry budget.
let max_retries = turn_context.client.get_provider().stream_max_retries();
@@ -1979,7 +1951,7 @@ async fn run_turn(
// at a seemingly frozen screen.
sess.notify_stream_error(
&turn_context,
format!("Reconnecting... {retries}/{max_retries}"),
format!("Re-connecting... {retries}/{max_retries}"),
)
.await;
@@ -2352,7 +2324,6 @@ mod tests {
use crate::tools::context::ToolOutput;
use crate::tools::context::ToolPayload;
use crate::tools::handlers::ShellHandler;
use crate::tools::handlers::UnifiedExecHandler;
use crate::tools::registry::ToolHandler;
use crate::turn_diff_tracker::TurnDiffTracker;
use codex_app_server_protocol::AuthMode;
@@ -2868,7 +2839,7 @@ mod tests {
turn_context: &TurnContext,
) -> (Vec<RolloutItem>, Vec<ResponseItem>) {
let mut rollout_items = Vec::new();
let mut live_history = ContextManager::new();
let mut live_history = ConversationHistory::new();
let initial_context = session.build_initial_context(turn_context);
for item in &initial_context {
@@ -3092,48 +3063,6 @@ mod tests {
assert!(exec_output.output.contains("hi"));
}
#[tokio::test]
async fn unified_exec_rejects_escalated_permissions_when_policy_not_on_request() {
use crate::protocol::AskForApproval;
use crate::turn_diff_tracker::TurnDiffTracker;
let (session, mut turn_context_raw) = make_session_and_context();
turn_context_raw.approval_policy = AskForApproval::OnFailure;
let session = Arc::new(session);
let turn_context = Arc::new(turn_context_raw);
let tracker = Arc::new(tokio::sync::Mutex::new(TurnDiffTracker::new()));
let handler = UnifiedExecHandler;
let resp = handler
.handle(ToolInvocation {
session: Arc::clone(&session),
turn: Arc::clone(&turn_context),
tracker: Arc::clone(&tracker),
call_id: "exec-call".to_string(),
tool_name: "exec_command".to_string(),
payload: ToolPayload::Function {
arguments: serde_json::json!({
"cmd": "echo hi",
"with_escalated_permissions": true,
"justification": "need unsandboxed execution",
})
.to_string(),
},
})
.await;
let Err(FunctionCallError::RespondToModel(output)) = resp else {
panic!("expected error result");
};
let expected = format!(
"approval policy is {policy:?}; reject command — you cannot ask for escalated permissions if the approval policy is {policy:?}",
policy = turn_context.approval_policy
);
pretty_assertions::assert_eq!(output, expected);
}
#[test]
fn mcp_init_error_display_prompts_for_github_pat() {
let server_name = "github";

View File

@@ -1,10 +1,10 @@
use std::sync::Arc;
use super::Session;
use super::TurnContext;
use super::get_last_assistant_message_from_turn;
use crate::Prompt;
use crate::client_common::ResponseEvent;
use crate::codex::Session;
use crate::codex::TurnContext;
use crate::codex::get_last_assistant_message_from_turn;
use crate::error::CodexErr;
use crate::error::Result as CodexResult;
use crate::protocol::AgentMessageEvent;
@@ -16,6 +16,7 @@ use crate::protocol::TurnContextItem;
use crate::protocol::WarningEvent;
use crate::truncate::truncate_middle;
use crate::util::backoff;
use askama::Template;
use codex_protocol::items::TurnItem;
use codex_protocol::models::ContentItem;
use codex_protocol::models::ResponseInputItem;
@@ -25,9 +26,16 @@ use codex_protocol::user_input::UserInput;
use futures::prelude::*;
use tracing::error;
pub const SUMMARIZATION_PROMPT: &str = include_str!("../templates/compact/prompt.md");
pub const SUMMARIZATION_PROMPT: &str = include_str!("../../templates/compact/prompt.md");
const COMPACT_USER_MESSAGE_MAX_TOKENS: usize = 20_000;
#[derive(Template)]
#[template(path = "compact/history_bridge.md", escape = "none")]
struct HistoryBridgeTemplate<'a> {
user_messages_text: &'a str,
summary_text: &'a str,
}
pub(crate) async fn run_inline_auto_compact_task(
sess: Arc<Session>,
turn_context: Arc<TurnContext>,
@@ -142,7 +150,6 @@ async fn run_compact_task_inner(
let history_snapshot = sess.clone_history().await.get_history();
let summary_text = get_last_assistant_message_from_turn(&history_snapshot).unwrap_or_default();
let user_messages = collect_user_messages(&history_snapshot);
let initial_context = sess.build_initial_context(turn_context.as_ref());
let mut new_history = build_compacted_history(initial_context, &user_messages, &summary_text);
let ghost_snapshots: Vec<ResponseItem> = history_snapshot
@@ -164,7 +171,7 @@ async fn run_compact_task_inner(
sess.send_event(&turn_context, event).await;
let warning = EventMsg::Warning(WarningEvent {
message: "Heads up: Long conversations and multiple compactions can cause the model to be less accurate. Start a new conversation when possible to keep conversations small and targeted.".to_string(),
message: "Heads up: Long conversations and multiple compactions can cause the model to be less accurate. Start new a new conversation when possible to keep conversations small and targeted.".to_string(),
});
sess.send_event(&turn_context, warning).await;
}
@@ -217,47 +224,33 @@ fn build_compacted_history_with_limit(
summary_text: &str,
max_bytes: usize,
) -> Vec<ResponseItem> {
let mut selected_messages: Vec<String> = Vec::new();
if max_bytes > 0 {
let mut remaining = max_bytes;
for message in user_messages.iter().rev() {
if remaining == 0 {
break;
}
if message.len() <= remaining {
selected_messages.push(message.clone());
remaining = remaining.saturating_sub(message.len());
} else {
let (truncated, _) = truncate_middle(message, remaining);
selected_messages.push(truncated);
break;
}
}
selected_messages.reverse();
let mut user_messages_text = if user_messages.is_empty() {
"(none)".to_string()
} else {
user_messages.join("\n\n")
};
// Truncate the concatenated prior user messages so the bridge message
// stays well under the context window (approx. 4 bytes/token).
if user_messages_text.len() > max_bytes {
user_messages_text = truncate_middle(&user_messages_text, max_bytes).0;
}
for message in &selected_messages {
history.push(ResponseItem::Message {
id: None,
role: "user".to_string(),
content: vec![ContentItem::InputText {
text: message.clone(),
}],
});
}
let summary_text = if summary_text.is_empty() {
"(no summary available)".to_string()
} else {
summary_text.to_string()
};
let Ok(bridge) = HistoryBridgeTemplate {
user_messages_text: &user_messages_text,
summary_text: &summary_text,
}
.render() else {
return vec![];
};
history.push(ResponseItem::Message {
id: None,
role: "user".to_string(),
content: vec![ContentItem::InputText { text: summary_text }],
content: vec![ContentItem::InputText { text: bridge }],
});
history
}
@@ -397,55 +390,30 @@ mod tests {
"SUMMARY",
max_bytes,
);
assert_eq!(history.len(), 2);
let truncated_message = &history[0];
let summary_message = &history[1];
// Expect exactly one bridge message added to history (plus any initial context we provided, which is none).
assert_eq!(history.len(), 1);
let truncated_text = match truncated_message {
// Extract the text content of the bridge message.
let bridge_text = match &history[0] {
ResponseItem::Message { role, content, .. } if role == "user" => {
content_items_to_text(content).unwrap_or_default()
}
other => panic!("unexpected item in history: {other:?}"),
};
// The bridge should contain the truncation marker and not the full original payload.
assert!(
truncated_text.contains("tokens truncated"),
"expected truncation marker in truncated user message"
bridge_text.contains("tokens truncated"),
"expected truncation marker in bridge message"
);
assert!(
!truncated_text.contains(&big),
"truncated user message should not include the full oversized user text"
!bridge_text.contains(&big),
"bridge should not include the full oversized user text"
);
let summary_text = match summary_message {
ResponseItem::Message { role, content, .. } if role == "user" => {
content_items_to_text(content).unwrap_or_default()
}
other => panic!("unexpected item in history: {other:?}"),
};
assert_eq!(summary_text, "SUMMARY");
}
#[test]
fn build_compacted_history_appends_summary_message() {
let initial_context: Vec<ResponseItem> = Vec::new();
let user_messages = vec!["first user message".to_string()];
let summary_text = "summary text";
let history = build_compacted_history(initial_context, &user_messages, summary_text);
assert!(
!history.is_empty(),
"expected compacted history to include summary"
bridge_text.contains("SUMMARY"),
"bridge should include the provided summary text"
);
let last = history.last().expect("history should have a summary entry");
let summary = match last {
ResponseItem::Message { role, content, .. } if role == "user" => {
content_items_to_text(content).unwrap_or_default()
}
other => panic!("expected summary message, found {other:?}"),
};
assert_eq!(summary, summary_text);
}
}
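For orientation, the compaction path above joins the prior user messages, middle-truncates the joined text to the byte budget (roughly 4 bytes per token of COMPACT_USER_MESSAGE_MAX_TOKENS), and renders it together with the summary through the compact/history_bridge.md template into a single bridge message. A standalone sketch of that middle-truncation shape — the helper name and marker text here are illustrative, not the crate's actual truncate_middle:
// Illustrative only: keep roughly half the budget from the head and half from
// the tail, cutting on char boundaries so multi-byte text never splits.
fn truncate_middle_sketch(text: &str, max_bytes: usize) -> String {
    if text.len() <= max_bytes {
        return text.to_string();
    }
    let keep = max_bytes / 2;
    let mut head_end = keep.min(text.len());
    while !text.is_char_boundary(head_end) {
        head_end -= 1;
    }
    let mut tail_start = text.len() - keep.min(text.len());
    while !text.is_char_boundary(tail_start) {
        tail_start += 1;
    }
    format!(
        "{}\n[... middle truncated ...]\n{}",
        &text[..head_end],
        &text[tail_start..]
    )
}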

View File

@@ -158,11 +158,6 @@ async fn forward_events(
) {
while let Ok(event) = codex.next_event().await {
match event {
// ignore all legacy delta events
Event {
id: _,
msg: EventMsg::AgentMessageDelta(_) | EventMsg::AgentReasoningDelta(_),
} => continue,
Event {
id: _,
msg: EventMsg::SessionConfigured(_),

View File

@@ -1,38 +1,4 @@
use codex_protocol::protocol::AskForApproval;
use codex_protocol::protocol::SandboxPolicy;
use crate::bash::parse_shell_lc_plain_commands;
use crate::is_safe_command::is_known_safe_command;
pub fn requires_initial_appoval(
policy: AskForApproval,
sandbox_policy: &SandboxPolicy,
command: &[String],
with_escalated_permissions: bool,
) -> bool {
if is_known_safe_command(command) {
return false;
}
match policy {
AskForApproval::Never | AskForApproval::OnFailure => false,
AskForApproval::OnRequest => {
// In DangerFullAccess, only prompt if the command looks dangerous.
if matches!(sandbox_policy, SandboxPolicy::DangerFullAccess) {
return command_might_be_dangerous(command);
}
// In restricted sandboxes (ReadOnly/WorkspaceWrite), do not prompt for
// nonescalated, nondangerous commands — let the sandbox enforce
// restrictions (e.g., block network/write) without a user prompt.
let wants_escalation: bool = with_escalated_permissions;
if wants_escalation {
return true;
}
command_might_be_dangerous(command)
}
AskForApproval::UnlessTrusted => !is_known_safe_command(command),
}
}
pub fn command_might_be_dangerous(command: &[String]) -> bool {
if is_dangerous_to_call_with_exec(command) {

View File

@@ -23,10 +23,6 @@ pub enum ConfigEdit {
},
/// Toggle the acknowledgement flag under `[notice]`.
SetNoticeHideFullAccessWarning(bool),
/// Toggle the Windows world-writable directories warning acknowledgement flag.
SetNoticeHideWorldWritableWarning(bool),
/// Toggle the rate limit model nudge acknowledgement flag.
SetNoticeHideRateLimitModelNudge(bool),
/// Toggle the Windows onboarding acknowledgement flag.
SetWindowsWslSetupAcknowledged(bool),
/// Replace the entire `[mcp_servers]` table.
@@ -243,16 +239,6 @@ impl ConfigDocument {
&[Notice::TABLE_KEY, "hide_full_access_warning"],
value(*acknowledged),
)),
ConfigEdit::SetNoticeHideWorldWritableWarning(acknowledged) => Ok(self.write_value(
Scope::Global,
&[Notice::TABLE_KEY, "hide_world_writable_warning"],
value(*acknowledged),
)),
ConfigEdit::SetNoticeHideRateLimitModelNudge(acknowledged) => Ok(self.write_value(
Scope::Global,
&[Notice::TABLE_KEY, "hide_rate_limit_model_nudge"],
value(*acknowledged),
)),
ConfigEdit::SetWindowsWslSetupAcknowledged(acknowledged) => Ok(self.write_value(
Scope::Global,
&["windows_wsl_setup_acknowledged"],
@@ -487,18 +473,6 @@ impl ConfigEditsBuilder {
self
}
pub fn set_hide_world_writable_warning(mut self, acknowledged: bool) -> Self {
self.edits
.push(ConfigEdit::SetNoticeHideWorldWritableWarning(acknowledged));
self
}
pub fn set_hide_rate_limit_model_nudge(mut self, acknowledged: bool) -> Self {
self.edits
.push(ConfigEdit::SetNoticeHideRateLimitModelNudge(acknowledged));
self
}
pub fn set_windows_wsl_setup_acknowledged(mut self, acknowledged: bool) -> Self {
self.edits
.push(ConfigEdit::SetWindowsWslSetupAcknowledged(acknowledged));
@@ -746,34 +720,6 @@ hide_full_access_warning = true
assert_eq!(contents, expected);
}
#[test]
fn blocking_set_hide_rate_limit_model_nudge_preserves_table() {
let tmp = tempdir().expect("tmpdir");
let codex_home = tmp.path();
std::fs::write(
codex_home.join(CONFIG_TOML_FILE),
r#"[notice]
existing = "value"
"#,
)
.expect("seed");
apply_blocking(
codex_home,
None,
&[ConfigEdit::SetNoticeHideRateLimitModelNudge(true)],
)
.expect("persist");
let contents =
std::fs::read_to_string(codex_home.join(CONFIG_TOML_FILE)).expect("read config");
let expected = r#"[notice]
existing = "value"
hide_rate_limit_model_nudge = true
"#;
assert_eq!(contents, expected);
}
#[test]
fn blocking_replace_mcp_servers_round_trips() {
let tmp = tempdir().expect("tmpdir");

View File

@@ -241,6 +241,8 @@ pub struct Config {
/// When `true`, run a model-based assessment for commands denied by the sandbox.
pub experimental_sandbox_command_assessment: bool,
pub use_experimental_streamable_shell_tool: bool,
/// If set to `true`, use only the experimental unified exec tool.
pub use_experimental_unified_exec_tool: bool,
@@ -653,6 +655,7 @@ pub struct ConfigToml {
/// Legacy, now use features
pub experimental_instructions_file: Option<PathBuf>,
pub experimental_compact_prompt_file: Option<PathBuf>,
pub experimental_use_exec_command_tool: Option<bool>,
pub experimental_use_unified_exec_tool: Option<bool>,
pub experimental_use_rmcp_client: Option<bool>,
pub experimental_use_freeform_apply_patch: Option<bool>,
@@ -996,6 +999,7 @@ impl Config {
let include_apply_patch_tool_flag = features.enabled(Feature::ApplyPatchFreeform);
let tools_web_search_request = features.enabled(Feature::WebSearchRequest);
let use_experimental_streamable_shell_tool = features.enabled(Feature::StreamableShell);
let use_experimental_unified_exec_tool = features.enabled(Feature::UnifiedExec);
let use_experimental_use_rmcp_client = features.enabled(Feature::RmcpClient);
let experimental_sandbox_command_assessment =
@@ -1152,6 +1156,7 @@ impl Config {
include_apply_patch_tool: include_apply_patch_tool_flag,
tools_web_search_request,
experimental_sandbox_command_assessment,
use_experimental_streamable_shell_tool,
use_experimental_unified_exec_tool,
use_experimental_use_rmcp_client,
features,
@@ -1710,6 +1715,7 @@ trust_level = "trusted"
fn legacy_toggles_map_to_features() -> std::io::Result<()> {
let codex_home = TempDir::new()?;
let cfg = ConfigToml {
experimental_use_exec_command_tool: Some(true),
experimental_use_unified_exec_tool: Some(true),
experimental_use_rmcp_client: Some(true),
experimental_use_freeform_apply_patch: Some(true),
@@ -1723,11 +1729,12 @@ trust_level = "trusted"
)?;
assert!(config.features.enabled(Feature::ApplyPatchFreeform));
assert!(config.features.enabled(Feature::StreamableShell));
assert!(config.features.enabled(Feature::UnifiedExec));
assert!(config.features.enabled(Feature::RmcpClient));
assert!(config.include_apply_patch_tool);
assert!(config.use_experimental_streamable_shell_tool);
assert!(config.use_experimental_unified_exec_tool);
assert!(config.use_experimental_use_rmcp_client);
@@ -2895,6 +2902,7 @@ model_verbosity = "high"
include_apply_patch_tool: false,
tools_web_search_request: false,
experimental_sandbox_command_assessment: false,
use_experimental_streamable_shell_tool: false,
use_experimental_unified_exec_tool: false,
use_experimental_use_rmcp_client: false,
features: Features::with_defaults(),
@@ -2966,6 +2974,7 @@ model_verbosity = "high"
include_apply_patch_tool: false,
tools_web_search_request: false,
experimental_sandbox_command_assessment: false,
use_experimental_streamable_shell_tool: false,
use_experimental_unified_exec_tool: false,
use_experimental_use_rmcp_client: false,
features: Features::with_defaults(),
@@ -3052,6 +3061,7 @@ model_verbosity = "high"
include_apply_patch_tool: false,
tools_web_search_request: false,
experimental_sandbox_command_assessment: false,
use_experimental_streamable_shell_tool: false,
use_experimental_unified_exec_tool: false,
use_experimental_use_rmcp_client: false,
features: Features::with_defaults(),
@@ -3124,6 +3134,7 @@ model_verbosity = "high"
include_apply_patch_tool: false,
tools_web_search_request: false,
experimental_sandbox_command_assessment: false,
use_experimental_streamable_shell_tool: false,
use_experimental_unified_exec_tool: false,
use_experimental_use_rmcp_client: false,
features: Features::with_defaults(),

View File

@@ -25,6 +25,7 @@ pub struct ConfigProfile {
pub experimental_compact_prompt_file: Option<PathBuf>,
pub include_apply_patch_tool: Option<bool>,
pub experimental_use_unified_exec_tool: Option<bool>,
pub experimental_use_exec_command_tool: Option<bool>,
pub experimental_use_rmcp_client: Option<bool>,
pub experimental_use_freeform_apply_patch: Option<bool>,
pub experimental_sandbox_command_assessment: Option<bool>,

View File

@@ -358,10 +358,6 @@ pub struct Tui {
pub struct Notice {
/// Tracks whether the user has acknowledged the full access warning prompt.
pub hide_full_access_warning: Option<bool>,
/// Tracks whether the user has acknowledged the Windows world-writable directories warning.
pub hide_world_writable_warning: Option<bool>,
/// Tracks whether the user opted out of the rate limit model switch reminder.
pub hide_rate_limit_model_nudge: Option<bool>,
}
impl Notice {

View File

@@ -1,174 +0,0 @@
use codex_protocol::models::FunctionCallOutputPayload;
use codex_protocol::models::ResponseItem;
use codex_protocol::protocol::TokenUsage;
use codex_protocol::protocol::TokenUsageInfo;
use std::ops::Deref;
use crate::context_manager::normalize;
use crate::context_manager::truncate::format_output_for_model_body;
use crate::context_manager::truncate::globally_truncate_function_output_items;
/// Transcript of conversation history
#[derive(Debug, Clone, Default)]
pub(crate) struct ContextManager {
/// The oldest items are at the beginning of the vector.
items: Vec<ResponseItem>,
token_info: Option<TokenUsageInfo>,
}
impl ContextManager {
pub(crate) fn new() -> Self {
Self {
items: Vec::new(),
token_info: TokenUsageInfo::new_or_append(&None, &None, None),
}
}
pub(crate) fn token_info(&self) -> Option<TokenUsageInfo> {
self.token_info.clone()
}
pub(crate) fn set_token_usage_full(&mut self, context_window: i64) {
match &mut self.token_info {
Some(info) => info.fill_to_context_window(context_window),
None => {
self.token_info = Some(TokenUsageInfo::full_context_window(context_window));
}
}
}
/// `items` is ordered from oldest to newest.
pub(crate) fn record_items<I>(&mut self, items: I)
where
I: IntoIterator,
I::Item: std::ops::Deref<Target = ResponseItem>,
{
for item in items {
let item_ref = item.deref();
let is_ghost_snapshot = matches!(item_ref, ResponseItem::GhostSnapshot { .. });
if !is_api_message(item_ref) && !is_ghost_snapshot {
continue;
}
let processed = Self::process_item(&item);
self.items.push(processed);
}
}
pub(crate) fn get_history(&mut self) -> Vec<ResponseItem> {
self.normalize_history();
self.contents()
}
// Returns the history prepared for sending to the model.
// With extra response items filtered out and GhostCommits removed.
pub(crate) fn get_history_for_prompt(&mut self) -> Vec<ResponseItem> {
let mut history = self.get_history();
Self::remove_ghost_snapshots(&mut history);
history
}
pub(crate) fn remove_first_item(&mut self) {
if !self.items.is_empty() {
// Remove the oldest item (front of the list). Items are ordered from
// oldest → newest, so index 0 is the first entry recorded.
let removed = self.items.remove(0);
// If the removed item participates in a call/output pair, also remove
// its corresponding counterpart to keep the invariants intact without
// running a full normalization pass.
normalize::remove_corresponding_for(&mut self.items, &removed);
}
}
pub(crate) fn replace(&mut self, items: Vec<ResponseItem>) {
self.items = items;
}
pub(crate) fn update_token_info(
&mut self,
usage: &TokenUsage,
model_context_window: Option<i64>,
) {
self.token_info = TokenUsageInfo::new_or_append(
&self.token_info,
&Some(usage.clone()),
model_context_window,
);
}
/// This function enforces a couple of invariants on the in-memory history:
/// 1. every call (function/custom) has a corresponding output entry
/// 2. every output has a corresponding call entry
fn normalize_history(&mut self) {
// all function/tool calls must have a corresponding output
normalize::ensure_call_outputs_present(&mut self.items);
// all outputs must have a corresponding function/tool call
normalize::remove_orphan_outputs(&mut self.items);
}
/// Returns a clone of the contents in the transcript.
fn contents(&self) -> Vec<ResponseItem> {
self.items.clone()
}
fn remove_ghost_snapshots(items: &mut Vec<ResponseItem>) {
items.retain(|item| !matches!(item, ResponseItem::GhostSnapshot { .. }));
}
fn process_item(item: &ResponseItem) -> ResponseItem {
match item {
ResponseItem::FunctionCallOutput { call_id, output } => {
let truncated = format_output_for_model_body(output.content.as_str());
let truncated_items = output
.content_items
.as_ref()
.map(|items| globally_truncate_function_output_items(items));
ResponseItem::FunctionCallOutput {
call_id: call_id.clone(),
output: FunctionCallOutputPayload {
content: truncated,
content_items: truncated_items,
success: output.success,
},
}
}
ResponseItem::CustomToolCallOutput { call_id, output } => {
let truncated = format_output_for_model_body(output);
ResponseItem::CustomToolCallOutput {
call_id: call_id.clone(),
output: truncated,
}
}
ResponseItem::Message { .. }
| ResponseItem::Reasoning { .. }
| ResponseItem::LocalShellCall { .. }
| ResponseItem::FunctionCall { .. }
| ResponseItem::WebSearchCall { .. }
| ResponseItem::CustomToolCall { .. }
| ResponseItem::GhostSnapshot { .. }
| ResponseItem::Other => item.clone(),
}
}
}
/// API messages include every non-system item (user/assistant messages, reasoning,
/// tool calls, tool outputs, shell calls, and web-search calls).
fn is_api_message(message: &ResponseItem) -> bool {
match message {
ResponseItem::Message { role, .. } => role.as_str() != "system",
ResponseItem::FunctionCallOutput { .. }
| ResponseItem::FunctionCall { .. }
| ResponseItem::CustomToolCall { .. }
| ResponseItem::CustomToolCallOutput { .. }
| ResponseItem::LocalShellCall { .. }
| ResponseItem::Reasoning { .. }
| ResponseItem::WebSearchCall { .. } => true,
ResponseItem::GhostSnapshot { .. } => false,
ResponseItem::Other => false,
}
}
#[cfg(test)]
#[path = "history_tests.rs"]
mod tests;
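In short, ContextManager records only API-relevant items (plus ghost snapshots), truncates oversized tool output at record time, and strips ghost snapshots when assembling the prompt history. A minimal usage sketch based on the behavior exercised in the tests that follow, reusing the protocol types from this module; illustrative only, not an additional API:
let mut ctx = ContextManager::new();
let user = ResponseItem::Message {
    id: None,
    role: "user".to_string(),
    content: vec![ContentItem::OutputText { text: "hi".to_string() }],
};
let snapshot = ResponseItem::GhostSnapshot {
    ghost_commit: GhostCommit::new("ghost-1".to_string(), None, Vec::new(), Vec::new()),
};
ctx.record_items([&user, &snapshot]); // both retained in memory
let prompt = ctx.get_history_for_prompt(); // GhostSnapshot filtered out
assert_eq!(prompt, vec![user]);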

View File

@@ -1,841 +0,0 @@
use super::*;
use crate::context_manager::truncate;
use codex_git::GhostCommit;
use codex_protocol::models::ContentItem;
use codex_protocol::models::FunctionCallOutputContentItem;
use codex_protocol::models::FunctionCallOutputPayload;
use codex_protocol::models::LocalShellAction;
use codex_protocol::models::LocalShellExecAction;
use codex_protocol::models::LocalShellStatus;
use codex_protocol::models::ReasoningItemContent;
use codex_protocol::models::ReasoningItemReasoningSummary;
use pretty_assertions::assert_eq;
use regex_lite::Regex;
fn assistant_msg(text: &str) -> ResponseItem {
ResponseItem::Message {
id: None,
role: "assistant".to_string(),
content: vec![ContentItem::OutputText {
text: text.to_string(),
}],
}
}
fn create_history_with_items(items: Vec<ResponseItem>) -> ContextManager {
let mut h = ContextManager::new();
h.record_items(items.iter());
h
}
fn user_msg(text: &str) -> ResponseItem {
ResponseItem::Message {
id: None,
role: "user".to_string(),
content: vec![ContentItem::OutputText {
text: text.to_string(),
}],
}
}
fn reasoning_msg(text: &str) -> ResponseItem {
ResponseItem::Reasoning {
id: String::new(),
summary: vec![ReasoningItemReasoningSummary::SummaryText {
text: "summary".to_string(),
}],
content: Some(vec![ReasoningItemContent::ReasoningText {
text: text.to_string(),
}]),
encrypted_content: None,
}
}
#[test]
fn filters_non_api_messages() {
let mut h = ContextManager::default();
// System message is not API messages; Other is ignored.
let system = ResponseItem::Message {
id: None,
role: "system".to_string(),
content: vec![ContentItem::OutputText {
text: "ignored".to_string(),
}],
};
let reasoning = reasoning_msg("thinking...");
h.record_items([&system, &reasoning, &ResponseItem::Other]);
// User and assistant should be retained.
let u = user_msg("hi");
let a = assistant_msg("hello");
h.record_items([&u, &a]);
let items = h.contents();
assert_eq!(
items,
vec![
ResponseItem::Reasoning {
id: String::new(),
summary: vec![ReasoningItemReasoningSummary::SummaryText {
text: "summary".to_string(),
}],
content: Some(vec![ReasoningItemContent::ReasoningText {
text: "thinking...".to_string(),
}]),
encrypted_content: None,
},
ResponseItem::Message {
id: None,
role: "user".to_string(),
content: vec![ContentItem::OutputText {
text: "hi".to_string()
}]
},
ResponseItem::Message {
id: None,
role: "assistant".to_string(),
content: vec![ContentItem::OutputText {
text: "hello".to_string()
}]
}
]
);
}
#[test]
fn get_history_for_prompt_drops_ghost_commits() {
let items = vec![ResponseItem::GhostSnapshot {
ghost_commit: GhostCommit::new("ghost-1".to_string(), None, Vec::new(), Vec::new()),
}];
let mut history = create_history_with_items(items);
let filtered = history.get_history_for_prompt();
assert_eq!(filtered, vec![]);
}
#[test]
fn remove_first_item_removes_matching_output_for_function_call() {
let items = vec![
ResponseItem::FunctionCall {
id: None,
name: "do_it".to_string(),
arguments: "{}".to_string(),
call_id: "call-1".to_string(),
},
ResponseItem::FunctionCallOutput {
call_id: "call-1".to_string(),
output: FunctionCallOutputPayload {
content: "ok".to_string(),
..Default::default()
},
},
];
let mut h = create_history_with_items(items);
h.remove_first_item();
assert_eq!(h.contents(), vec![]);
}
#[test]
fn remove_first_item_removes_matching_call_for_output() {
let items = vec![
ResponseItem::FunctionCallOutput {
call_id: "call-2".to_string(),
output: FunctionCallOutputPayload {
content: "ok".to_string(),
..Default::default()
},
},
ResponseItem::FunctionCall {
id: None,
name: "do_it".to_string(),
arguments: "{}".to_string(),
call_id: "call-2".to_string(),
},
];
let mut h = create_history_with_items(items);
h.remove_first_item();
assert_eq!(h.contents(), vec![]);
}
#[test]
fn remove_first_item_handles_local_shell_pair() {
let items = vec![
ResponseItem::LocalShellCall {
id: None,
call_id: Some("call-3".to_string()),
status: LocalShellStatus::Completed,
action: LocalShellAction::Exec(LocalShellExecAction {
command: vec!["echo".to_string(), "hi".to_string()],
timeout_ms: None,
working_directory: None,
env: None,
user: None,
}),
},
ResponseItem::FunctionCallOutput {
call_id: "call-3".to_string(),
output: FunctionCallOutputPayload {
content: "ok".to_string(),
..Default::default()
},
},
];
let mut h = create_history_with_items(items);
h.remove_first_item();
assert_eq!(h.contents(), vec![]);
}
#[test]
fn remove_first_item_handles_custom_tool_pair() {
let items = vec![
ResponseItem::CustomToolCall {
id: None,
status: None,
call_id: "tool-1".to_string(),
name: "my_tool".to_string(),
input: "{}".to_string(),
},
ResponseItem::CustomToolCallOutput {
call_id: "tool-1".to_string(),
output: "ok".to_string(),
},
];
let mut h = create_history_with_items(items);
h.remove_first_item();
assert_eq!(h.contents(), vec![]);
}
#[test]
fn normalization_retains_local_shell_outputs() {
let items = vec![
ResponseItem::LocalShellCall {
id: None,
call_id: Some("shell-1".to_string()),
status: LocalShellStatus::Completed,
action: LocalShellAction::Exec(LocalShellExecAction {
command: vec!["echo".to_string(), "hi".to_string()],
timeout_ms: None,
working_directory: None,
env: None,
user: None,
}),
},
ResponseItem::FunctionCallOutput {
call_id: "shell-1".to_string(),
output: FunctionCallOutputPayload {
content: "ok".to_string(),
..Default::default()
},
},
];
let mut history = create_history_with_items(items.clone());
let normalized = history.get_history();
assert_eq!(normalized, items);
}
#[test]
fn record_items_truncates_function_call_output_content() {
let mut history = ContextManager::new();
let long_line = "a very long line to trigger truncation\n";
let long_output = long_line.repeat(2_500);
let item = ResponseItem::FunctionCallOutput {
call_id: "call-100".to_string(),
output: FunctionCallOutputPayload {
content: long_output.clone(),
success: Some(true),
..Default::default()
},
};
history.record_items([&item]);
assert_eq!(history.items.len(), 1);
match &history.items[0] {
ResponseItem::FunctionCallOutput { output, .. } => {
assert_ne!(output.content, long_output);
assert!(
output.content.starts_with("Total output lines:"),
"expected truncated summary, got {}",
output.content
);
}
other => panic!("unexpected history item: {other:?}"),
}
}
#[test]
fn record_items_truncates_custom_tool_call_output_content() {
let mut history = ContextManager::new();
let line = "custom output that is very long\n";
let long_output = line.repeat(2_500);
let item = ResponseItem::CustomToolCallOutput {
call_id: "tool-200".to_string(),
output: long_output.clone(),
};
history.record_items([&item]);
assert_eq!(history.items.len(), 1);
match &history.items[0] {
ResponseItem::CustomToolCallOutput { output, .. } => {
assert_ne!(output, &long_output);
assert!(
output.starts_with("Total output lines:"),
"expected truncated summary, got {output}"
);
}
other => panic!("unexpected history item: {other:?}"),
}
}
fn assert_truncated_message_matches(message: &str, line: &str, total_lines: usize) {
let pattern = truncated_message_pattern(line, total_lines);
let regex = Regex::new(&pattern).unwrap_or_else(|err| {
panic!("failed to compile regex {pattern}: {err}");
});
let captures = regex
.captures(message)
.unwrap_or_else(|| panic!("message failed to match pattern {pattern}: {message}"));
let body = captures
.name("body")
.expect("missing body capture")
.as_str();
assert!(
body.len() <= truncate::MODEL_FORMAT_MAX_BYTES,
"body exceeds byte limit: {} bytes",
body.len()
);
}
fn truncated_message_pattern(line: &str, total_lines: usize) -> String {
let head_take = truncate::MODEL_FORMAT_HEAD_LINES.min(total_lines);
let tail_take = truncate::MODEL_FORMAT_TAIL_LINES.min(total_lines.saturating_sub(head_take));
let omitted = total_lines.saturating_sub(head_take + tail_take);
let escaped_line = regex_lite::escape(line);
if omitted == 0 {
return format!(
r"(?s)^Total output lines: {total_lines}\n\n(?P<body>{escaped_line}.*\n\[\.{{3}} output truncated to fit {max_bytes} bytes \.{{3}}]\n\n.*)$",
max_bytes = truncate::MODEL_FORMAT_MAX_BYTES,
);
}
format!(
r"(?s)^Total output lines: {total_lines}\n\n(?P<body>{escaped_line}.*\n\[\.{{3}} omitted {omitted} of {total_lines} lines \.{{3}}]\n\n.*)$",
)
}
#[test]
fn format_exec_output_truncates_large_error() {
let line = "very long execution error line that should trigger truncation\n";
let large_error = line.repeat(2_500); // way beyond both byte and line limits
let truncated = truncate::format_output_for_model_body(&large_error);
let total_lines = large_error.lines().count();
assert_truncated_message_matches(&truncated, line, total_lines);
assert_ne!(truncated, large_error);
}
#[test]
fn format_exec_output_marks_byte_truncation_without_omitted_lines() {
let long_line = "a".repeat(truncate::MODEL_FORMAT_MAX_BYTES + 50);
let truncated = truncate::format_output_for_model_body(&long_line);
assert_ne!(truncated, long_line);
let marker_line = format!(
"[... output truncated to fit {} bytes ...]",
truncate::MODEL_FORMAT_MAX_BYTES
);
assert!(
truncated.contains(&marker_line),
"missing byte truncation marker: {truncated}"
);
assert!(
!truncated.contains("omitted"),
"line omission marker should not appear when no lines were dropped: {truncated}"
);
}
#[test]
fn format_exec_output_returns_original_when_within_limits() {
let content = "example output\n".repeat(10);
assert_eq!(truncate::format_output_for_model_body(&content), content);
}
#[test]
fn format_exec_output_reports_omitted_lines_and_keeps_head_and_tail() {
let total_lines = truncate::MODEL_FORMAT_MAX_LINES + 100;
let content: String = (0..total_lines)
.map(|idx| format!("line-{idx}\n"))
.collect();
let truncated = truncate::format_output_for_model_body(&content);
let omitted = total_lines - truncate::MODEL_FORMAT_MAX_LINES;
let expected_marker = format!("[... omitted {omitted} of {total_lines} lines ...]");
assert!(
truncated.contains(&expected_marker),
"missing omitted marker: {truncated}"
);
assert!(
truncated.contains("line-0\n"),
"expected head line to remain: {truncated}"
);
let last_line = format!("line-{}\n", total_lines - 1);
assert!(
truncated.contains(&last_line),
"expected tail line to remain: {truncated}"
);
}
#[test]
fn format_exec_output_prefers_line_marker_when_both_limits_exceeded() {
let total_lines = truncate::MODEL_FORMAT_MAX_LINES + 42;
let long_line = "x".repeat(256);
let content: String = (0..total_lines)
.map(|idx| format!("line-{idx}-{long_line}\n"))
.collect();
let truncated = truncate::format_output_for_model_body(&content);
assert!(
truncated.contains("[... omitted 42 of 298 lines ...]"),
"expected omitted marker when line count exceeds limit: {truncated}"
);
assert!(
!truncated.contains("output truncated to fit"),
"line omission marker should take precedence over byte marker: {truncated}"
);
}
#[test]
fn truncates_across_multiple_under_limit_texts_and_reports_omitted() {
// Arrange: several text items, none exceeding per-item limit, but total exceeds budget.
let budget = truncate::MODEL_FORMAT_MAX_BYTES;
let t1_len = (budget / 2).saturating_sub(10);
let t2_len = (budget / 2).saturating_sub(10);
let remaining_after_t1_t2 = budget.saturating_sub(t1_len + t2_len);
let t3_len = 50; // gets truncated to remaining_after_t1_t2
let t4_len = 5; // omitted
let t5_len = 7; // omitted
let t1 = "a".repeat(t1_len);
let t2 = "b".repeat(t2_len);
let t3 = "c".repeat(t3_len);
let t4 = "d".repeat(t4_len);
let t5 = "e".repeat(t5_len);
let item = ResponseItem::FunctionCallOutput {
call_id: "call-omit".to_string(),
output: FunctionCallOutputPayload {
content: "irrelevant".to_string(),
content_items: Some(vec![
FunctionCallOutputContentItem::InputText { text: t1 },
FunctionCallOutputContentItem::InputText { text: t2 },
FunctionCallOutputContentItem::InputImage {
image_url: "img:mid".to_string(),
},
FunctionCallOutputContentItem::InputText { text: t3 },
FunctionCallOutputContentItem::InputText { text: t4 },
FunctionCallOutputContentItem::InputText { text: t5 },
]),
success: Some(true),
},
};
let mut history = ContextManager::new();
history.record_items([&item]);
assert_eq!(history.items.len(), 1);
let json = serde_json::to_value(&history.items[0]).expect("serialize to json");
let output = json
.get("output")
.expect("output field")
.as_array()
.expect("array output");
// Expect: t1 (full), t2 (full), image, t3 (truncated), summary mentioning 2 omitted.
assert_eq!(output.len(), 5);
let first = output[0].as_object().expect("first obj");
assert_eq!(first.get("type").unwrap(), "input_text");
let first_text = first.get("text").unwrap().as_str().unwrap();
assert_eq!(first_text.len(), t1_len);
let second = output[1].as_object().expect("second obj");
assert_eq!(second.get("type").unwrap(), "input_text");
let second_text = second.get("text").unwrap().as_str().unwrap();
assert_eq!(second_text.len(), t2_len);
assert_eq!(
output[2],
serde_json::json!({"type": "input_image", "image_url": "img:mid"})
);
let fourth = output[3].as_object().expect("fourth obj");
assert_eq!(fourth.get("type").unwrap(), "input_text");
let fourth_text = fourth.get("text").unwrap().as_str().unwrap();
assert_eq!(fourth_text.len(), remaining_after_t1_t2);
let summary = output[4].as_object().expect("summary obj");
assert_eq!(summary.get("type").unwrap(), "input_text");
let summary_text = summary.get("text").unwrap().as_str().unwrap();
assert!(summary_text.contains("omitted 2 text items"));
}
//TODO(aibrahim): run CI in release mode.
#[cfg(not(debug_assertions))]
#[test]
fn normalize_adds_missing_output_for_function_call() {
let items = vec![ResponseItem::FunctionCall {
id: None,
name: "do_it".to_string(),
arguments: "{}".to_string(),
call_id: "call-x".to_string(),
}];
let mut h = create_history_with_items(items);
h.normalize_history();
assert_eq!(
h.contents(),
vec![
ResponseItem::FunctionCall {
id: None,
name: "do_it".to_string(),
arguments: "{}".to_string(),
call_id: "call-x".to_string(),
},
ResponseItem::FunctionCallOutput {
call_id: "call-x".to_string(),
output: FunctionCallOutputPayload {
content: "aborted".to_string(),
..Default::default()
},
},
]
);
}
#[cfg(not(debug_assertions))]
#[test]
fn normalize_adds_missing_output_for_custom_tool_call() {
let items = vec![ResponseItem::CustomToolCall {
id: None,
status: None,
call_id: "tool-x".to_string(),
name: "custom".to_string(),
input: "{}".to_string(),
}];
let mut h = create_history_with_items(items);
h.normalize_history();
assert_eq!(
h.contents(),
vec![
ResponseItem::CustomToolCall {
id: None,
status: None,
call_id: "tool-x".to_string(),
name: "custom".to_string(),
input: "{}".to_string(),
},
ResponseItem::CustomToolCallOutput {
call_id: "tool-x".to_string(),
output: "aborted".to_string(),
},
]
);
}
#[cfg(not(debug_assertions))]
#[test]
fn normalize_adds_missing_output_for_local_shell_call_with_id() {
let items = vec![ResponseItem::LocalShellCall {
id: None,
call_id: Some("shell-1".to_string()),
status: LocalShellStatus::Completed,
action: LocalShellAction::Exec(LocalShellExecAction {
command: vec!["echo".to_string(), "hi".to_string()],
timeout_ms: None,
working_directory: None,
env: None,
user: None,
}),
}];
let mut h = create_history_with_items(items);
h.normalize_history();
assert_eq!(
h.contents(),
vec![
ResponseItem::LocalShellCall {
id: None,
call_id: Some("shell-1".to_string()),
status: LocalShellStatus::Completed,
action: LocalShellAction::Exec(LocalShellExecAction {
command: vec!["echo".to_string(), "hi".to_string()],
timeout_ms: None,
working_directory: None,
env: None,
user: None,
}),
},
ResponseItem::FunctionCallOutput {
call_id: "shell-1".to_string(),
output: FunctionCallOutputPayload {
content: "aborted".to_string(),
..Default::default()
},
},
]
);
}
#[cfg(not(debug_assertions))]
#[test]
fn normalize_removes_orphan_function_call_output() {
let items = vec![ResponseItem::FunctionCallOutput {
call_id: "orphan-1".to_string(),
output: FunctionCallOutputPayload {
content: "ok".to_string(),
..Default::default()
},
}];
let mut h = create_history_with_items(items);
h.normalize_history();
assert_eq!(h.contents(), vec![]);
}
#[cfg(not(debug_assertions))]
#[test]
fn normalize_removes_orphan_custom_tool_call_output() {
let items = vec![ResponseItem::CustomToolCallOutput {
call_id: "orphan-2".to_string(),
output: "ok".to_string(),
}];
let mut h = create_history_with_items(items);
h.normalize_history();
assert_eq!(h.contents(), vec![]);
}
#[cfg(not(debug_assertions))]
#[test]
fn normalize_mixed_inserts_and_removals() {
let items = vec![
// Will get an inserted output
ResponseItem::FunctionCall {
id: None,
name: "f1".to_string(),
arguments: "{}".to_string(),
call_id: "c1".to_string(),
},
// Orphan output that should be removed
ResponseItem::FunctionCallOutput {
call_id: "c2".to_string(),
output: FunctionCallOutputPayload {
content: "ok".to_string(),
..Default::default()
},
},
// Will get an inserted custom tool output
ResponseItem::CustomToolCall {
id: None,
status: None,
call_id: "t1".to_string(),
name: "tool".to_string(),
input: "{}".to_string(),
},
// Local shell call also gets an inserted function call output
ResponseItem::LocalShellCall {
id: None,
call_id: Some("s1".to_string()),
status: LocalShellStatus::Completed,
action: LocalShellAction::Exec(LocalShellExecAction {
command: vec!["echo".to_string()],
timeout_ms: None,
working_directory: None,
env: None,
user: None,
}),
},
];
let mut h = create_history_with_items(items);
h.normalize_history();
assert_eq!(
h.contents(),
vec![
ResponseItem::FunctionCall {
id: None,
name: "f1".to_string(),
arguments: "{}".to_string(),
call_id: "c1".to_string(),
},
ResponseItem::FunctionCallOutput {
call_id: "c1".to_string(),
output: FunctionCallOutputPayload {
content: "aborted".to_string(),
..Default::default()
},
},
ResponseItem::CustomToolCall {
id: None,
status: None,
call_id: "t1".to_string(),
name: "tool".to_string(),
input: "{}".to_string(),
},
ResponseItem::CustomToolCallOutput {
call_id: "t1".to_string(),
output: "aborted".to_string(),
},
ResponseItem::LocalShellCall {
id: None,
call_id: Some("s1".to_string()),
status: LocalShellStatus::Completed,
action: LocalShellAction::Exec(LocalShellExecAction {
command: vec!["echo".to_string()],
timeout_ms: None,
working_directory: None,
env: None,
user: None,
}),
},
ResponseItem::FunctionCallOutput {
call_id: "s1".to_string(),
output: FunctionCallOutputPayload {
content: "aborted".to_string(),
..Default::default()
},
},
]
);
}
// In debug builds we panic on normalization errors instead of silently fixing them.
#[cfg(debug_assertions)]
#[test]
#[should_panic]
fn normalize_adds_missing_output_for_function_call_panics_in_debug() {
let items = vec![ResponseItem::FunctionCall {
id: None,
name: "do_it".to_string(),
arguments: "{}".to_string(),
call_id: "call-x".to_string(),
}];
let mut h = create_history_with_items(items);
h.normalize_history();
}
#[cfg(debug_assertions)]
#[test]
#[should_panic]
fn normalize_adds_missing_output_for_custom_tool_call_panics_in_debug() {
let items = vec![ResponseItem::CustomToolCall {
id: None,
status: None,
call_id: "tool-x".to_string(),
name: "custom".to_string(),
input: "{}".to_string(),
}];
let mut h = create_history_with_items(items);
h.normalize_history();
}
#[cfg(debug_assertions)]
#[test]
#[should_panic]
fn normalize_adds_missing_output_for_local_shell_call_with_id_panics_in_debug() {
let items = vec![ResponseItem::LocalShellCall {
id: None,
call_id: Some("shell-1".to_string()),
status: LocalShellStatus::Completed,
action: LocalShellAction::Exec(LocalShellExecAction {
command: vec!["echo".to_string(), "hi".to_string()],
timeout_ms: None,
working_directory: None,
env: None,
user: None,
}),
}];
let mut h = create_history_with_items(items);
h.normalize_history();
}
#[cfg(debug_assertions)]
#[test]
#[should_panic]
fn normalize_removes_orphan_function_call_output_panics_in_debug() {
let items = vec![ResponseItem::FunctionCallOutput {
call_id: "orphan-1".to_string(),
output: FunctionCallOutputPayload {
content: "ok".to_string(),
..Default::default()
},
}];
let mut h = create_history_with_items(items);
h.normalize_history();
}
#[cfg(debug_assertions)]
#[test]
#[should_panic]
fn normalize_removes_orphan_custom_tool_call_output_panics_in_debug() {
let items = vec![ResponseItem::CustomToolCallOutput {
call_id: "orphan-2".to_string(),
output: "ok".to_string(),
}];
let mut h = create_history_with_items(items);
h.normalize_history();
}
#[cfg(debug_assertions)]
#[test]
#[should_panic]
fn normalize_mixed_inserts_and_removals_panics_in_debug() {
let items = vec![
ResponseItem::FunctionCall {
id: None,
name: "f1".to_string(),
arguments: "{}".to_string(),
call_id: "c1".to_string(),
},
ResponseItem::FunctionCallOutput {
call_id: "c2".to_string(),
output: FunctionCallOutputPayload {
content: "ok".to_string(),
..Default::default()
},
},
ResponseItem::CustomToolCall {
id: None,
status: None,
call_id: "t1".to_string(),
name: "tool".to_string(),
input: "{}".to_string(),
},
ResponseItem::LocalShellCall {
id: None,
call_id: Some("s1".to_string()),
status: LocalShellStatus::Completed,
action: LocalShellAction::Exec(LocalShellExecAction {
command: vec!["echo".to_string()],
timeout_ms: None,
working_directory: None,
env: None,
user: None,
}),
},
];
let mut h = create_history_with_items(items);
h.normalize_history();
}

View File

@@ -1,6 +0,0 @@
mod history;
mod normalize;
mod truncate;
pub(crate) use history::ContextManager;
pub(crate) use truncate::format_output_for_model_body;

View File

@@ -1,213 +0,0 @@
use std::collections::HashSet;
use codex_protocol::models::FunctionCallOutputPayload;
use codex_protocol::models::ResponseItem;
use crate::util::error_or_panic;
pub(crate) fn ensure_call_outputs_present(items: &mut Vec<ResponseItem>) {
// Collect synthetic outputs to insert immediately after their calls.
// Store the insertion position (index of call) alongside the item so
// we can insert in reverse order and avoid index shifting.
let mut missing_outputs_to_insert: Vec<(usize, ResponseItem)> = Vec::new();
for (idx, item) in items.iter().enumerate() {
match item {
ResponseItem::FunctionCall { call_id, .. } => {
let has_output = items.iter().any(|i| match i {
ResponseItem::FunctionCallOutput {
call_id: existing, ..
} => existing == call_id,
_ => false,
});
if !has_output {
error_or_panic(format!(
"Function call output is missing for call id: {call_id}"
));
missing_outputs_to_insert.push((
idx,
ResponseItem::FunctionCallOutput {
call_id: call_id.clone(),
output: FunctionCallOutputPayload {
content: "aborted".to_string(),
..Default::default()
},
},
));
}
}
ResponseItem::CustomToolCall { call_id, .. } => {
let has_output = items.iter().any(|i| match i {
ResponseItem::CustomToolCallOutput {
call_id: existing, ..
} => existing == call_id,
_ => false,
});
if !has_output {
error_or_panic(format!(
"Custom tool call output is missing for call id: {call_id}"
));
missing_outputs_to_insert.push((
idx,
ResponseItem::CustomToolCallOutput {
call_id: call_id.clone(),
output: "aborted".to_string(),
},
));
}
}
// LocalShellCall is represented in upstream streams by a FunctionCallOutput
ResponseItem::LocalShellCall { call_id, .. } => {
if let Some(call_id) = call_id.as_ref() {
let has_output = items.iter().any(|i| match i {
ResponseItem::FunctionCallOutput {
call_id: existing, ..
} => existing == call_id,
_ => false,
});
if !has_output {
error_or_panic(format!(
"Local shell call output is missing for call id: {call_id}"
));
missing_outputs_to_insert.push((
idx,
ResponseItem::FunctionCallOutput {
call_id: call_id.clone(),
output: FunctionCallOutputPayload {
content: "aborted".to_string(),
..Default::default()
},
},
));
}
}
}
_ => {}
}
}
// Insert synthetic outputs in reverse index order to avoid re-indexing.
for (idx, output_item) in missing_outputs_to_insert.into_iter().rev() {
items.insert(idx + 1, output_item);
}
}
pub(crate) fn remove_orphan_outputs(items: &mut Vec<ResponseItem>) {
let function_call_ids: HashSet<String> = items
.iter()
.filter_map(|i| match i {
ResponseItem::FunctionCall { call_id, .. } => Some(call_id.clone()),
_ => None,
})
.collect();
let local_shell_call_ids: HashSet<String> = items
.iter()
.filter_map(|i| match i {
ResponseItem::LocalShellCall {
call_id: Some(call_id),
..
} => Some(call_id.clone()),
_ => None,
})
.collect();
let custom_tool_call_ids: HashSet<String> = items
.iter()
.filter_map(|i| match i {
ResponseItem::CustomToolCall { call_id, .. } => Some(call_id.clone()),
_ => None,
})
.collect();
items.retain(|item| match item {
ResponseItem::FunctionCallOutput { call_id, .. } => {
let has_match =
function_call_ids.contains(call_id) || local_shell_call_ids.contains(call_id);
if !has_match {
error_or_panic(format!(
"Orphan function call output for call id: {call_id}"
));
}
has_match
}
ResponseItem::CustomToolCallOutput { call_id, .. } => {
let has_match = custom_tool_call_ids.contains(call_id);
if !has_match {
error_or_panic(format!(
"Orphan custom tool call output for call id: {call_id}"
));
}
has_match
}
_ => true,
});
}
pub(crate) fn remove_corresponding_for(items: &mut Vec<ResponseItem>, item: &ResponseItem) {
match item {
ResponseItem::FunctionCall { call_id, .. } => {
remove_first_matching(items, |i| {
matches!(
i,
ResponseItem::FunctionCallOutput {
call_id: existing, ..
} if existing == call_id
)
});
}
ResponseItem::FunctionCallOutput { call_id, .. } => {
if let Some(pos) = items.iter().position(|i| {
matches!(i, ResponseItem::FunctionCall { call_id: existing, .. } if existing == call_id)
}) {
items.remove(pos);
} else if let Some(pos) = items.iter().position(|i| {
matches!(i, ResponseItem::LocalShellCall { call_id: Some(existing), .. } if existing == call_id)
}) {
items.remove(pos);
}
}
ResponseItem::CustomToolCall { call_id, .. } => {
remove_first_matching(items, |i| {
matches!(
i,
ResponseItem::CustomToolCallOutput {
call_id: existing, ..
} if existing == call_id
)
});
}
ResponseItem::CustomToolCallOutput { call_id, .. } => {
remove_first_matching(
items,
|i| matches!(i, ResponseItem::CustomToolCall { call_id: existing, .. } if existing == call_id),
);
}
ResponseItem::LocalShellCall {
call_id: Some(call_id),
..
} => {
remove_first_matching(items, |i| {
matches!(
i,
ResponseItem::FunctionCallOutput {
call_id: existing, ..
} if existing == call_id
)
});
}
_ => {}
}
}
fn remove_first_matching<F>(items: &mut Vec<ResponseItem>, predicate: F)
where
F: Fn(&ResponseItem) -> bool,
{
if let Some(pos) = items.iter().position(predicate) {
items.remove(pos);
}
}
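Taken together, the two passes above enforce the pairing invariant: every call has an output (synthesizing an "aborted" one when missing) and every output has a call (orphans are dropped). A minimal illustration of the net effect in release builds — debug builds panic via error_or_panic instead of repairing silently:
let mut items = vec![
    ResponseItem::FunctionCall {
        id: None,
        name: "do_it".to_string(),
        arguments: "{}".to_string(),
        call_id: "c1".to_string(),
    },
    ResponseItem::FunctionCallOutput {
        call_id: "orphan".to_string(),
        output: FunctionCallOutputPayload {
            content: "ok".to_string(),
            ..Default::default()
        },
    },
];
ensure_call_outputs_present(&mut items); // inserts an "aborted" output after "c1"
remove_orphan_outputs(&mut items);       // drops the output whose call id matches nothing
assert_eq!(items.len(), 2);              // the "c1" call plus its synthetic output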

View File

@@ -1,128 +0,0 @@
use codex_protocol::models::FunctionCallOutputContentItem;
use codex_utils_string::take_bytes_at_char_boundary;
use codex_utils_string::take_last_bytes_at_char_boundary;
// Model-formatting limits: clients get full streams; only content sent to the model is truncated.
pub(crate) const MODEL_FORMAT_MAX_BYTES: usize = 10 * 1024; // 10 KiB
pub(crate) const MODEL_FORMAT_MAX_LINES: usize = 256; // lines
pub(crate) const MODEL_FORMAT_HEAD_LINES: usize = MODEL_FORMAT_MAX_LINES / 2;
pub(crate) const MODEL_FORMAT_TAIL_LINES: usize = MODEL_FORMAT_MAX_LINES - MODEL_FORMAT_HEAD_LINES; // 128
pub(crate) const MODEL_FORMAT_HEAD_BYTES: usize = MODEL_FORMAT_MAX_BYTES / 2;
pub(crate) fn globally_truncate_function_output_items(
items: &[FunctionCallOutputContentItem],
) -> Vec<FunctionCallOutputContentItem> {
let mut out: Vec<FunctionCallOutputContentItem> = Vec::with_capacity(items.len());
let mut remaining = MODEL_FORMAT_MAX_BYTES;
let mut omitted_text_items = 0usize;
for it in items {
match it {
FunctionCallOutputContentItem::InputText { text } => {
if remaining == 0 {
omitted_text_items += 1;
continue;
}
let len = text.len();
if len <= remaining {
out.push(FunctionCallOutputContentItem::InputText { text: text.clone() });
remaining -= len;
} else {
let slice = take_bytes_at_char_boundary(text, remaining);
if !slice.is_empty() {
out.push(FunctionCallOutputContentItem::InputText {
text: slice.to_string(),
});
}
remaining = 0;
}
}
// todo(aibrahim): handle input images; resize
FunctionCallOutputContentItem::InputImage { image_url } => {
out.push(FunctionCallOutputContentItem::InputImage {
image_url: image_url.clone(),
});
}
}
}
if omitted_text_items > 0 {
out.push(FunctionCallOutputContentItem::InputText {
text: format!("[omitted {omitted_text_items} text items ...]"),
});
}
out
}
pub(crate) fn format_output_for_model_body(content: &str) -> String {
// Head+tail truncation for the model: show the beginning and end with an elision.
// Clients still receive full streams; only this formatted summary is capped.
let total_lines = content.lines().count();
if content.len() <= MODEL_FORMAT_MAX_BYTES && total_lines <= MODEL_FORMAT_MAX_LINES {
return content.to_string();
}
let output = truncate_formatted_exec_output(content, total_lines);
format!("Total output lines: {total_lines}\n\n{output}")
}
fn truncate_formatted_exec_output(content: &str, total_lines: usize) -> String {
let segments: Vec<&str> = content.split_inclusive('\n').collect();
let head_take = MODEL_FORMAT_HEAD_LINES.min(segments.len());
let tail_take = MODEL_FORMAT_TAIL_LINES.min(segments.len().saturating_sub(head_take));
let omitted = segments.len().saturating_sub(head_take + tail_take);
let head_slice_end: usize = segments
.iter()
.take(head_take)
.map(|segment| segment.len())
.sum();
let tail_slice_start: usize = if tail_take == 0 {
content.len()
} else {
content.len()
- segments
.iter()
.rev()
.take(tail_take)
.map(|segment| segment.len())
.sum::<usize>()
};
let head_slice = &content[..head_slice_end];
let tail_slice = &content[tail_slice_start..];
let truncated_by_bytes = content.len() > MODEL_FORMAT_MAX_BYTES;
// this is a bit wrong. We are counting metadata lines and not just shell output lines.
let marker = if omitted > 0 {
Some(format!(
"\n[... omitted {omitted} of {total_lines} lines ...]\n\n"
))
} else if truncated_by_bytes {
Some(format!(
"\n[... output truncated to fit {MODEL_FORMAT_MAX_BYTES} bytes ...]\n\n"
))
} else {
None
};
let marker_len = marker.as_ref().map_or(0, String::len);
let base_head_budget = MODEL_FORMAT_HEAD_BYTES.min(MODEL_FORMAT_MAX_BYTES);
let head_budget = base_head_budget.min(MODEL_FORMAT_MAX_BYTES.saturating_sub(marker_len));
let head_part = take_bytes_at_char_boundary(head_slice, head_budget);
let mut result = String::with_capacity(MODEL_FORMAT_MAX_BYTES.min(content.len()));
result.push_str(head_part);
if let Some(marker_text) = marker.as_ref() {
result.push_str(marker_text);
}
let remaining = MODEL_FORMAT_MAX_BYTES.saturating_sub(result.len());
if remaining == 0 {
return result;
}
let tail_part = take_last_bytes_at_char_boundary(tail_slice, remaining);
result.push_str(tail_part);
result
}

File diff suppressed because it is too large

View File

@@ -32,11 +32,12 @@ pub async fn discover_prompts_in_excluding(
while let Ok(Some(entry)) = entries.next_entry().await {
let path = entry.path();
let is_file_like = fs::metadata(&path)
let is_file = entry
.file_type()
.await
.map(|m| m.is_file())
.map(|ft| ft.is_file())
.unwrap_or(false);
if !is_file_like {
if !is_file {
continue;
}
// Only include Markdown files with a .md extension.
@@ -196,25 +197,6 @@ mod tests {
assert_eq!(names, vec!["good"]);
}
#[tokio::test]
#[cfg(unix)]
async fn discovers_symlinked_md_files() {
let tmp = tempdir().expect("create TempDir");
let dir = tmp.path();
// Create a real file
fs::write(dir.join("real.md"), b"real content").unwrap();
// Create a symlink to the real file
std::os::unix::fs::symlink(dir.join("real.md"), dir.join("link.md")).unwrap();
let found = discover_prompts_in(dir).await;
let names: Vec<String> = found.into_iter().map(|e| e.name).collect();
// Both real and link should be discovered, sorted alphabetically
assert_eq!(names, vec!["link", "real"]);
}
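The removed test above relied on fs::metadata, which follows symlinks; the replacement uses DirEntry::file_type, which describes the entry itself, so a symlinked link.md is no longer counted as a file. A small standalone sketch of the distinction (tokio, Unix; the helper names are illustrative):
use std::path::Path;
use tokio::fs;
// metadata() resolves the link target, so a symlink to a regular file reads as a file.
async fn is_file_via_metadata(path: &Path) -> bool {
    fs::metadata(path).await.map(|m| m.is_file()).unwrap_or(false)
}
// DirEntry::file_type() reports the entry itself, so the same symlink does not.
async fn is_file_via_entry(entry: &fs::DirEntry) -> bool {
    entry.file_type().await.map(|ft| ft.is_file()).unwrap_or(false)
}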
#[tokio::test]
async fn parses_frontmatter_and_strips_from_body() {
let tmp = tempdir().expect("create TempDir");

View File

@@ -109,9 +109,6 @@ pub enum CodexErr {
#[error("{0}")]
ConnectionFailed(ConnectionFailedError),
#[error("Quota exceeded. Check your plan and billing details.")]
QuotaExceeded,
#[error(
"To use Codex with your ChatGPT plan, upgrade to Plus: https://openai.com/chatgpt/pricing."
)]
@@ -138,9 +135,6 @@ pub enum CodexErr {
#[error("unsupported operation: {0}")]
UnsupportedOperation(String),
#[error("{0}")]
RefreshTokenFailed(RefreshTokenFailedError),
#[error("Fatal error: {0}")]
Fatal(String),
@@ -207,30 +201,6 @@ impl std::fmt::Display for ResponseStreamFailed {
}
}
#[derive(Debug, Clone, PartialEq, Eq, Error)]
#[error("{message}")]
pub struct RefreshTokenFailedError {
pub reason: RefreshTokenFailedReason,
pub message: String,
}
impl RefreshTokenFailedError {
pub fn new(reason: RefreshTokenFailedReason, message: impl Into<String>) -> Self {
Self {
reason,
message: message.into(),
}
}
}
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum RefreshTokenFailedReason {
Expired,
Exhausted,
Revoked,
Other,
}
#[derive(Debug)]
pub struct UnexpectedResponseError {
pub status: StatusCode,
@@ -238,44 +208,18 @@ pub struct UnexpectedResponseError {
pub request_id: Option<String>,
}
const CLOUDFLARE_BLOCKED_MESSAGE: &str =
"Access blocked by Cloudflare. This usually happens when connecting from a restricted region";
impl UnexpectedResponseError {
fn friendly_message(&self) -> Option<String> {
if self.status != StatusCode::FORBIDDEN {
return None;
}
if !self.body.contains("Cloudflare") || !self.body.contains("blocked") {
return None;
}
let mut message = format!("{CLOUDFLARE_BLOCKED_MESSAGE} (status {})", self.status);
if let Some(id) = &self.request_id {
message.push_str(&format!(", request id: {id}"));
}
Some(message)
}
}
impl std::fmt::Display for UnexpectedResponseError {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
if let Some(friendly) = self.friendly_message() {
write!(f, "{friendly}")
} else {
write!(
f,
"unexpected status {}: {}{}",
self.status,
self.body,
self.request_id
.as_ref()
.map(|id| format!(", request id: {id}"))
.unwrap_or_default()
)
}
write!(
f,
"unexpected status {}: {}{}",
self.status,
self.body,
self.request_id
.as_ref()
.map(|id| format!(", request id: {id}"))
.unwrap_or_default()
)
}
}
@@ -311,7 +255,7 @@ impl std::fmt::Display for UsageLimitReachedError {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let message = match self.plan_type.as_ref() {
Some(PlanType::Known(KnownPlan::Plus)) => format!(
"You've hit your usage limit. Upgrade to Pro (https://openai.com/chatgpt/pricing), visit https://chatgpt.com/codex/settings/usage to purchase more credits{}",
"You've hit your usage limit. Upgrade to Pro (https://openai.com/chatgpt/pricing), visit chatgpt.com/codex/settings/usage to purchase more credits{}",
retry_suffix_after_or(self.resets_at.as_ref())
),
Some(PlanType::Known(KnownPlan::Team)) | Some(PlanType::Known(KnownPlan::Business)) => {
@@ -325,7 +269,7 @@ impl std::fmt::Display for UsageLimitReachedError {
.to_string()
}
Some(PlanType::Known(KnownPlan::Pro)) => format!(
"You've hit your usage limit. Visit https://chatgpt.com/codex/settings/usage to purchase more credits{}",
"You've hit your usage limit. Visit chatgpt.com/codex/settings/usage to purchase more credits{}",
retry_suffix_after_or(self.resets_at.as_ref())
),
Some(PlanType::Known(KnownPlan::Enterprise))
@@ -516,7 +460,7 @@ mod tests {
};
assert_eq!(
err.to_string(),
"You've hit your usage limit. Upgrade to Pro (https://openai.com/chatgpt/pricing), visit https://chatgpt.com/codex/settings/usage to purchase more credits or try again later."
"You've hit your usage limit. Upgrade to Pro (https://openai.com/chatgpt/pricing), visit chatgpt.com/codex/settings/usage to purchase more credits or try again later."
);
}
@@ -669,7 +613,7 @@ mod tests {
rate_limits: Some(rate_limit_snapshot()),
};
let expected = format!(
"You've hit your usage limit. Visit https://chatgpt.com/codex/settings/usage to purchase more credits or try again at {expected_time}."
"You've hit your usage limit. Visit chatgpt.com/codex/settings/usage to purchase more credits or try again at {expected_time}."
);
assert_eq!(err.to_string(), expected);
});
@@ -691,35 +635,6 @@ mod tests {
});
}
#[test]
fn unexpected_status_cloudflare_html_is_simplified() {
let err = UnexpectedResponseError {
status: StatusCode::FORBIDDEN,
body: "<html><body>Cloudflare error: Sorry, you have been blocked</body></html>"
.to_string(),
request_id: Some("ray-id".to_string()),
};
let status = StatusCode::FORBIDDEN.to_string();
assert_eq!(
err.to_string(),
format!("{CLOUDFLARE_BLOCKED_MESSAGE} (status {status}), request id: ray-id")
);
}
#[test]
fn unexpected_status_non_html_is_unchanged() {
let err = UnexpectedResponseError {
status: StatusCode::FORBIDDEN,
body: "plain text error".to_string(),
request_id: None,
};
let status = StatusCode::FORBIDDEN.to_string();
assert_eq!(
err.to_string(),
format!("unexpected status {status}: plain text error")
);
}
#[test]
fn usage_limit_reached_includes_hours_and_minutes() {
let base = Utc.with_ymd_and_hms(2024, 1, 1, 0, 0, 0).unwrap();
@@ -732,7 +647,7 @@ mod tests {
rate_limits: Some(rate_limit_snapshot()),
};
let expected = format!(
"You've hit your usage limit. Upgrade to Pro (https://openai.com/chatgpt/pricing), visit https://chatgpt.com/codex/settings/usage to purchase more credits or try again at {expected_time}."
"You've hit your usage limit. Upgrade to Pro (https://openai.com/chatgpt/pricing), visit chatgpt.com/codex/settings/usage to purchase more credits or try again at {expected_time}."
);
assert_eq!(err.to_string(), expected);
});

View File

@@ -14,7 +14,6 @@ use tracing::warn;
use uuid::Uuid;
use crate::user_instructions::UserInstructions;
use crate::user_shell_command::is_user_shell_command_text;
fn is_session_prefix(text: &str) -> bool {
let trimmed = text.trim_start();
@@ -32,7 +31,7 @@ fn parse_user_message(message: &[ContentItem]) -> Option<UserMessageItem> {
for content_item in message.iter() {
match content_item {
ContentItem::InputText { text } => {
if is_session_prefix(text) || is_user_shell_command_text(text) {
if is_session_prefix(text) {
return None;
}
content.push(UserInput::Text { text: text.clone() });
@@ -198,14 +197,7 @@ mod tests {
text: "# AGENTS.md instructions for test_directory\n\n<INSTRUCTIONS>\ntest_text\n</INSTRUCTIONS>".to_string(),
}],
},
ResponseItem::Message {
id: None,
role: "user".to_string(),
content: vec![ContentItem::InputText {
text: "<user_shell_command>echo 42</user_shell_command>".to_string(),
}],
},
];
];
for item in items {
let turn_item = parse_turn_item(&item);

View File

@@ -171,7 +171,6 @@ async fn exec_windows_sandbox(
params: ExecParams,
sandbox_policy: &SandboxPolicy,
) -> Result<RawExecToolCallOutput> {
use crate::config::find_codex_home;
use codex_windows_sandbox::run_windows_sandbox_capture;
let ExecParams {
@@ -189,17 +188,8 @@ async fn exec_windows_sandbox(
};
let sandbox_cwd = cwd.clone();
let logs_base_dir = find_codex_home().ok();
let spawn_res = tokio::task::spawn_blocking(move || {
run_windows_sandbox_capture(
policy_str,
&sandbox_cwd,
command,
&cwd,
env,
timeout_ms,
logs_base_dir.as_deref(),
)
run_windows_sandbox_capture(policy_str, &sandbox_cwd, command, &cwd, env, timeout_ms)
})
.await;
@@ -313,10 +303,6 @@ pub(crate) mod errors {
SandboxTransformError::MissingLinuxSandboxExecutable => {
CodexErr::LandlockSandboxExecutableNotProvided
}
#[cfg(not(target_os = "macos"))]
SandboxTransformError::SeatbeltUnavailable => CodexErr::UnsupportedOperation(
"seatbelt sandbox is only available on macOS".to_string(),
),
}
}
}
@@ -518,7 +504,6 @@ async fn consume_truncated_output(
}
Err(_) => {
// timeout
kill_child_process_group(&mut child)?;
child.start_kill()?;
// Debatable whether `child.wait().await` should be called here.
(synthetic_exit_status(EXIT_CODE_SIGNAL_BASE + TIMEOUT_CODE), true)
@@ -526,7 +511,6 @@ async fn consume_truncated_output(
}
}
_ = tokio::signal::ctrl_c() => {
kill_child_process_group(&mut child)?;
child.start_kill()?;
(synthetic_exit_status(EXIT_CODE_SIGNAL_BASE + SIGKILL_CODE), false)
}
@@ -623,38 +607,6 @@ fn synthetic_exit_status(code: i32) -> ExitStatus {
std::process::ExitStatus::from_raw(code as u32)
}
#[cfg(unix)]
fn kill_child_process_group(child: &mut Child) -> io::Result<()> {
use std::io::ErrorKind;
if let Some(pid) = child.id() {
let pid = pid as libc::pid_t;
let pgid = unsafe { libc::getpgid(pid) };
if pgid == -1 {
let err = std::io::Error::last_os_error();
if err.kind() != ErrorKind::NotFound {
return Err(err);
}
return Ok(());
}
let result = unsafe { libc::killpg(pgid, libc::SIGKILL) };
if result == -1 {
let err = std::io::Error::last_os_error();
if err.kind() != ErrorKind::NotFound {
return Err(err);
}
}
}
Ok(())
}
#[cfg(not(unix))]
fn kill_child_process_group(_: &mut Child) -> io::Result<()> {
Ok(())
}
#[cfg(test)]
mod tests {
use super::*;
@@ -727,51 +679,4 @@ mod tests {
let output = make_exec_output(exit_code, "", "", "");
assert!(is_likely_sandbox_denied(SandboxType::LinuxSeccomp, &output));
}
#[cfg(unix)]
#[tokio::test]
async fn kill_child_process_group_kills_grandchildren_on_timeout() -> Result<()> {
let command = vec![
"/bin/bash".to_string(),
"-c".to_string(),
"sleep 60 & echo $!; sleep 60".to_string(),
];
let env: HashMap<String, String> = std::env::vars().collect();
let params = ExecParams {
command,
cwd: std::env::current_dir()?,
timeout_ms: Some(500),
env,
with_escalated_permissions: None,
justification: None,
arg0: None,
};
let output = exec(params, SandboxType::None, &SandboxPolicy::ReadOnly, None).await?;
assert!(output.timed_out);
let stdout = output.stdout.from_utf8_lossy().text;
let pid_line = stdout.lines().next().unwrap_or("").trim();
let pid: i32 = pid_line.parse().map_err(|error| {
io::Error::new(
io::ErrorKind::InvalidData,
format!("Failed to parse pid from stdout '{pid_line}': {error}"),
)
})?;
let mut killed = false;
for _ in 0..20 {
// Use kill(pid, 0) to check if the process is alive.
if unsafe { libc::kill(pid, 0) } == -1
&& let Some(libc::ESRCH) = std::io::Error::last_os_error().raw_os_error()
{
killed = true;
break;
}
tokio::time::sleep(Duration::from_millis(100)).await;
}
assert!(killed, "grandchild process with pid {pid} is still alive");
Ok(())
}
}

View File

@@ -29,9 +29,8 @@ pub enum Stage {
pub enum Feature {
/// Use the single unified PTY-backed exec tool.
UnifiedExec,
/// Use the shell command tool that takes `command` as a single string of
/// shell instead of an array of args passed to `execvp(3)`.
ShellCommandTool,
/// Use the streamable exec-command/write-stdin tool pair.
StreamableShell,
/// Enable experimental RMCP features such as OAuth login.
RmcpClient,
/// Include the freeform apply_patch tool.
@@ -119,9 +118,8 @@ impl Features {
self.enabled.contains(&f)
}
pub fn enable(&mut self, f: Feature) -> &mut Self {
pub fn enable(&mut self, f: Feature) {
self.enabled.insert(f);
self
}
pub fn disable(&mut self, f: Feature) -> &mut Self {
@@ -180,6 +178,7 @@ impl Features {
let base_legacy = LegacyFeatureToggles {
experimental_sandbox_command_assessment: cfg.experimental_sandbox_command_assessment,
experimental_use_freeform_apply_patch: cfg.experimental_use_freeform_apply_patch,
experimental_use_exec_command_tool: cfg.experimental_use_exec_command_tool,
experimental_use_unified_exec_tool: cfg.experimental_use_unified_exec_tool,
experimental_use_rmcp_client: cfg.experimental_use_rmcp_client,
tools_web_search: cfg.tools.as_ref().and_then(|t| t.web_search),
@@ -198,7 +197,7 @@ impl Features {
.experimental_sandbox_command_assessment,
experimental_use_freeform_apply_patch: config_profile
.experimental_use_freeform_apply_patch,
experimental_use_exec_command_tool: config_profile.experimental_use_exec_command_tool,
experimental_use_unified_exec_tool: config_profile.experimental_use_unified_exec_tool,
experimental_use_rmcp_client: config_profile.experimental_use_rmcp_client,
tools_web_search: config_profile.tools_web_search,
@@ -254,8 +253,8 @@ pub const FEATURES: &[FeatureSpec] = &[
default_enabled: false,
},
FeatureSpec {
id: Feature::ShellCommandTool,
key: "shell_command_tool",
id: Feature::StreamableShell,
key: "streamable_shell",
stage: Stage::Experimental,
default_enabled: false,
},
@@ -293,7 +292,7 @@ pub const FEATURES: &[FeatureSpec] = &[
id: Feature::GhostCommit,
key: "ghost_commit",
stage: Stage::Experimental,
default_enabled: true,
default_enabled: false,
},
FeatureSpec {
id: Feature::WindowsSandbox,

View File

@@ -17,6 +17,10 @@ const ALIASES: &[Alias] = &[
legacy_key: "experimental_use_unified_exec_tool",
feature: Feature::UnifiedExec,
},
Alias {
legacy_key: "experimental_use_exec_command_tool",
feature: Feature::StreamableShell,
},
Alias {
legacy_key: "experimental_use_rmcp_client",
feature: Feature::RmcpClient,
@@ -50,6 +54,7 @@ pub struct LegacyFeatureToggles {
pub include_apply_patch_tool: Option<bool>,
pub experimental_sandbox_command_assessment: Option<bool>,
pub experimental_use_freeform_apply_patch: Option<bool>,
pub experimental_use_exec_command_tool: Option<bool>,
pub experimental_use_unified_exec_tool: Option<bool>,
pub experimental_use_rmcp_client: Option<bool>,
pub tools_web_search: Option<bool>,
@@ -76,6 +81,12 @@ impl LegacyFeatureToggles {
self.experimental_use_freeform_apply_patch,
"experimental_use_freeform_apply_patch",
);
set_if_some(
features,
Feature::StreamableShell,
self.experimental_use_exec_command_tool,
"experimental_use_exec_command_tool",
);
set_if_some(
features,
Feature::UnifiedExec,
@@ -112,7 +123,7 @@ fn set_if_some(
if let Some(enabled) = maybe_value {
set_feature(features, feature, enabled);
log_alias(alias_key, feature);
features.record_legacy_usage(alias_key, feature);
features.record_legacy_usage_force(alias_key, feature);
}
}

View File

@@ -18,7 +18,7 @@ mod codex_delegate;
mod command_safety;
pub mod config;
pub mod config_loader;
mod context_manager;
mod conversation_history;
pub mod custom_prompts;
mod environment_context;
pub mod error;
@@ -75,19 +75,16 @@ pub use rollout::find_conversation_path_by_id_str;
pub use rollout::list::ConversationItem;
pub use rollout::list::ConversationsPage;
pub use rollout::list::Cursor;
pub use rollout::list::parse_cursor;
pub use rollout::list::read_head_for_summary;
mod function_tool;
mod state;
mod tasks;
mod user_notification;
mod user_shell_command;
pub mod util;
pub use apply_patch::CODEX_APPLY_PATCH_ARG1;
pub use command_safety::is_safe_command;
pub use safety::get_platform_sandbox;
pub use safety::set_windows_sandbox_enabled;
// Re-export the protocol types from the standalone `codex-protocol` crate so existing
// `codex_core::protocol::...` references continue to work across the workspace.
pub use codex_protocol::protocol;
@@ -100,12 +97,11 @@ pub use client_common::Prompt;
pub use client_common::REVIEW_PROMPT;
pub use client_common::ResponseEvent;
pub use client_common::ResponseStream;
pub use codex::compact::content_items_to_text;
pub use codex_protocol::models::ContentItem;
pub use codex_protocol::models::LocalShellAction;
pub use codex_protocol::models::LocalShellExecAction;
pub use codex_protocol::models::LocalShellStatus;
pub use codex_protocol::models::ResponseItem;
pub use compact::content_items_to_text;
pub use event_mapping::parse_turn_item;
pub mod compact;
pub mod otel_init;

View File

@@ -1,6 +1,5 @@
use crate::config::types::ReasoningSummaryFormat;
use crate::tools::handlers::apply_patch::ApplyPatchToolType;
use crate::tools::spec::ConfigShellToolType;
/// The `instructions` field in the payload sent to a model should always start
/// with this content.
@@ -30,6 +29,12 @@ pub struct ModelFamily {
// Define if we need a special handling of reasoning summary
pub reasoning_summary_format: ReasoningSummaryFormat,
// This should be set to true when the model expects a tool named
// "local_shell" to be provided. Its contract must be understood natively by
// the model such that its description can be omitted.
// See https://platform.openai.com/docs/guides/tools-local-shell
pub uses_local_shell_tool: bool,
/// Whether this model supports parallel tool calls when using the
/// Responses API.
pub supports_parallel_tool_calls: bool,
@@ -52,9 +57,6 @@ pub struct ModelFamily {
/// If the model family supports setting the verbosity level when using Responses API.
pub support_verbosity: bool,
/// Preferred shell tool type for this model family when features do not override it.
pub shell_type: ConfigShellToolType,
}
macro_rules! model_family {
@@ -62,20 +64,19 @@ macro_rules! model_family {
$slug:expr, $family:expr $(, $key:ident : $value:expr )* $(,)?
) => {{
// defaults
#[allow(unused_mut)]
let mut mf = ModelFamily {
slug: $slug.to_string(),
family: $family.to_string(),
needs_special_apply_patch_instructions: false,
supports_reasoning_summaries: false,
reasoning_summary_format: ReasoningSummaryFormat::None,
uses_local_shell_tool: false,
supports_parallel_tool_calls: false,
apply_patch_tool_type: None,
base_instructions: BASE_INSTRUCTIONS.to_string(),
experimental_supported_tools: Vec::new(),
effective_context_window_percent: 95,
support_verbosity: false,
shell_type: ConfigShellToolType::Default,
};
// apply overrides
$(
@@ -104,8 +105,8 @@ pub fn find_family_for_model(slug: &str) -> Option<ModelFamily> {
model_family!(
slug, "codex-mini-latest",
supports_reasoning_summaries: true,
uses_local_shell_tool: true,
needs_special_apply_patch_instructions: true,
shell_type: ConfigShellToolType::Local,
)
} else if slug.starts_with("gpt-4.1") {
model_family!(
@@ -118,8 +119,6 @@ pub fn find_family_for_model(slug: &str) -> Option<ModelFamily> {
model_family!(slug, "gpt-4o", needs_special_apply_patch_instructions: true)
} else if slug.starts_with("gpt-3.5") {
model_family!(slug, "gpt-3.5", needs_special_apply_patch_instructions: true)
} else if slug.starts_with("porcupine") {
model_family!(slug, "porcupine", shell_type: ConfigShellToolType::UnifiedExec)
} else if slug.starts_with("test-gpt-5-codex") {
model_family!(
slug, slug,
@@ -182,12 +181,12 @@ pub fn derive_default_model_family(model: &str) -> ModelFamily {
needs_special_apply_patch_instructions: false,
supports_reasoning_summaries: false,
reasoning_summary_format: ReasoningSummaryFormat::None,
uses_local_shell_tool: false,
supports_parallel_tool_calls: false,
apply_patch_tool_type: None,
base_instructions: BASE_INSTRUCTIONS.to_string(),
experimental_supported_tools: Vec::new(),
effective_context_window_percent: 95,
support_verbosity: false,
shell_type: ConfigShellToolType::Default,
}
}

View File

@@ -1,4 +1,3 @@
use crate::bash::extract_bash_command;
use crate::bash::try_parse_shell;
use crate::bash::try_parse_word_only_commands_sequence;
use codex_protocol::parse_command::ParsedCommand;
@@ -854,29 +853,6 @@ mod tests {
}],
);
}
#[test]
fn bin_bash_lc_sed() {
assert_parsed(
&shlex_split_safe("/bin/bash -lc 'sed -n '1,10p' Cargo.toml'"),
vec![ParsedCommand::Read {
cmd: "sed -n '1,10p' Cargo.toml".to_string(),
name: "Cargo.toml".to_string(),
path: PathBuf::from("Cargo.toml"),
}],
);
}
#[test]
fn bin_zsh_lc_sed() {
assert_parsed(
&shlex_split_safe("/bin/zsh -lc 'sed -n '1,10p' Cargo.toml'"),
vec![ParsedCommand::Read {
cmd: "sed -n '1,10p' Cargo.toml".to_string(),
name: "Cargo.toml".to_string(),
path: PathBuf::from("Cargo.toml"),
}],
);
}
}
pub fn parse_command_impl(command: &[String]) -> Vec<ParsedCommand> {
@@ -1190,13 +1166,18 @@ fn parse_find_query_and_path(tail: &[String]) -> (Option<String>, Option<String>
}
fn parse_shell_lc_commands(original: &[String]) -> Option<Vec<ParsedCommand>> {
let (_, script) = extract_bash_command(original)?;
let [shell, flag, script] = original else {
return None;
};
if flag != "-lc" || !(shell == "bash" || shell == "zsh") {
return None;
}
if let Some(tree) = try_parse_shell(script)
&& let Some(all_commands) = try_parse_word_only_commands_sequence(&tree, script)
&& !all_commands.is_empty()
{
let script_tokens = shlex_split(script).unwrap_or_else(|| vec![script.to_string()]);
let script_tokens = shlex_split(script)
.unwrap_or_else(|| vec![shell.clone(), flag.clone(), script.clone()]);
// Strip small formatting helpers (e.g., head/tail/awk/wc/etc) so we
// bias toward the primary command when pipelines are present.
// First, drop obvious small formatting helpers (e.g., wc/awk/etc).
@@ -1205,7 +1186,7 @@ fn parse_shell_lc_commands(original: &[String]) -> Option<Vec<ParsedCommand>> {
let filtered_commands = drop_small_formatting_commands(all_commands);
if filtered_commands.is_empty() {
return Some(vec![ParsedCommand::Unknown {
cmd: script.to_string(),
cmd: script.clone(),
}]);
}
// Build parsed commands, tracking `cd` segments to compute effective file paths.
@@ -1269,7 +1250,7 @@ fn parse_shell_lc_commands(original: &[String]) -> Option<Vec<ParsedCommand>> {
});
if has_pipe && has_sed_n {
ParsedCommand::Read {
cmd: script.to_string(),
cmd: script.clone(),
name,
path,
}
@@ -1314,7 +1295,7 @@ fn parse_shell_lc_commands(original: &[String]) -> Option<Vec<ParsedCommand>> {
return Some(commands);
}
Some(vec![ParsedCommand::Unknown {
cmd: script.to_string(),
cmd: script.clone(),
}])
}
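Both variants shown above only apply to an exact ["<shell>", "-lc", "<script>"] invocation of bash or zsh before re-tokenizing the script. A standalone sketch of that shape (the command string and the simplified shell check are illustrative, not taken from the crate):

// Illustrative only: the three-element shape parse_shell_lc_commands keys on,
// plus re-tokenizing the inner script with shlex as the function above does.
fn is_shell_lc(argv: &[String]) -> bool {
    match argv {
        [shell, flag, _script] => {
            flag.as_str() == "-lc" && (shell.ends_with("bash") || shell.ends_with("zsh"))
        }
        _ => false,
    }
}

fn main() {
    let argv: Vec<String> = ["/bin/bash", "-lc", "grep -n TODO src/main.rs | head"]
        .iter()
        .map(|s| s.to_string())
        .collect();
    assert!(is_shell_lc(&argv));
    let tokens = shlex::split(&argv[2]).unwrap_or_default();
    println!("{tokens:?}"); // ["grep", "-n", "TODO", "src/main.rs", "|", "head"]
}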

View File

@@ -273,7 +273,7 @@ async fn traverse_directories_for_paths(
/// Pagination cursor token format: "<file_ts>|<uuid>" where `file_ts` matches the
/// filename timestamp portion (YYYY-MM-DDThh-mm-ss) used in rollout filenames.
/// The cursor orders files by timestamp desc, then UUID desc.
pub fn parse_cursor(token: &str) -> Option<Cursor> {
fn parse_cursor(token: &str) -> Option<Cursor> {
let (file_ts, uuid_str) = token.split_once('|')?;
let Ok(uuid) = Uuid::parse_str(uuid_str) else {

View File

@@ -25,6 +25,16 @@ use tracing::warn;
const SANDBOX_ASSESSMENT_TIMEOUT: Duration = Duration::from_secs(5);
const SANDBOX_RISK_CATEGORY_VALUES: &[&str] = &[
"data_deletion",
"data_exfiltration",
"privilege_escalation",
"system_modification",
"network_access",
"resource_exhaustion",
"compliance",
];
#[derive(Template)]
#[template(path = "sandboxing/assessment_prompt.md", escape = "none")]
struct SandboxAssessmentPromptTemplate<'a> {
@@ -166,26 +176,27 @@ pub(crate) async fn assess_command(
call_id,
"success",
Some(assessment.risk_level),
&assessment.risk_categories,
duration,
);
return Some(assessment);
}
Err(err) => {
warn!("failed to parse sandbox assessment JSON: {err}");
parent_otel.sandbox_assessment(call_id, "parse_error", None, duration);
parent_otel.sandbox_assessment(call_id, "parse_error", None, &[], duration);
}
},
Ok(Ok(None)) => {
warn!("sandbox assessment response did not include any message");
parent_otel.sandbox_assessment(call_id, "no_output", None, duration);
parent_otel.sandbox_assessment(call_id, "no_output", None, &[], duration);
}
Ok(Err(err)) => {
warn!("sandbox assessment failed: {err}");
parent_otel.sandbox_assessment(call_id, "model_error", None, duration);
parent_otel.sandbox_assessment(call_id, "model_error", None, &[], duration);
}
Err(_) => {
warn!("sandbox assessment timed out");
parent_otel.sandbox_assessment(call_id, "timeout", None, duration);
parent_otel.sandbox_assessment(call_id, "timeout", None, &[], duration);
}
}
@@ -218,7 +229,7 @@ fn sandbox_roots_for_prompt(policy: &SandboxPolicy, cwd: &Path) -> Vec<PathBuf>
fn sandbox_assessment_schema() -> serde_json::Value {
json!({
"type": "object",
"required": ["description", "risk_level"],
"required": ["description", "risk_level", "risk_categories"],
"properties": {
"description": {
"type": "string",
@@ -229,6 +240,13 @@ fn sandbox_assessment_schema() -> serde_json::Value {
"type": "string",
"enum": ["low", "medium", "high"]
},
"risk_categories": {
"type": "array",
"items": {
"type": "string",
"enum": SANDBOX_RISK_CATEGORY_VALUES
}
}
},
"additionalProperties": false
})
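For illustration, a payload that satisfies the schema above (values are made up; risk_categories must be drawn from SANDBOX_RISK_CATEGORY_VALUES and risk_level from low/medium/high):

// Illustrative conforming assessment payload built with serde_json.
fn main() {
    let assessment = serde_json::json!({
        "description": "Deletes files under the workspace and uploads logs to a remote host",
        "risk_level": "high",
        "risk_categories": ["data_deletion", "data_exfiltration"]
    });
    println!("{}", serde_json::to_string_pretty(&assessment).expect("serialize"));
}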

View File

@@ -14,11 +14,8 @@ use crate::exec::StdoutStream;
use crate::exec::execute_exec_env;
use crate::landlock::create_linux_sandbox_command_args;
use crate::protocol::SandboxPolicy;
#[cfg(target_os = "macos")]
use crate::seatbelt::MACOS_PATH_TO_SEATBELT_EXECUTABLE;
#[cfg(target_os = "macos")]
use crate::seatbelt::create_seatbelt_command_args;
#[cfg(target_os = "macos")]
use crate::spawn::CODEX_SANDBOX_ENV_VAR;
use crate::spawn::CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR;
use crate::tools::sandboxing::SandboxablePreference;
@@ -59,9 +56,6 @@ pub enum SandboxPreference {
pub(crate) enum SandboxTransformError {
#[error("missing codex-linux-sandbox executable path")]
MissingLinuxSandboxExecutable,
#[cfg(not(target_os = "macos"))]
#[error("seatbelt sandbox is only available on macOS")]
SeatbeltUnavailable,
}
#[derive(Default)]
@@ -113,7 +107,6 @@ impl SandboxManager {
let (command, sandbox_env, arg0_override) = match sandbox {
SandboxType::None => (command, HashMap::new(), None),
#[cfg(target_os = "macos")]
SandboxType::MacosSeatbelt => {
let mut seatbelt_env = HashMap::new();
seatbelt_env.insert(CODEX_SANDBOX_ENV_VAR.to_string(), "seatbelt".to_string());
@@ -124,8 +117,6 @@ impl SandboxManager {
full_command.append(&mut args);
(full_command, seatbelt_env, None)
}
#[cfg(not(target_os = "macos"))]
SandboxType::MacosSeatbelt => return Err(SandboxTransformError::SeatbeltUnavailable),
SandboxType::LinuxSeccomp => {
let exe = codex_linux_sandbox_exe
.ok_or(SandboxTransformError::MissingLinuxSandboxExecutable)?;

View File

@@ -1,7 +1,4 @@
#![cfg(target_os = "macos")]
use std::collections::HashMap;
use std::ffi::CStr;
use std::path::Path;
use std::path::PathBuf;
use tokio::process::Child;
@@ -12,7 +9,6 @@ use crate::spawn::StdioPolicy;
use crate::spawn::spawn_child_async;
const MACOS_SEATBELT_BASE_POLICY: &str = include_str!("seatbelt_base_policy.sbpl");
const MACOS_SEATBELT_NETWORK_POLICY: &str = include_str!("seatbelt_network_policy.sbpl");
/// When working with `sandbox-exec`, only consider `sandbox-exec` in `/usr/bin`
/// to defend against an attacker trying to inject a malicious version on the
@@ -48,24 +44,27 @@ pub(crate) fn create_seatbelt_command_args(
sandbox_policy: &SandboxPolicy,
sandbox_policy_cwd: &Path,
) -> Vec<String> {
let (file_write_policy, file_write_dir_params) = {
let (file_write_policy, extra_cli_args) = {
if sandbox_policy.has_full_disk_write_access() {
// Allegedly, this is more permissive than `(allow file-write*)`.
(
r#"(allow file-write* (regex #"^/"))"#.to_string(),
Vec::new(),
Vec::<String>::new(),
)
} else {
let writable_roots = sandbox_policy.get_writable_roots_with_cwd(sandbox_policy_cwd);
let mut writable_folder_policies: Vec<String> = Vec::new();
let mut file_write_params = Vec::new();
let mut cli_args: Vec<String> = Vec::new();
for (index, wr) in writable_roots.iter().enumerate() {
// Canonicalize to avoid mismatches like /var vs /private/var on macOS.
let canonical_root = wr.root.canonicalize().unwrap_or_else(|_| wr.root.clone());
let root_param = format!("WRITABLE_ROOT_{index}");
file_write_params.push((root_param.clone(), canonical_root));
cli_args.push(format!(
"-D{root_param}={}",
canonical_root.to_string_lossy()
));
if wr.read_only_subpaths.is_empty() {
writable_folder_policies.push(format!("(subpath (param \"{root_param}\"))"));
@@ -77,9 +76,9 @@ pub(crate) fn create_seatbelt_command_args(
for (subpath_index, ro) in wr.read_only_subpaths.iter().enumerate() {
let canonical_ro = ro.canonicalize().unwrap_or_else(|_| ro.clone());
let ro_param = format!("WRITABLE_ROOT_{index}_RO_{subpath_index}");
cli_args.push(format!("-D{ro_param}={}", canonical_ro.to_string_lossy()));
require_parts
.push(format!("(require-not (subpath (param \"{ro_param}\")))"));
file_write_params.push((ro_param, canonical_ro));
}
let policy_component = format!("(require-all {} )", require_parts.join(" "));
writable_folder_policies.push(policy_component);
@@ -87,13 +86,13 @@ pub(crate) fn create_seatbelt_command_args(
}
if writable_folder_policies.is_empty() {
("".to_string(), Vec::new())
("".to_string(), Vec::<String>::new())
} else {
let file_write_policy = format!(
"(allow file-write*\n{}\n)",
writable_folder_policies.join(" ")
);
(file_write_policy, file_write_params)
(file_write_policy, cli_args)
}
}
};
@@ -106,7 +105,7 @@ pub(crate) fn create_seatbelt_command_args(
// TODO(mbolin): apply_patch calls must also honor the SandboxPolicy.
let network_policy = if sandbox_policy.has_full_network_access() {
MACOS_SEATBELT_NETWORK_POLICY
"(allow network-outbound)\n(allow network-inbound)\n(allow system-socket)"
} else {
""
};
@@ -115,49 +114,17 @@ pub(crate) fn create_seatbelt_command_args(
"{MACOS_SEATBELT_BASE_POLICY}\n{file_read_policy}\n{file_write_policy}\n{network_policy}"
);
let dir_params = [file_write_dir_params, macos_dir_params()].concat();
let mut seatbelt_args: Vec<String> = vec!["-p".to_string(), full_policy];
let definition_args = dir_params
.into_iter()
.map(|(key, value)| format!("-D{key}={value}", value = value.to_string_lossy()));
seatbelt_args.extend(definition_args);
seatbelt_args.extend(extra_cli_args);
seatbelt_args.push("--".to_string());
seatbelt_args.extend(command);
seatbelt_args
}
/// Wraps libc::confstr to return a String.
fn confstr(name: libc::c_int) -> Option<String> {
let mut buf = vec![0_i8; (libc::PATH_MAX as usize) + 1];
let len = unsafe { libc::confstr(name, buf.as_mut_ptr(), buf.len()) };
if len == 0 {
return None;
}
// confstr guarantees NUL-termination when len > 0.
let cstr = unsafe { CStr::from_ptr(buf.as_ptr()) };
cstr.to_str().ok().map(ToString::to_string)
}
/// Wraps confstr to return a canonicalized PathBuf.
fn confstr_path(name: libc::c_int) -> Option<PathBuf> {
let s = confstr(name)?;
let path = PathBuf::from(s);
path.canonicalize().ok().or(Some(path))
}
fn macos_dir_params() -> Vec<(String, PathBuf)> {
if let Some(p) = confstr_path(libc::_CS_DARWIN_USER_CACHE_DIR) {
return vec![("DARWIN_USER_CACHE_DIR".to_string(), p)];
}
vec![]
}
#[cfg(test)]
mod tests {
use super::MACOS_SEATBELT_BASE_POLICY;
use super::create_seatbelt_command_args;
use super::macos_dir_params;
use crate::protocol::SandboxPolicy;
use pretty_assertions::assert_eq;
use std::fs;
@@ -167,6 +134,11 @@ mod tests {
#[test]
fn create_seatbelt_args_with_read_only_git_subpath() {
if cfg!(target_os = "windows") {
// /tmp does not exist on Windows, so skip this test.
return;
}
// Create a temporary workspace with two writable roots: one containing
// a top-level .git directory and one without it.
let tmp = TempDir::new().expect("tempdir");
@@ -227,12 +199,6 @@ mod tests {
format!("-DWRITABLE_ROOT_2={}", cwd.to_string_lossy()),
];
expected_args.extend(
macos_dir_params()
.into_iter()
.map(|(key, value)| format!("-D{key}={value}", value = value.to_string_lossy())),
);
expected_args.extend(vec![
"--".to_string(),
"/bin/echo".to_string(),
@@ -244,6 +210,11 @@ mod tests {
#[test]
fn create_seatbelt_args_for_cwd_as_git_repo() {
if cfg!(target_os = "windows") {
// /tmp does not exist on Windows, so skip this test.
return;
}
// Create a temporary workspace with two writable roots: one containing
// a top-level .git directory and one without it.
let tmp = TempDir::new().expect("tempdir");
@@ -321,12 +292,6 @@ mod tests {
expected_args.push(format!("-DWRITABLE_ROOT_2={p}"));
}
expected_args.extend(
macos_dir_params()
.into_iter()
.map(|(key, value)| format!("-D{key}={value}", value = value.to_string_lossy())),
);
expected_args.extend(vec![
"--".to_string(),
"/bin/echo".to_string(),

View File

@@ -49,7 +49,6 @@
(sysctl-name "hw.packages")
(sysctl-name "hw.pagesize_compat")
(sysctl-name "hw.pagesize")
(sysctl-name "hw.physicalcpu")
(sysctl-name "hw.physicalcpu_max")
(sysctl-name "hw.tbfrequency_compat")
(sysctl-name "hw.vectorunit")

View File

@@ -1,30 +0,0 @@
; when network access is enabled, these policies are added after those in seatbelt_base_policy.sbpl
; Ref https://source.chromium.org/chromium/chromium/src/+/main:sandbox/policy/mac/network.sb;drc=f8f264d5e4e7509c913f4c60c2639d15905a07e4
(allow network-outbound)
(allow network-inbound)
(allow system-socket)
(allow mach-lookup
; Used to look up the _CS_DARWIN_USER_CACHE_DIR in the sandbox.
(global-name "com.apple.bsd.dirhelper")
(global-name "com.apple.system.opendirectoryd.membership")
; Communicate with the security server for TLS certificate information.
(global-name "com.apple.SecurityServer")
(global-name "com.apple.networkd")
(global-name "com.apple.ocspd")
(global-name "com.apple.trustd.agent")
; Read network configuration.
(global-name "com.apple.SystemConfiguration.DNSConfiguration")
(global-name "com.apple.SystemConfiguration.configd")
)
(allow sysctl-read
(sysctl-name-regex #"^net.routetable")
)
(allow file-write*
(subpath (param "DARWIN_USER_CACHE_DIR"))
)

View File

@@ -31,37 +31,16 @@ pub enum Shell {
impl Shell {
pub fn name(&self) -> Option<String> {
match self {
Shell::Zsh(ZshShell { shell_path, .. }) | Shell::Bash(BashShell { shell_path, .. }) => {
std::path::Path::new(shell_path)
.file_name()
.map(|s| s.to_string_lossy().to_string())
}
Shell::Zsh(zsh) => std::path::Path::new(&zsh.shell_path)
.file_name()
.map(|s| s.to_string_lossy().to_string()),
Shell::Bash(bash) => std::path::Path::new(&bash.shell_path)
.file_name()
.map(|s| s.to_string_lossy().to_string()),
Shell::PowerShell(ps) => Some(ps.exe.clone()),
Shell::Unknown => None,
}
}
/// Takes a string of shell and returns the full list of command args to
/// use with `exec()` to run the shell command.
pub fn derive_exec_args(&self, command: &str, use_login_shell: bool) -> Vec<String> {
match self {
Shell::Zsh(ZshShell { shell_path, .. }) | Shell::Bash(BashShell { shell_path, .. }) => {
let arg = if use_login_shell { "-lc" } else { "-c" };
vec![shell_path.clone(), arg.to_string(), command.to_string()]
}
Shell::PowerShell(ps) => {
let mut args = vec![ps.exe.clone(), "-NoLogo".to_string()];
if !use_login_shell {
args.push("-NoProfile".to_string());
}
args.push("-Command".to_string());
args.push(command.to_string());
args
}
Shell::Unknown => shlex::split(command).unwrap_or_else(|| vec![command.to_string()]),
}
}
}
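To make the match arms above concrete, these are the argv shapes derive_exec_args would produce for the command `git status` (shell paths are illustrative):

// Expected argv per shell variant, following the match arms above.
fn main() {
    let zsh_login = vec!["/bin/zsh", "-lc", "git status"]; // Zsh/Bash, use_login_shell = true
    let bash_non_login = vec!["/bin/bash", "-c", "git status"]; // Zsh/Bash, use_login_shell = false
    let pwsh_login = vec!["pwsh.exe", "-NoLogo", "-Command", "git status"];
    let pwsh_non_login = vec!["pwsh.exe", "-NoLogo", "-NoProfile", "-Command", "git status"];
    let unknown = vec!["git", "status"]; // Unknown shell: shlex::split of the command string
    println!("{zsh_login:?}\n{bash_non_login:?}\n{pwsh_login:?}\n{pwsh_non_login:?}\n{unknown:?}");
}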
#[cfg(unix)]

View File

@@ -64,32 +64,22 @@ pub(crate) async fn spawn_child_async(
// any child processes that were spawned as part of a `"shell"` tool call
// to also be terminated.
#[cfg(unix)]
// This relies on prctl(2), so it only works on Linux.
#[cfg(target_os = "linux")]
unsafe {
#[cfg(target_os = "linux")]
let parent_pid = libc::getpid();
cmd.pre_exec(move || {
if libc::setpgid(0, 0) == -1 {
cmd.pre_exec(|| {
// This prctl call effectively requests, "deliver SIGTERM when my
// current parent dies."
if libc::prctl(libc::PR_SET_PDEATHSIG, libc::SIGTERM) == -1 {
return Err(std::io::Error::last_os_error());
}
// This relies on prctl(2), so it only works on Linux.
#[cfg(target_os = "linux")]
{
// This prctl call effectively requests, "deliver SIGTERM when my
// current parent dies."
if libc::prctl(libc::PR_SET_PDEATHSIG, libc::SIGTERM) == -1 {
return Err(std::io::Error::last_os_error());
}
// Though if there was a race condition and this pre_exec() block is
// run _after_ the parent (i.e., the Codex process) has already
// exited, then parent will be the closest configured "subreaper"
// ancestor process, or PID 1 (init). If the Codex process has exited
// already, so should the child process.
if libc::getppid() != parent_pid {
libc::raise(libc::SIGTERM);
}
// Though if there was a race condition and this pre_exec() block is
// run _after_ the parent (i.e., the Codex process) has already
// exited, then the parent is the _init_ process (which will never
// die), so we should just terminate the child process now.
if libc::getppid() == 1 {
libc::raise(libc::SIGTERM);
}
Ok(())
});

View File

@@ -3,7 +3,7 @@
use codex_protocol::models::ResponseItem;
use crate::codex::SessionConfiguration;
use crate::context_manager::ContextManager;
use crate::conversation_history::ConversationHistory;
use crate::protocol::RateLimitSnapshot;
use crate::protocol::TokenUsage;
use crate::protocol::TokenUsageInfo;
@@ -11,7 +11,7 @@ use crate::protocol::TokenUsageInfo;
/// Persistent, session-scoped state previously stored directly on `Session`.
pub(crate) struct SessionState {
pub(crate) session_configuration: SessionConfiguration,
pub(crate) history: ContextManager,
pub(crate) history: ConversationHistory,
pub(crate) latest_rate_limits: Option<RateLimitSnapshot>,
}
@@ -20,7 +20,7 @@ impl SessionState {
pub(crate) fn new(session_configuration: SessionConfiguration) -> Self {
Self {
session_configuration,
history: ContextManager::new(),
history: ConversationHistory::new(),
latest_rate_limits: None,
}
}
@@ -34,7 +34,7 @@ impl SessionState {
self.history.record_items(items)
}
pub(crate) fn clone_history(&self) -> ContextManager {
pub(crate) fn clone_history(&self) -> ConversationHistory {
self.history.clone()
}

View File

@@ -4,7 +4,7 @@ use async_trait::async_trait;
use tokio_util::sync::CancellationToken;
use crate::codex::TurnContext;
use crate::compact;
use crate::codex::compact;
use crate::state::TaskKind;
use codex_protocol::user_input::UserInput;

View File

@@ -75,12 +75,12 @@ async fn start_review_conversation(
// Avoid loading project docs; reviewer only needs findings
sub_agent_config.project_doc_max_bytes = 0;
// Carry over review-only feature restrictions so the delegate cannot
// re-enable blocked tools (web search, view image).
// re-enable blocked tools (web search, view image, streamable shell).
sub_agent_config
.features
.disable(crate::features::Feature::WebSearchRequest)
.disable(crate::features::Feature::ViewImageTool);
.disable(crate::features::Feature::ViewImageTool)
.disable(crate::features::Feature::StreamableShell);
// Set explicit review rubric for the sub-agent
sub_agent_config.base_instructions = Some(crate::REVIEW_PROMPT.to_string());
(run_codex_conversation_one_shot(

View File

@@ -1,35 +1,28 @@
use std::sync::Arc;
use std::time::Duration;
use async_trait::async_trait;
use codex_async_utils::CancelErr;
use codex_async_utils::OrCancelExt;
use codex_protocol::models::ShellToolCallParams;
use codex_protocol::user_input::UserInput;
use tokio::sync::Mutex;
use tokio_util::sync::CancellationToken;
use tracing::error;
use uuid::Uuid;
use crate::codex::TurnContext;
use crate::exec::ExecToolCallOutput;
use crate::exec::SandboxType;
use crate::exec::StdoutStream;
use crate::exec::StreamOutput;
use crate::exec::execute_exec_env;
use crate::exec_env::create_env;
use crate::parse_command::parse_command;
use crate::protocol::EventMsg;
use crate::protocol::ExecCommandBeginEvent;
use crate::protocol::ExecCommandEndEvent;
use crate::protocol::SandboxPolicy;
use crate::protocol::TaskStartedEvent;
use crate::sandboxing::ExecEnv;
use crate::state::TaskKind;
use crate::tools::format_exec_output_str;
use crate::user_shell_command::user_shell_command_record_item;
use crate::tools::context::ToolPayload;
use crate::tools::parallel::ToolCallRuntime;
use crate::tools::router::ToolCall;
use crate::tools::router::ToolRouter;
use crate::turn_diff_tracker::TurnDiffTracker;
use super::SessionTask;
use super::SessionTaskContext;
const USER_SHELL_TOOL_NAME: &str = "local_shell";
#[derive(Clone)]
pub(crate) struct UserShellCommandTask {
command: String,
@@ -63,131 +56,59 @@ impl SessionTask for UserShellCommandTask {
// Execute the user's script under their default shell when known; this
// allows commands that use shell features (pipes, &&, redirects, etc.).
// We do not source rc files or otherwise reformat the script.
let use_login_shell = true;
let shell_invocation = session
.user_shell()
.derive_exec_args(&self.command, use_login_shell);
let call_id = Uuid::new_v4().to_string();
let raw_command = self.command.clone();
let parsed_cmd = parse_command(&shell_invocation);
session
.send_event(
turn_context.as_ref(),
EventMsg::ExecCommandBegin(ExecCommandBeginEvent {
call_id: call_id.clone(),
command: shell_invocation.clone(),
cwd: turn_context.cwd.clone(),
parsed_cmd,
is_user_shell_command: true,
}),
)
.await;
let exec_env = ExecEnv {
command: shell_invocation,
cwd: turn_context.cwd.clone(),
env: create_env(&turn_context.shell_environment_policy),
timeout_ms: None,
sandbox: SandboxType::None,
with_escalated_permissions: None,
justification: None,
arg0: None,
let shell_invocation = match session.user_shell() {
crate::shell::Shell::Zsh(zsh) => vec![
zsh.shell_path.clone(),
"-lc".to_string(),
self.command.clone(),
],
crate::shell::Shell::Bash(bash) => vec![
bash.shell_path.clone(),
"-lc".to_string(),
self.command.clone(),
],
crate::shell::Shell::PowerShell(ps) => vec![
ps.exe.clone(),
"-NoProfile".to_string(),
"-Command".to_string(),
self.command.clone(),
],
crate::shell::Shell::Unknown => {
shlex::split(&self.command).unwrap_or_else(|| vec![self.command.clone()])
}
};
let stdout_stream = Some(StdoutStream {
sub_id: turn_context.sub_id.clone(),
call_id: call_id.clone(),
tx_event: session.get_tx_event(),
});
let params = ShellToolCallParams {
command: shell_invocation,
workdir: None,
timeout_ms: None,
with_escalated_permissions: None,
justification: None,
};
let sandbox_policy = SandboxPolicy::DangerFullAccess;
let exec_result = execute_exec_env(exec_env, &sandbox_policy, stdout_stream)
.or_cancel(&cancellation_token)
.await;
let tool_call = ToolCall {
tool_name: USER_SHELL_TOOL_NAME.to_string(),
call_id: Uuid::new_v4().to_string(),
payload: ToolPayload::LocalShell {
params,
is_user_shell_command: true,
},
};
match exec_result {
Err(CancelErr::Cancelled) => {
let aborted_message = "command aborted by user".to_string();
let exec_output = ExecToolCallOutput {
exit_code: -1,
stdout: StreamOutput::new(String::new()),
stderr: StreamOutput::new(aborted_message.clone()),
aggregated_output: StreamOutput::new(aborted_message.clone()),
duration: Duration::ZERO,
timed_out: false,
};
let output_items = [user_shell_command_record_item(&raw_command, &exec_output)];
session
.record_conversation_items(turn_context.as_ref(), &output_items)
.await;
session
.send_event(
turn_context.as_ref(),
EventMsg::ExecCommandEnd(ExecCommandEndEvent {
call_id,
stdout: String::new(),
stderr: aborted_message.clone(),
aggregated_output: aborted_message.clone(),
exit_code: -1,
duration: Duration::ZERO,
formatted_output: aborted_message,
}),
)
.await;
}
Ok(Ok(output)) => {
session
.send_event(
turn_context.as_ref(),
EventMsg::ExecCommandEnd(ExecCommandEndEvent {
call_id: call_id.clone(),
stdout: output.stdout.text.clone(),
stderr: output.stderr.text.clone(),
aggregated_output: output.aggregated_output.text.clone(),
exit_code: output.exit_code,
duration: output.duration,
formatted_output: format_exec_output_str(&output),
}),
)
.await;
let router = Arc::new(ToolRouter::from_config(&turn_context.tools_config, None));
let tracker = Arc::new(Mutex::new(TurnDiffTracker::new()));
let runtime = ToolCallRuntime::new(
Arc::clone(&router),
Arc::clone(&session),
Arc::clone(&turn_context),
Arc::clone(&tracker),
);
let output_items = [user_shell_command_record_item(&raw_command, &output)];
session
.record_conversation_items(turn_context.as_ref(), &output_items)
.await;
}
Ok(Err(err)) => {
error!("user shell command failed: {err:?}");
let message = format!("execution error: {err:?}");
let exec_output = ExecToolCallOutput {
exit_code: -1,
stdout: StreamOutput::new(String::new()),
stderr: StreamOutput::new(message.clone()),
aggregated_output: StreamOutput::new(message.clone()),
duration: Duration::ZERO,
timed_out: false,
};
session
.send_event(
turn_context.as_ref(),
EventMsg::ExecCommandEnd(ExecCommandEndEvent {
call_id,
stdout: exec_output.stdout.text.clone(),
stderr: exec_output.stderr.text.clone(),
aggregated_output: exec_output.aggregated_output.text.clone(),
exit_code: exec_output.exit_code,
duration: exec_output.duration,
formatted_output: format_exec_output_str(&exec_output),
}),
)
.await;
let output_items = [user_shell_command_record_item(&raw_command, &exec_output)];
session
.record_conversation_items(turn_context.as_ref(), &output_items)
.await;
}
if let Err(err) = runtime
.handle_tool_call(tool_call, cancellation_token)
.await
{
error!("user shell command failed: {err:?}");
}
None
}

View File

@@ -40,6 +40,7 @@ pub enum ToolPayload {
},
LocalShell {
params: ShellToolCallParams,
is_user_shell_command: bool,
},
UnifiedExec {
arguments: String,
@@ -56,7 +57,7 @@ impl ToolPayload {
match self {
ToolPayload::Function { arguments } => Cow::Borrowed(arguments),
ToolPayload::Custom { input } => Cow::Borrowed(input),
ToolPayload::LocalShell { params } => Cow::Owned(params.command.join(" ")),
ToolPayload::LocalShell { params, .. } => Cow::Owned(params.command.join(" ")),
ToolPayload::UnifiedExec { arguments } => Cow::Borrowed(arguments),
ToolPayload::Mcp { raw_arguments, .. } => Cow::Borrowed(raw_arguments),
}

View File

@@ -19,7 +19,6 @@ pub use mcp::McpHandler;
pub use mcp_resource::McpResourceHandler;
pub use plan::PlanHandler;
pub use read_file::ReadFileHandler;
pub use shell::ShellCommandHandler;
pub use shell::ShellHandler;
pub use test_sync::TestSyncHandler;
pub use unified_exec::UnifiedExecHandler;

View File

@@ -1,5 +1,4 @@
use async_trait::async_trait;
use codex_protocol::models::ShellCommandToolCallParams;
use codex_protocol::models::ShellToolCallParams;
use std::sync::Arc;
@@ -26,8 +25,6 @@ use crate::tools::sandboxing::ToolCtx;
pub struct ShellHandler;
pub struct ShellCommandHandler;
impl ShellHandler {
fn to_exec_params(params: ShellToolCallParams, turn_context: &TurnContext) -> ExecParams {
ExecParams {
@@ -42,28 +39,6 @@ impl ShellHandler {
}
}
impl ShellCommandHandler {
fn to_exec_params(
params: ShellCommandToolCallParams,
session: &crate::codex::Session,
turn_context: &TurnContext,
) -> ExecParams {
let shell = session.user_shell();
let use_login_shell = true;
let command = shell.derive_exec_args(&params.command, use_login_shell);
ExecParams {
command,
cwd: turn_context.resolve_path(params.workdir.clone()),
timeout_ms: params.timeout_ms,
env: create_env(&turn_context.shell_environment_policy),
with_escalated_permissions: params.with_escalated_permissions,
justification: params.justification,
arg0: None,
}
}
}
#[async_trait]
impl ToolHandler for ShellHandler {
fn kind(&self) -> ToolKind {
@@ -107,7 +82,10 @@ impl ToolHandler for ShellHandler {
)
.await
}
ToolPayload::LocalShell { params } => {
ToolPayload::LocalShell {
params,
is_user_shell_command,
} => {
let exec_params = Self::to_exec_params(params, turn.as_ref());
Self::run_exec_like(
tool_name.as_str(),
@@ -116,7 +94,7 @@ impl ToolHandler for ShellHandler {
turn,
tracker,
call_id,
true,
is_user_shell_command,
)
.await
}
@@ -127,49 +105,6 @@ impl ToolHandler for ShellHandler {
}
}
#[async_trait]
impl ToolHandler for ShellCommandHandler {
fn kind(&self) -> ToolKind {
ToolKind::Function
}
fn matches_kind(&self, payload: &ToolPayload) -> bool {
matches!(payload, ToolPayload::Function { .. })
}
async fn handle(&self, invocation: ToolInvocation) -> Result<ToolOutput, FunctionCallError> {
let ToolInvocation {
session,
turn,
tracker,
call_id,
tool_name,
payload,
} = invocation;
let ToolPayload::Function { arguments } = payload else {
return Err(FunctionCallError::RespondToModel(format!(
"unsupported payload for shell_command handler: {tool_name}"
)));
};
let params: ShellCommandToolCallParams = serde_json::from_str(&arguments).map_err(|e| {
FunctionCallError::RespondToModel(format!("failed to parse function arguments: {e:?}"))
})?;
let exec_params = Self::to_exec_params(params, session.as_ref(), turn.as_ref());
ShellHandler::run_exec_like(
tool_name.as_str(),
exec_params,
session,
turn,
tracker,
call_id,
false,
)
.await
}
}
impl ShellHandler {
async fn run_exec_like(
tool_name: &str,
@@ -287,6 +222,7 @@ impl ShellHandler {
env: exec_params.env.clone(),
with_escalated_permissions: exec_params.with_escalated_permissions,
justification: exec_params.justification.clone(),
is_user_shell_command,
};
let mut orchestrator = ToolOrchestrator::new();
let mut runtime = ShellRuntime::new();
@@ -308,49 +244,3 @@ impl ShellHandler {
})
}
}
#[cfg(test)]
mod tests {
use crate::is_safe_command::is_known_safe_command;
use crate::shell::BashShell;
use crate::shell::Shell;
use crate::shell::ZshShell;
/// The logic for is_known_safe_command() has heuristics for known shells,
/// so we must ensure the commands generated by [ShellCommandHandler] can be
/// recognized as safe if the `command` is safe.
#[test]
fn commands_generated_by_shell_command_handler_can_be_matched_by_is_known_safe_command() {
let bash_shell = Shell::Bash(BashShell {
shell_path: "/bin/bash".to_string(),
bashrc_path: "/home/user/.bashrc".to_string(),
});
assert_safe(&bash_shell, "ls -la");
let zsh_shell = Shell::Zsh(ZshShell {
shell_path: "/bin/zsh".to_string(),
zshrc_path: "/home/user/.zshrc".to_string(),
});
assert_safe(&zsh_shell, "ls -la");
#[cfg(target_os = "windows")]
{
use crate::shell::PowerShellConfig;
let powershell = Shell::PowerShell(PowerShellConfig {
exe: "pwsh.exe".to_string(),
bash_exe_fallback: None,
});
assert_safe(&powershell, "ls -Name");
}
}
fn assert_safe(shell: &Shell, command: &str) {
assert!(is_known_safe_command(
&shell.derive_exec_args(command, /* use_login_shell */ true)
));
assert!(is_known_safe_command(
&shell.derive_exec_args(command, /* use_login_shell */ false)
));
}
}

View File

@@ -1,7 +1,8 @@
use std::path::PathBuf;
use std::time::Duration;
use async_trait::async_trait;
use serde::Deserialize;
use serde::Serialize;
use crate::function_tool::FunctionCallError;
use crate::protocol::EventMsg;
@@ -26,8 +27,6 @@ pub struct UnifiedExecHandler;
#[derive(Debug, Deserialize)]
struct ExecCommandArgs {
cmd: String,
#[serde(default)]
workdir: Option<String>,
#[serde(default = "default_shell")]
shell: String,
#[serde(default = "default_login")]
@@ -36,10 +35,6 @@ struct ExecCommandArgs {
yield_time_ms: Option<u64>,
#[serde(default)]
max_output_tokens: Option<usize>,
#[serde(default)]
with_escalated_permissions: Option<bool>,
#[serde(default)]
justification: Option<String>,
}
#[derive(Debug, Deserialize)]
@@ -104,34 +99,6 @@ impl ToolHandler for UnifiedExecHandler {
"failed to parse exec_command arguments: {err:?}"
))
})?;
let ExecCommandArgs {
cmd,
workdir,
shell,
login,
yield_time_ms,
max_output_tokens,
with_escalated_permissions,
justification,
} = args;
if with_escalated_permissions.unwrap_or(false)
&& !matches!(
context.turn.approval_policy,
codex_protocol::protocol::AskForApproval::OnRequest
)
{
return Err(FunctionCallError::RespondToModel(format!(
"approval policy is {policy:?}; reject command — you cannot ask for escalated permissions if the approval policy is {policy:?}",
policy = context.turn.approval_policy
)));
}
let workdir = workdir
.as_deref()
.filter(|value| !value.is_empty())
.map(PathBuf::from);
let cwd = workdir.clone().unwrap_or_else(|| context.turn.cwd.clone());
let event_ctx = ToolEventCtx::new(
context.session.as_ref(),
@@ -139,20 +106,18 @@ impl ToolHandler for UnifiedExecHandler {
&context.call_id,
None,
);
let emitter = ToolEmitter::unified_exec(cmd.clone(), cwd.clone(), true);
let emitter =
ToolEmitter::unified_exec(args.cmd.clone(), context.turn.cwd.clone(), true);
emitter.emit(event_ctx, ToolEventStage::Begin).await;
manager
.exec_command(
ExecCommandRequest {
command: &cmd,
shell: &shell,
login,
yield_time_ms,
max_output_tokens,
workdir,
with_escalated_permissions,
justification,
command: &args.cmd,
shell: &args.shell,
login: args.login,
yield_time_ms: args.yield_time_ms,
max_output_tokens: args.max_output_tokens,
},
&context,
)
@@ -198,7 +163,11 @@ impl ToolHandler for UnifiedExecHandler {
.await;
}
let content = format_response(&response);
let content = serialize_response(&response).map_err(|err| {
FunctionCallError::RespondToModel(format!(
"failed to serialize unified exec output: {err:?}"
))
})?;
Ok(ToolOutput::Function {
content,
@@ -208,30 +177,32 @@ impl ToolHandler for UnifiedExecHandler {
}
}
fn format_response(response: &UnifiedExecResponse) -> String {
let mut sections = Vec::new();
if !response.chunk_id.is_empty() {
sections.push(format!("Chunk ID: {}", response.chunk_id));
}
let wall_time_seconds = response.wall_time.as_secs_f64();
sections.push(format!("Wall time: {wall_time_seconds:.4} seconds"));
if let Some(exit_code) = response.exit_code {
sections.push(format!("Process exited with code {exit_code}"));
}
if let Some(session_id) = response.session_id {
sections.push(format!("Process running with session ID {session_id}"));
}
if let Some(original_token_count) = response.original_token_count {
sections.push(format!("Original token count: {original_token_count}"));
}
sections.push("Output:".to_string());
sections.push(response.output.clone());
sections.join("\n")
#[derive(Serialize)]
struct SerializedUnifiedExecResponse<'a> {
chunk_id: &'a str,
wall_time_seconds: f64,
output: &'a str,
#[serde(skip_serializing_if = "Option::is_none")]
session_id: Option<i32>,
#[serde(skip_serializing_if = "Option::is_none")]
exit_code: Option<i32>,
#[serde(skip_serializing_if = "Option::is_none")]
original_token_count: Option<usize>,
}
fn serialize_response(response: &UnifiedExecResponse) -> Result<String, serde_json::Error> {
let payload = SerializedUnifiedExecResponse {
chunk_id: &response.chunk_id,
wall_time_seconds: duration_to_seconds(response.wall_time),
output: &response.output,
session_id: response.session_id,
exit_code: response.exit_code,
original_token_count: response.original_token_count,
};
serde_json::to_string(&payload)
}
fn duration_to_seconds(duration: Duration) -> f64 {
duration.as_secs_f64()
}
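For reference, an illustrative shape of the JSON string serialize_response produces for a finished command; fields that are None (here session_id and original_token_count) are omitted because of skip_serializing_if. Values are made up:

// Illustrative serialized shape; not the crate's code.
fn main() {
    let example = serde_json::json!({
        "chunk_id": "chunk-1",
        "wall_time_seconds": 0.0421,
        "output": "total 8\ndrwxr-xr-x 2 user user 4096 .\n",
        "exit_code": 0
    });
    println!("{example}");
}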

Some files were not shown because too many files have changed in this diff