Compare commits


1 Commit

Author: Michael Bolin
SHA1: 070970af30
Message: exploration: rollback #1602
Date: 2025-08-13 23:42:50 -07:00
197 changed files with 7313 additions and 10262 deletions

View File

@@ -9,7 +9,7 @@
},
"devDependencies": {
"@types/bun": "^1.2.20",
"@types/node": "^24.3.0",
"@types/node": "^24.2.1",
"prettier": "^3.6.2",
"typescript": "^5.9.2",
},
@@ -50,7 +50,7 @@
"@types/bun": ["@types/bun@1.2.20", "", { "dependencies": { "bun-types": "1.2.20" } }, "sha512-dX3RGzQ8+KgmMw7CsW4xT5ITBSCrSbfHc36SNT31EOUg/LA9JWq0VDdEXDRSe1InVWpd2yLUM1FUF/kEOyTzYA=="],
"@types/node": ["@types/node@24.3.0", "", { "dependencies": { "undici-types": "~7.10.0" } }, "sha512-aPTXCrfwnDLj4VvXrm+UUCQjNEvJgNA8s5F1cvwQU+3KNltTOkBm1j30uNLyqqPNe7gE3KFzImYoZEfLhp4Yow=="],
"@types/node": ["@types/node@24.2.1", "", { "dependencies": { "undici-types": "~7.10.0" } }, "sha512-DRh5K+ka5eJic8CjH7td8QpYEV6Zo10gfRkjHCO3weqZHWDtAaSTFtl4+VMqOJ4N5jcuhZ9/l+yy8rVgw7BQeQ=="],
"@types/react": ["@types/react@19.1.8", "", { "dependencies": { "csstype": "^3.0.2" } }, "sha512-AwAfQ2Wa5bCx9WP8nZL2uMZWod7J7/JSplxbTmBQ5ms6QpqNYm672H0Vu9ZVKVngQ+ii4R/byguVEUZQyeg44g=="],
@@ -82,8 +82,6 @@
"@octokit/plugin-rest-endpoint-methods/@octokit/types": ["@octokit/types@12.6.0", "", { "dependencies": { "@octokit/openapi-types": "^20.0.0" } }, "sha512-1rhSOfRa6H9w4YwK0yrf5faDaDTb+yLyBUKOCV4xtCDB5VmIPqd/v9yr9o6SAzOAlRxMiRiCic6JVM1/kunVkw=="],
"bun-types/@types/node": ["@types/node@24.2.1", "", { "dependencies": { "undici-types": "~7.10.0" } }, "sha512-DRh5K+ka5eJic8CjH7td8QpYEV6Zo10gfRkjHCO3weqZHWDtAaSTFtl4+VMqOJ4N5jcuhZ9/l+yy8rVgw7BQeQ=="],
"@octokit/plugin-paginate-rest/@octokit/types/@octokit/openapi-types": ["@octokit/openapi-types@20.0.0", "", {}, "sha512-EtqRBEjp1dL/15V7WiX5LJMIxxkdiGJnabzYx5Apx4FkQIFgAfKumXeYAqqJCj1s+BMX4cPFIFC4OLCR6stlnA=="],
"@octokit/plugin-rest-endpoint-methods/@octokit/types/@octokit/openapi-types": ["@octokit/openapi-types@20.0.0", "", {}, "sha512-EtqRBEjp1dL/15V7WiX5LJMIxxkdiGJnabzYx5Apx4FkQIFgAfKumXeYAqqJCj1s+BMX4cPFIFC4OLCR6stlnA=="],

View File

@@ -14,7 +14,7 @@
},
"devDependencies": {
"@types/bun": "^1.2.20",
"@types/node": "^24.3.0",
"@types/node": "^24.2.1",
"prettier": "^3.6.2",
"typescript": "^5.9.2"
}

View File

@@ -24,7 +24,3 @@ updates:
directory: /
schedule:
interval: weekly
- package-ecosystem: rust-toolchain
directory: codex-rs
schedule:
interval: weekly

View File

@@ -17,10 +17,6 @@
"linux-aarch64": {
"regex": "^codex-aarch64-unknown-linux-musl\\.zst$",
"path": "codex"
},
"windows-x86_64": {
"regex": "^codex-x86_64-pc-windows-msvc\\.exe\\.zst$",
"path": "codex.exe"
}
}
}

View File

@@ -12,7 +12,7 @@ jobs:
NODE_OPTIONS: --max-old-space-size=4096
steps:
- name: Checkout repository
uses: actions/checkout@v5
uses: actions/checkout@v4
- name: Setup Node.js
uses: actions/setup-node@v4

View File

@@ -18,7 +18,7 @@ jobs:
steps:
- name: Checkout
uses: actions/checkout@v5
uses: actions/checkout@v4
- name: Annotate locations with typos
uses: codespell-project/codespell-problem-matcher@b80729f885d32f78a716c2f107b4db1025001c42 # v1
- name: Codespell

View File

@@ -37,9 +37,9 @@ jobs:
# Codex is not going to run.
- name: Checkout repository
uses: actions/checkout@v5
uses: actions/checkout@v4
- uses: dtolnay/rust-toolchain@1.89
- uses: dtolnay/rust-toolchain@1.88
with:
targets: x86_64-unknown-linux-gnu
components: clippy

View File

@@ -1,76 +1,42 @@
name: rust-ci
on:
pull_request: {}
pull_request:
branches:
- main
paths:
- "codex-rs/**"
- ".github/**"
push:
branches:
- main
workflow_dispatch:
# CI builds in debug (dev) for faster signal.
# For CI, we build in debug (`--profile dev`) rather than release mode so we
# get signal faster.
jobs:
# --- Detect what changed (always runs) -------------------------------------
changed:
name: Detect changed areas
runs-on: ubuntu-24.04
outputs:
codex: ${{ steps.detect.outputs.codex }}
workflows: ${{ steps.detect.outputs.workflows }}
steps:
- uses: actions/checkout@v5
with:
fetch-depth: 0
- name: Detect changed paths (no external action)
id: detect
shell: bash
run: |
set -euo pipefail
if [[ "${{ github.event_name }}" == "pull_request" ]]; then
BASE_SHA='${{ github.event.pull_request.base.sha }}'
echo "Base SHA: $BASE_SHA"
# List files changed between base and current HEAD (merge-base aware)
mapfile -t files < <(git diff --name-only --no-renames "$BASE_SHA"...HEAD)
else
# On push / manual runs, default to running everything
files=("codex-rs/force" ".github/force")
fi
codex=false
workflows=false
for f in "${files[@]}"; do
[[ $f == codex-rs/* ]] && codex=true
[[ $f == .github/* ]] && workflows=true
done
echo "codex=$codex" >> "$GITHUB_OUTPUT"
echo "workflows=$workflows" >> "$GITHUB_OUTPUT"
# --- CI that doesn't need specific targets ---------------------------------
# CI that doesn't need specific targets
general:
name: Format / etc
runs-on: ubuntu-24.04
needs: changed
if: ${{ needs.changed.outputs.codex == 'true' || needs.changed.outputs.workflows == 'true' || github.event_name == 'push' }}
defaults:
run:
working-directory: codex-rs
steps:
- uses: actions/checkout@v5
- uses: dtolnay/rust-toolchain@1.89
- uses: actions/checkout@v4
- uses: dtolnay/rust-toolchain@1.88
with:
components: rustfmt
- name: cargo fmt
run: cargo fmt -- --config imports_granularity=Item --check
# --- CI to validate on different os/targets --------------------------------
# CI to validate on different os/targets
lint_build_test:
name: ${{ matrix.runner }} - ${{ matrix.target }}${{ matrix.profile == 'release' && ' (release)' || '' }}
runs-on: ${{ matrix.runner }}
timeout-minutes: 30
needs: changed
# Keep job-level if to avoid spinning up runners when not needed
if: ${{ needs.changed.outputs.codex == 'true' || needs.changed.outputs.workflows == 'true' || github.event_name == 'push' }}
defaults:
run:
working-directory: codex-rs
@@ -78,6 +44,8 @@ jobs:
strategy:
fail-fast: false
matrix:
# Note: While Codex CLI does not support Windows today, we include
# Windows in CI to ensure the code at least builds there.
include:
- runner: macos-14
target: aarch64-apple-darwin
@@ -111,8 +79,8 @@ jobs:
profile: release
steps:
- uses: actions/checkout@v5
- uses: dtolnay/rust-toolchain@1.89
- uses: actions/checkout@v4
- uses: dtolnay/rust-toolchain@1.88
with:
targets: ${{ matrix.target }}
components: clippy
@@ -130,7 +98,7 @@ jobs:
- if: ${{ matrix.target == 'x86_64-unknown-linux-musl' || matrix.target == 'aarch64-unknown-linux-musl'}}
name: Install musl build tools
run: |
sudo apt install -y musl-tools pkg-config && sudo rm -rf /var/lib/apt/lists/*
sudo apt install -y musl-tools pkg-config
- name: cargo clippy
id: clippy
@@ -139,15 +107,13 @@ jobs:
# Running `cargo build` from the workspace root builds the workspace using
# the union of all features from third-party crates. This can mask errors
# where individual crates have underspecified features. To avoid this, we
# run `cargo check` for each crate individually, though because this is
# run `cargo build` for each crate individually, though because this is
# slower, we only do this for the x86_64-unknown-linux-gnu target.
- name: cargo check individual crates
id: cargo_check_all_crates
- name: cargo build individual crates
id: build
if: ${{ matrix.target == 'x86_64-unknown-linux-gnu' && matrix.profile != 'release' }}
continue-on-error: true
run: |
find . -name Cargo.toml -mindepth 2 -maxdepth 2 -print0 \
| xargs -0 -n1 -I{} bash -c 'cd "$(dirname "{}")" && cargo check --profile ${{ matrix.profile }}'
run: find . -name Cargo.toml -mindepth 2 -maxdepth 2 -print0 | xargs -0 -n1 -I{} bash -c 'cd "$(dirname "{}")" && cargo build --profile ${{ matrix.profile }}'
- name: cargo test
id: test
@@ -162,32 +128,8 @@ jobs:
- name: verify all steps passed
if: |
steps.clippy.outcome == 'failure' ||
steps.cargo_check_all_crates.outcome == 'failure' ||
steps.build.outcome == 'failure' ||
steps.test.outcome == 'failure'
run: |
echo "One or more checks failed (clippy, cargo_check_all_crates, or test). See logs for details."
echo "One or more checks failed (clippy, build, or test). See logs for details."
exit 1
# --- Gatherer job that you mark as the ONLY required status -----------------
results:
name: CI results (required)
needs: [changed, general, lint_build_test]
if: always()
runs-on: ubuntu-24.04
steps:
- name: Summarize
shell: bash
run: |
echo "general: ${{ needs.general.result }}"
echo "matrix : ${{ needs.lint_build_test.result }}"
# If nothing relevant changed (PR touching only root README, etc.),
# declare success regardless of other jobs.
if [[ '${{ needs.changed.outputs.codex }}' != 'true' && '${{ needs.changed.outputs.workflows }}' != 'true' && '${{ github.event_name }}' != 'push' ]]; then
echo 'No relevant changes -> CI not required.'
exit 0
fi
# Otherwise require the jobs to have succeeded
[[ '${{ needs.general.result }}' == 'success' ]] || { echo 'general failed'; exit 1; }
[[ '${{ needs.lint_build_test.result }}' == 'success' ]] || { echo 'matrix failed'; exit 1; }

View File

@@ -19,7 +19,7 @@ jobs:
tag-check:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v5
- uses: actions/checkout@v4
- name: Validate tag matches Cargo.toml version
shell: bash
@@ -74,8 +74,8 @@ jobs:
target: x86_64-pc-windows-msvc
steps:
- uses: actions/checkout@v5
- uses: dtolnay/rust-toolchain@1.89
- uses: actions/checkout@v4
- uses: dtolnay/rust-toolchain@1.88
with:
targets: ${{ matrix.target }}
@@ -117,11 +117,10 @@ jobs:
dest="dist/${{ matrix.target }}"
# For compatibility with environments that lack the `zstd` tool we
# additionally create a `.tar.gz` for all platforms and `.zip` for
# Windows alongside every single binary that we publish. The end result is:
# additionally create a `.tar.gz` alongside every single binary that
# we publish. The end result is:
# codex-<target>.zst (existing)
# codex-<target>.tar.gz (new)
# codex-<target>.zip (only for Windows)
# 1. Produce a .tar.gz for every file in the directory *before* we
# run `zstd --rm`, because that flag deletes the original files.
@@ -129,20 +128,13 @@ jobs:
base="$(basename "$f")"
# Skip files that are already archives (shouldn't happen, but be
# safe).
if [[ "$base" == *.tar.gz || "$base" == *.zip ]]; then
if [[ "$base" == *.tar.gz ]]; then
continue
fi
# Create per-binary tar.gz
tar -C "$dest" -czf "$dest/${base}.tar.gz" "$base"
# Create zip archive for Windows binaries
# Must run from inside the dest dir so 7z won't
# embed the directory path inside the zip.
if [[ "${{ matrix.runner }}" == windows* ]]; then
(cd "$dest" && 7z a "${base}.zip" "$base")
fi
# Also create .zst (existing behaviour) *and* remove the original
# uncompressed binary to keep the directory small.
zstd -T0 -19 --rm "$dest/$base"
@@ -163,7 +155,7 @@ jobs:
steps:
- name: Checkout repository
uses: actions/checkout@v5
uses: actions/checkout@v4
- uses: actions/download-artifact@v4
with:

.vscode/launch.json vendored
View File

@@ -1,22 +1,18 @@
{
"version": "0.2.0",
"configurations": [
{
"type": "lldb",
"request": "launch",
"name": "Cargo launch",
"cargo": {
"cwd": "${workspaceFolder}/codex-rs",
"args": ["build", "--bin=codex-tui"]
},
"args": []
},
{
"type": "lldb",
"request": "attach",
"name": "Attach to running codex CLI",
"pid": "${command:pickProcess}",
"sourceLanguages": ["rust"]
}
]
"version": "0.2.0",
"configurations": [
{
"type": "lldb",
"request": "launch",
"name": "Cargo launch",
"cargo": {
"cwd": "${workspaceFolder}/codex-rs",
"args": [
"build",
"--bin=codex-tui"
]
},
"args": []
}
]
}

View File

@@ -8,35 +8,6 @@ In the codex-rs folder where the rust code lives:
- You operate in a sandbox where `CODEX_SANDBOX_NETWORK_DISABLED=1` will be set whenever you use the `shell` tool. Any existing code that uses `CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR` was authored with this fact in mind. It is often used to exit early from tests that the author knew you would not be able to run given your sandbox limitations; see the sketch below.
- Similarly, when you spawn a process using Seatbelt (`/usr/bin/sandbox-exec`), `CODEX_SANDBOX=seatbelt` will be set on the child process. Integration tests that want to run Seatbelt themselves cannot be run under Seatbelt, so checks for `CODEX_SANDBOX=seatbelt` are also often used to exit early from tests, as appropriate.
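For illustration, a minimal sketch of that early-exit pattern (the test name and body are hypothetical; only the environment variable comes from the notes above):

```rust
#[test]
fn fetches_release_metadata() {
    // Hypothetical test that needs network access. Inside the Codex
    // sandbox, CODEX_SANDBOX_NETWORK_DISABLED=1 is set, so we exit
    // early instead of failing on blocked network I/O.
    if std::env::var("CODEX_SANDBOX_NETWORK_DISABLED").is_ok() {
        eprintln!("skipping: network is disabled in the Codex sandbox");
        return;
    }
    // ... test body that performs real network requests ...
}
```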
Before finalizing a change to `codex-rs`, run `just fmt` (in `codex-rs` directory) to format the code and `just fix -p <project>` (in `codex-rs` directory) to fix any linter issues in the code. Additionally, run the tests:
1. Run the test for the specific project that was changed. For example, if changes were made in `codex-rs/tui`, run `cargo test -p codex-tui`.
2. Once those pass, if any changes were made in common, core, or protocol, run the complete test suite with `cargo test --all-features`.
Before creating a pull request with changes to `codex-rs`, run `just fmt` (in the `codex-rs` directory) to format the code, run `just fix` (in the `codex-rs` directory) to fix any linter issues, and ensure the test suite passes by running `cargo test --all-features` in the `codex-rs` directory.
## TUI style conventions
See `codex-rs/tui/styles.md`.
## TUI code conventions
- Use concise styling helpers from ratatui's Stylize trait.
- Basic spans: use "text".into()
- Styled spans: use "text".red(), "text".green(), "text".magenta(), "text".dim(), etc.
- Prefer these over constructing styles with `Span::styled` and `Style` directly.
- Example: patch summary file lines
- Desired: vec![" └ ".into(), "M".red(), " ".dim(), "tui/src/app.rs".dim()]
## Snapshot tests
This repo uses snapshot tests (via `insta`), especially in `codex-rs/tui`, to validate rendered output. When UI or text output changes intentionally, update the snapshots as follows:
- Run tests to generate any updated snapshots:
- `cargo test -p codex-tui`
- Check what's pending:
- `cargo insta pending-snapshots -p codex-tui`
- Review changes by reading the generated `*.snap.new` files directly in the repo, or preview a specific file:
- `cargo insta show -p codex-tui path/to/file.snap.new`
- Only if you intend to accept all new snapshots in this crate, run:
- `cargo insta accept -p codex-tui`
If you don't have the tool:
- `cargo install cargo-insta`
When making individual changes, prefer running tests on individual files or projects first.
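For reference, a minimal insta snapshot test looks like the following sketch (it assumes the `insta` dev-dependency; the rendering logic is hypothetical, while `insta::assert_snapshot!` is the real macro):

```rust
#[test]
fn renders_status_line() {
    // Hypothetical rendering logic standing in for real TUI output.
    let rendered = format!("codex v{} | model: {}", "0.0.0", "gpt-5");
    // On first run insta writes a `*.snap.new` file; review it, then
    // promote it with `cargo insta accept -p codex-tui` as described above.
    insta::assert_snapshot!(rendered);
}
```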

View File

@@ -22,7 +22,6 @@
- [Authenticate locally and copy your credentials to the "headless" machine](#authenticate-locally-and-copy-your-credentials-to-the-headless-machine)
- [Connecting through VPS or remote](#connecting-through-vps-or-remote)
- [Usage-based billing alternative: Use an OpenAI API key](#usage-based-billing-alternative-use-an-openai-api-key)
- [Forcing a specific auth method (advanced)](#forcing-a-specific-auth-method-advanced)
- [Choosing Codex's level of autonomy](#choosing-codexs-level-of-autonomy)
- [**1. Read/write**](#1-readwrite)
- [**2. Read-only**](#2-read-only)
@@ -166,35 +165,6 @@ Notes:
- This command only sets the key for your current terminal session, which we recommend. To set it for all future sessions, you can also add the `export` line to your shell's configuration file (e.g., `~/.zshrc`).
- If you have signed in with ChatGPT, Codex will default to using your ChatGPT credits. If you wish to use your API key, use the `/logout` command to clear your ChatGPT authentication.
#### Forcing a specific auth method (advanced)
You can explicitly choose which authentication Codex should prefer when both are available.
- To always use your API key (even when ChatGPT auth exists), set:
```toml
# ~/.codex/config.toml
preferred_auth_method = "apikey"
```
Or override ad-hoc via CLI:
```bash
codex --config preferred_auth_method="apikey"
```
- To prefer ChatGPT auth (default), set:
```toml
# ~/.codex/config.toml
preferred_auth_method = "chatgpt"
```
Notes:
- When `preferred_auth_method = "apikey"` and an API key is available, the login screen is skipped.
- When `preferred_auth_method = "chatgpt"` (default), Codex prefers ChatGPT auth if present; if only an API key is present, it will use the API key. Certain account types may also require API-key mode.
### Choosing Codex's level of autonomy
We always recommend running Codex in its default sandbox, which gives you strong guardrails around what the agent can do. The default sandbox prevents it from editing files outside its workspace or from accessing the network.

codex-rs/Cargo.lock generated
View File

@@ -176,9 +176,9 @@ dependencies = [
[[package]]
name = "anyhow"
version = "1.0.99"
version = "1.0.98"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b0674a1ddeecb70197781e945de4b3b8ffb61fa939a5597bcf48503737663100"
checksum = "e16d2d3311acee920a9eb8d33b8cbc1787ce4a264e85f964c2404b969bdcd487"
[[package]]
name = "arbitrary"
@@ -203,12 +203,6 @@ version = "0.7.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7c02d123df017efcdfbd739ef81735b36c5ba83ec3c59c80a9d7ecc718f92e50"
[[package]]
name = "ascii"
version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d92bec98840b8f03a5ff5413de5293bfcd8bf96467cf5452609f939ec6f5de16"
[[package]]
name = "ascii-canvas"
version = "3.0.0"
@@ -487,12 +481,6 @@ dependencies = [
"shlex",
]
[[package]]
name = "cesu8"
version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6d43a04d8753f35258c91f8ec639f792891f748a1edbd759cf1dcea3382ad83c"
[[package]]
name = "cfg-expr"
version = "0.15.8"
@@ -530,17 +518,11 @@ dependencies = [
"windows-link",
]
[[package]]
name = "chunked_transfer"
version = "1.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6e4de3bc4ea267985becf712dc6d9eed8b04c953b3fcfb339ebc87acd9804901"
[[package]]
name = "clap"
version = "4.5.45"
version = "4.5.43"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1fc0e74a703892159f5ae7d3aac52c8e6c392f5ae5f359c70b5881d60aaac318"
checksum = "50fd97c9dc2399518aa331917ac6f274280ec5eb34e555dd291899745c48ec6f"
dependencies = [
"clap_builder",
"clap_derive",
@@ -548,9 +530,9 @@ dependencies = [
[[package]]
name = "clap_builder"
version = "4.5.44"
version = "4.5.43"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b3e7f4214277f3c7aa526a59dd3fbe306a370daee1f8b7b8c987069cd8e888a8"
checksum = "c35b5830294e1fa0462034af85cc95225a4cb07092c088c55bda3147cfcd8f65"
dependencies = [
"anstream",
"anstyle",
@@ -561,18 +543,18 @@ dependencies = [
[[package]]
name = "clap_complete"
version = "4.5.57"
version = "4.5.56"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4d9501bd3f5f09f7bbee01da9a511073ed30a80cd7a509f1214bb74eadea71ad"
checksum = "67e4efcbb5da11a92e8a609233aa1e8a7d91e38de0be865f016d14700d45a7fd"
dependencies = [
"clap",
]
[[package]]
name = "clap_derive"
version = "4.5.45"
version = "4.5.41"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "14cb31bb0a7d536caef2639baa7fad459e15c3144efefa6dbd1c84562c4739f6"
checksum = "ef4f52386a59ca4c860f7393bcf8abd8dfd91ecccc0f774635ff68e92eeef491"
dependencies = [
"heck",
"proc-macro2",
@@ -665,8 +647,6 @@ dependencies = [
"codex-exec",
"codex-login",
"codex-mcp-server",
"codex-protocol",
"codex-protocol-ts",
"codex-tui",
"serde_json",
"tokio",
@@ -680,7 +660,6 @@ version = "0.0.0"
dependencies = [
"clap",
"codex-core",
"codex-protocol",
"serde",
"toml 0.9.5",
]
@@ -698,11 +677,11 @@ dependencies = [
"codex-apply-patch",
"codex-login",
"codex-mcp-client",
"codex-protocol",
"core_test_support",
"dirs",
"env-flags",
"eventsource-stream",
"fs2",
"futures",
"landlock",
"libc",
@@ -754,9 +733,6 @@ dependencies = [
"codex-common",
"codex-core",
"codex-ollama",
"codex-protocol",
"core_test_support",
"libc",
"owo-colors",
"predicates",
"serde_json",
@@ -765,7 +741,6 @@ dependencies = [
"tokio",
"tracing",
"tracing-subscriber",
"wiremock",
]
[[package]]
@@ -823,18 +798,12 @@ dependencies = [
"base64 0.22.1",
"chrono",
"pretty_assertions",
"rand 0.8.5",
"reqwest",
"serde",
"serde_json",
"sha2",
"tempfile",
"thiserror 2.0.12",
"tiny_http",
"tokio",
"url",
"urlencoding",
"webbrowser",
]
[[package]]
@@ -857,10 +826,7 @@ dependencies = [
"anyhow",
"assert_cmd",
"codex-arg0",
"codex-common",
"codex-core",
"codex-login",
"codex-protocol",
"mcp-types",
"mcp_test_support",
"pretty_assertions",
@@ -896,37 +862,11 @@ dependencies = [
"wiremock",
]
[[package]]
name = "codex-protocol"
version = "0.0.0"
dependencies = [
"mcp-types",
"pretty_assertions",
"serde",
"serde_bytes",
"serde_json",
"strum 0.27.2",
"strum_macros 0.27.2",
"ts-rs",
"uuid",
]
[[package]]
name = "codex-protocol-ts"
version = "0.0.0"
dependencies = [
"anyhow",
"clap",
"codex-protocol",
"ts-rs",
]
[[package]]
name = "codex-tui"
version = "0.0.0"
dependencies = [
"anyhow",
"async-stream",
"base64 0.22.1",
"chrono",
"clap",
@@ -937,7 +877,6 @@ dependencies = [
"codex-file-search",
"codex-login",
"codex-ollama",
"codex-protocol",
"color-eyre",
"crossterm",
"diffy",
@@ -949,7 +888,7 @@ dependencies = [
"once_cell",
"path-clean",
"pretty_assertions",
"rand 0.9.2",
"rand 0.8.5",
"ratatui",
"ratatui-image",
"regex-lite",
@@ -962,7 +901,6 @@ dependencies = [
"supports-color",
"textwrap 0.16.2",
"tokio",
"tokio-stream",
"tracing",
"tracing-appender",
"tracing-subscriber",
@@ -1013,16 +951,6 @@ version = "1.0.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b05b61dc5112cbb17e4b6cd61790d9845d13888356391624cbe7e41efeac1e75"
[[package]]
name = "combine"
version = "4.6.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ba5a308b75df32fe02788e748662718f03fde005016435c444eea572398219fd"
dependencies = [
"bytes",
"memchr",
]
[[package]]
name = "compact_str"
version = "0.8.1"
@@ -1077,16 +1005,6 @@ dependencies = [
"libc",
]
[[package]]
name = "core-foundation"
version = "0.10.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b2a6cd9ae233e7f62ba4e9353e81a88df7fc8a5987b8d445b4d90c879bd156f6"
dependencies = [
"core-foundation-sys",
"libc",
]
[[package]]
name = "core-foundation-sys"
version = "0.8.7"
@@ -1163,7 +1081,6 @@ checksum = "829d955a0bb380ef178a640b91779e3987da38c9aea133b20614cfed8cdea9c6"
dependencies = [
"bitflags 2.9.1",
"crossterm_winapi",
"futures-core",
"mio",
"parking_lot",
"rustix 0.38.44",
@@ -1747,6 +1664,16 @@ dependencies = [
"percent-encoding",
]
[[package]]
name = "fs2"
version = "0.4.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9564fc758e15025b46aa6643b1b77d047d1a56a1aea6e01002ac0c7026876213"
dependencies = [
"libc",
"winapi",
]
[[package]]
name = "futures"
version = "0.3.31"
@@ -2528,28 +2455,6 @@ dependencies = [
"syn 2.0.104",
]
[[package]]
name = "jni"
version = "0.21.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1a87aa2bb7d2af34197c04845522473242e1aa17c12f4935d5856491a7fb8c97"
dependencies = [
"cesu8",
"cfg-if",
"combine",
"jni-sys",
"log",
"thiserror 1.0.69",
"walkdir",
"windows-sys 0.45.0",
]
[[package]]
name = "jni-sys"
version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8eaf4bc02d17cbdd7ff4c7438cafcdf7fb9a4613313ad11b4f8fefe7d3fa0130"
[[package]]
name = "jobserver"
version = "0.1.33"
@@ -2632,9 +2537,9 @@ checksum = "03087c2bad5e1034e8cace5926dec053fb3790248370865f5117a7d0213354c8"
[[package]]
name = "libc"
version = "0.2.175"
version = "0.2.174"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6a82ae493e598baaea5209805c49bbf2ea7de956d50d7da0da1164f9c6d28543"
checksum = "1171693293099992e19cddea4e8b849964e9846f4acee11b3948bcc337be8776"
[[package]]
name = "libfuzzer-sys"
@@ -2781,7 +2686,6 @@ version = "0.0.0"
dependencies = [
"serde",
"serde_json",
"ts-rs",
]
[[package]]
@@ -2792,10 +2696,8 @@ dependencies = [
"assert_cmd",
"codex-core",
"codex-mcp-server",
"codex-protocol",
"mcp-types",
"pretty_assertions",
"serde",
"serde_json",
"shlex",
"tempfile",
@@ -2889,12 +2791,6 @@ dependencies = [
"tempfile",
]
[[package]]
name = "ndk-context"
version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "27b02d87554356db9e9a873add8782d4ea6e3e58ea071a9adb9a2e8ddb884a8b"
[[package]]
name = "new_debug_unreachable"
version = "1.0.6"
@@ -3048,31 +2944,6 @@ dependencies = [
"libc",
]
[[package]]
name = "objc2"
version = "0.6.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "561f357ba7f3a2a61563a186a163d0a3a5247e1089524a3981d49adb775078bc"
dependencies = [
"objc2-encode",
]
[[package]]
name = "objc2-encode"
version = "4.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ef25abbcd74fb2609453eb695bd2f860d389e457f67dc17cafc8b8cbc89d0c33"
[[package]]
name = "objc2-foundation"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "900831247d2fe1a09a683278e5384cfb8c80c79fe6b166f9d14bfdde0ea1b03c"
dependencies = [
"bitflags 2.9.1",
"objc2",
]
[[package]]
name = "object"
version = "0.36.7"
@@ -3799,7 +3670,6 @@ dependencies = [
"base64 0.22.1",
"bytes",
"encoding_rs",
"futures-channel",
"futures-core",
"futures-util",
"h2",
@@ -4122,7 +3992,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "897b2245f0b511c87893af39b033e5ca9cce68824c4d7e7630b5a1d339658d02"
dependencies = [
"bitflags 2.9.1",
"core-foundation 0.9.4",
"core-foundation",
"core-foundation-sys",
"libc",
"security-framework-sys",
@@ -4281,17 +4151,6 @@ dependencies = [
"digest",
]
[[package]]
name = "sha2"
version = "0.10.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a7507d819769d01a365ab707794a4084392c824f54a7a6a7862f8c3d0892b283"
dependencies = [
"cfg-if",
"cpufeatures",
"digest",
]
[[package]]
name = "sharded-slab"
version = "0.1.7"
@@ -4656,7 +4515,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3c879d448e9d986b661742763247d3693ed13609438cf3d006f51f5368a5ba6b"
dependencies = [
"bitflags 2.9.1",
"core-foundation 0.9.4",
"core-foundation",
"system-configuration-sys",
]
@@ -4713,15 +4572,6 @@ dependencies = [
"winapi",
]
[[package]]
name = "termcolor"
version = "1.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "06794f8f6c5c898b3275aebefa6b8a1cb24cd2c6c79397ab15774837a0bc5755"
dependencies = [
"winapi-util",
]
[[package]]
name = "terminal_size"
version = "0.4.2"
@@ -4860,18 +4710,6 @@ dependencies = [
"crunchy",
]
[[package]]
name = "tiny_http"
version = "0.12.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "389915df6413a2e74fb181895f933386023c71110878cd0825588928e64cdc82"
dependencies = [
"ascii",
"chunked_transfer",
"httpdate",
"log",
]
[[package]]
name = "tinystr"
version = "0.8.1"
@@ -5221,30 +5059,6 @@ version = "0.2.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b"
[[package]]
name = "ts-rs"
version = "11.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6ef1b7a6d914a34127ed8e1fa927eb7088903787bcded4fa3eef8f85ee1568be"
dependencies = [
"serde_json",
"thiserror 2.0.12",
"ts-rs-macros",
"uuid",
]
[[package]]
name = "ts-rs-macros"
version = "11.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e9d4ed7b4c18cc150a6a0a1e9ea1ecfa688791220781af6e119f9599a8502a0a"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.104",
"termcolor",
]
[[package]]
name = "tui-input"
version = "0.14.0"
@@ -5348,12 +5162,6 @@ dependencies = [
"serde",
]
[[package]]
name = "urlencoding"
version = "2.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "daf8dba3b7eb870caf1ddeed7bc9d2a049f3cfdfae7cb521b087cc33ae4c49da"
[[package]]
name = "utf8_iter"
version = "1.0.4"
@@ -5577,22 +5385,6 @@ dependencies = [
"wasm-bindgen",
]
[[package]]
name = "webbrowser"
version = "1.0.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "aaf4f3c0ba838e82b4e5ccc4157003fb8c324ee24c058470ffb82820becbde98"
dependencies = [
"core-foundation 0.10.1",
"jni",
"log",
"ndk-context",
"objc2",
"objc2-foundation",
"url",
"web-sys",
]
[[package]]
name = "weezl"
version = "0.1.10"
@@ -5781,15 +5573,6 @@ dependencies = [
"windows-link",
]
[[package]]
name = "windows-sys"
version = "0.45.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "75283be5efb2831d37ea142365f009c02ec203cd29a3ebecbc093d52315b66d0"
dependencies = [
"windows-targets 0.42.2",
]
[[package]]
name = "windows-sys"
version = "0.52.0"
@@ -5817,21 +5600,6 @@ dependencies = [
"windows-targets 0.53.2",
]
[[package]]
name = "windows-targets"
version = "0.42.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8e5180c00cd44c9b1c88adb3693291f1cd93605ded80c250a75d472756b4d071"
dependencies = [
"windows_aarch64_gnullvm 0.42.2",
"windows_aarch64_msvc 0.42.2",
"windows_i686_gnu 0.42.2",
"windows_i686_msvc 0.42.2",
"windows_x86_64_gnu 0.42.2",
"windows_x86_64_gnullvm 0.42.2",
"windows_x86_64_msvc 0.42.2",
]
[[package]]
name = "windows-targets"
version = "0.52.6"
@@ -5864,12 +5632,6 @@ dependencies = [
"windows_x86_64_msvc 0.53.0",
]
[[package]]
name = "windows_aarch64_gnullvm"
version = "0.42.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "597a5118570b68bc08d8d59125332c54f1ba9d9adeedeef5b99b02ba2b0698f8"
[[package]]
name = "windows_aarch64_gnullvm"
version = "0.52.6"
@@ -5882,12 +5644,6 @@ version = "0.53.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "86b8d5f90ddd19cb4a147a5fa63ca848db3df085e25fee3cc10b39b6eebae764"
[[package]]
name = "windows_aarch64_msvc"
version = "0.42.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e08e8864a60f06ef0d0ff4ba04124db8b0fb3be5776a5cd47641e942e58c4d43"
[[package]]
name = "windows_aarch64_msvc"
version = "0.52.6"
@@ -5900,12 +5656,6 @@ version = "0.53.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c7651a1f62a11b8cbd5e0d42526e55f2c99886c77e007179efff86c2b137e66c"
[[package]]
name = "windows_i686_gnu"
version = "0.42.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c61d927d8da41da96a81f029489353e68739737d3beca43145c8afec9a31a84f"
[[package]]
name = "windows_i686_gnu"
version = "0.52.6"
@@ -5930,12 +5680,6 @@ version = "0.53.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9ce6ccbdedbf6d6354471319e781c0dfef054c81fbc7cf83f338a4296c0cae11"
[[package]]
name = "windows_i686_msvc"
version = "0.42.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "44d840b6ec649f480a41c8d80f9c65108b92d89345dd94027bfe06ac444d1060"
[[package]]
name = "windows_i686_msvc"
version = "0.52.6"
@@ -5948,12 +5692,6 @@ version = "0.53.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "581fee95406bb13382d2f65cd4a908ca7b1e4c2f1917f143ba16efe98a589b5d"
[[package]]
name = "windows_x86_64_gnu"
version = "0.42.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8de912b8b8feb55c064867cf047dda097f92d51efad5b491dfb98f6bbb70cb36"
[[package]]
name = "windows_x86_64_gnu"
version = "0.52.6"
@@ -5966,12 +5704,6 @@ version = "0.53.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2e55b5ac9ea33f2fc1716d1742db15574fd6fc8dadc51caab1c16a3d3b4190ba"
[[package]]
name = "windows_x86_64_gnullvm"
version = "0.42.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "26d41b46a36d453748aedef1486d5c7a85db22e56aff34643984ea85514e94a3"
[[package]]
name = "windows_x86_64_gnullvm"
version = "0.52.6"
@@ -5984,12 +5716,6 @@ version = "0.53.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0a6e035dd0599267ce1ee132e51c27dd29437f63325753051e71dd9e42406c57"
[[package]]
name = "windows_x86_64_msvc"
version = "0.42.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9aec5da331524158c6d1a4ac0ab1541149c0b9505fde06423b02f5ef0106b9f0"
[[package]]
name = "windows_x86_64_msvc"
version = "0.52.6"

View File

@@ -15,8 +15,6 @@ members = [
"mcp-server",
"mcp-types",
"ollama",
"protocol",
"protocol-ts",
"tui",
]
resolver = "2"

View File

@@ -22,8 +22,6 @@ use tree_sitter_bash::LANGUAGE as BASH;
/// Detailed instructions for gpt-4.1 on how to use the `apply_patch` tool.
pub const APPLY_PATCH_TOOL_INSTRUCTIONS: &str = include_str!("../apply_patch_tool_instructions.md");
const APPLY_PATCH_COMMANDS: [&str; 2] = ["apply_patch", "applypatch"];
#[derive(Debug, Error, PartialEq)]
pub enum ApplyPatchError {
#[error(transparent)]
@@ -84,6 +82,7 @@ pub struct ApplyPatchArgs {
}
pub fn maybe_parse_apply_patch(argv: &[String]) -> MaybeApplyPatch {
const APPLY_PATCH_COMMANDS: [&str; 2] = ["apply_patch", "applypatch"];
match argv {
[cmd, body] if APPLY_PATCH_COMMANDS.contains(&cmd.as_str()) => match parse_patch(body) {
Ok(source) => MaybeApplyPatch::Body(source),
@@ -92,9 +91,7 @@ pub fn maybe_parse_apply_patch(argv: &[String]) -> MaybeApplyPatch {
[bash, flag, script]
if bash == "bash"
&& flag == "-lc"
&& APPLY_PATCH_COMMANDS
.iter()
.any(|cmd| script.trim_start().starts_with(cmd)) =>
&& script.trim_start().starts_with("apply_patch") =>
{
match extract_heredoc_body_from_apply_patch_command(script) {
Ok(body) => match parse_patch(&body) {
@@ -169,7 +166,7 @@ impl ApplyPatchAction {
panic!("path must be absolute");
}
#[expect(clippy::expect_used)]
#[allow(clippy::expect_used)]
let filename = path
.file_name()
.expect("path should not be empty")
@@ -182,7 +179,7 @@ impl ApplyPatchAction {
*** End Patch"#,
);
let changes = HashMap::from([(path.to_path_buf(), ApplyPatchFileChange::Add { content })]);
#[expect(clippy::expect_used)]
#[allow(clippy::expect_used)]
Self {
changes,
cwd: path
@@ -265,10 +262,7 @@ pub fn maybe_parse_apply_patch_verified(argv: &[String], cwd: &Path) -> MaybeApp
fn extract_heredoc_body_from_apply_patch_command(
src: &str,
) -> std::result::Result<String, ExtractHeredocError> {
if !APPLY_PATCH_COMMANDS
.iter()
.any(|cmd| src.trim_start().starts_with(cmd))
{
if !src.trim_start().starts_with("apply_patch") {
return Err(ExtractHeredocError::CommandDidNotStartWithApplyPatch);
}
@@ -421,12 +415,12 @@ fn apply_hunks_to_files(hunks: &[Hunk]) -> anyhow::Result<AffectedPaths> {
for hunk in hunks {
match hunk {
Hunk::AddFile { path, contents } => {
if let Some(parent) = path.parent()
&& !parent.as_os_str().is_empty()
{
std::fs::create_dir_all(parent).with_context(|| {
format!("Failed to create parent directories for {}", path.display())
})?;
if let Some(parent) = path.parent() {
if !parent.as_os_str().is_empty() {
std::fs::create_dir_all(parent).with_context(|| {
format!("Failed to create parent directories for {}", path.display())
})?;
}
}
std::fs::write(path, contents)
.with_context(|| format!("Failed to write file {}", path.display()))?;
@@ -445,12 +439,15 @@ fn apply_hunks_to_files(hunks: &[Hunk]) -> anyhow::Result<AffectedPaths> {
let AppliedPatch { new_contents, .. } =
derive_new_contents_from_chunks(path, chunks)?;
if let Some(dest) = move_path {
if let Some(parent) = dest.parent()
&& !parent.as_os_str().is_empty()
{
std::fs::create_dir_all(parent).with_context(|| {
format!("Failed to create parent directories for {}", dest.display())
})?;
if let Some(parent) = dest.parent() {
if !parent.as_os_str().is_empty() {
std::fs::create_dir_all(parent).with_context(|| {
format!(
"Failed to create parent directories for {}",
dest.display()
)
})?;
}
}
std::fs::write(dest, new_contents)
.with_context(|| format!("Failed to write file {}", dest.display()))?;
@@ -532,12 +529,9 @@ fn compute_replacements(
// If a chunk has a `change_context`, we use seek_sequence to find it, then
// adjust our `line_index` to continue from there.
if let Some(ctx_line) = &chunk.change_context {
if let Some(idx) = seek_sequence::seek_sequence(
original_lines,
std::slice::from_ref(ctx_line),
line_index,
false,
) {
if let Some(idx) =
seek_sequence::seek_sequence(original_lines, &[ctx_line.clone()], line_index, false)
{
line_index = idx + 1;
} else {
return Err(ApplyPatchError::ComputeReplacements(format!(
@@ -688,6 +682,8 @@ pub fn print_summary(
#[cfg(test)]
mod tests {
#![allow(clippy::unwrap_used)]
use super::*;
use pretty_assertions::assert_eq;
use std::fs;
@@ -779,33 +775,6 @@ PATCH"#,
}
}
#[test]
fn test_heredoc_applypatch() {
let args = strs_to_strings(&[
"bash",
"-lc",
r#"applypatch <<'PATCH'
*** Begin Patch
*** Add File: foo
+hi
*** End Patch
PATCH"#,
]);
match maybe_parse_apply_patch(&args) {
MaybeApplyPatch::Body(ApplyPatchArgs { hunks, patch: _ }) => {
assert_eq!(
hunks,
vec![Hunk::AddFile {
path: PathBuf::from("foo"),
contents: "hi\n".to_string()
}]
);
}
result => panic!("expected MaybeApplyPatch::Body got {result:?}"),
}
}
#[test]
fn test_add_file_hunk_creates_file_with_contents() {
let dir = tempdir().unwrap();

View File

@@ -427,6 +427,7 @@ fn parse_update_file_chunk(
}
#[test]
#[allow(clippy::unwrap_used)]
fn test_parse_patch() {
assert_eq!(
parse_patch_text("bad", ParseMode::Strict),

View File

@@ -82,34 +82,10 @@ where
})
}
const ILLEGAL_ENV_VAR_PREFIX: &str = "CODEX_";
/// Load env vars from ~/.codex/.env and `$(pwd)/.env`.
///
/// Security: Do not allow `.env` files to create or modify any variables
/// with names starting with `CODEX_`.
fn load_dotenv() {
if let Ok(codex_home) = codex_core::config::find_codex_home()
&& let Ok(iter) = dotenvy::from_path_iter(codex_home.join(".env"))
{
set_filtered(iter);
}
if let Ok(iter) = dotenvy::dotenv_iter() {
set_filtered(iter);
}
}
/// Helper to set vars from a dotenvy iterator while filtering out `CODEX_` keys.
fn set_filtered<I>(iter: I)
where
I: IntoIterator<Item = Result<(String, String), dotenvy::Error>>,
{
for (key, value) in iter.into_iter().flatten() {
if !key.to_ascii_uppercase().starts_with(ILLEGAL_ENV_VAR_PREFIX) {
// It is safe to call set_var() because our process is
// single-threaded at this point in its execution.
unsafe { std::env::set_var(&key, &value) };
}
if let Ok(codex_home) = codex_core::config::find_codex_home() {
dotenvy::from_path(codex_home.join(".env")).ok();
}
dotenvy::dotenv().ok();
}

View File

@@ -1,4 +1,3 @@
use codex_login::AuthMode;
use codex_login::CodexAuth;
use std::path::Path;
use std::sync::LazyLock;
@@ -20,7 +19,7 @@ pub fn set_chatgpt_token_data(value: TokenData) {
/// Initialize the ChatGPT token from auth.json file
pub async fn init_chatgpt_token_from_auth(codex_home: &Path) -> std::io::Result<()> {
let auth = CodexAuth::from_codex_home(codex_home, AuthMode::ChatGPT)?;
let auth = CodexAuth::from_codex_home(codex_home)?;
if let Some(auth) = auth {
let token_data = auth.get_token_data().await?;
set_chatgpt_token_data(token_data);

View File

@@ -1,3 +1,5 @@
#![expect(clippy::expect_used)]
use codex_chatgpt::apply_command::apply_diff_from_task;
use codex_chatgpt::get_task::GetTaskResponse;
use std::path::Path;

View File

@@ -25,7 +25,6 @@ codex-core = { path = "../core" }
codex-exec = { path = "../exec" }
codex-login = { path = "../login" }
codex-mcp-server = { path = "../mcp-server" }
codex-protocol = { path = "../protocol" }
codex-tui = { path = "../tui" }
serde_json = "1"
tokio = { version = "1", features = [
@@ -37,4 +36,3 @@ tokio = { version = "1", features = [
] }
tracing = "0.1.41"
tracing-subscriber = "0.3.19"
codex-protocol-ts = { path = "../protocol-ts" }

View File

@@ -3,11 +3,11 @@ use std::path::PathBuf;
use codex_common::CliConfigOverrides;
use codex_core::config::Config;
use codex_core::config::ConfigOverrides;
use codex_core::config_types::SandboxMode;
use codex_core::exec::spawn_command_under_linux_sandbox;
use codex_core::exec_env::create_env;
use codex_core::landlock::spawn_command_under_linux_sandbox;
use codex_core::seatbelt::spawn_command_under_seatbelt;
use codex_core::spawn::StdioPolicy;
use codex_protocol::config_types::SandboxMode;
use crate::LandlockCommand;
use crate::SeatbeltCommand;

View File

@@ -1,33 +1,20 @@
use std::env;
use codex_common::CliConfigOverrides;
use codex_core::config::Config;
use codex_core::config::ConfigOverrides;
use codex_login::AuthMode;
use codex_login::CLIENT_ID;
use codex_login::CodexAuth;
use codex_login::OPENAI_API_KEY_ENV_VAR;
use codex_login::ServerOptions;
use codex_login::login_with_api_key;
use codex_login::login_with_chatgpt;
use codex_login::logout;
use codex_login::run_login_server;
use std::env;
use std::path::PathBuf;
pub async fn login_with_chatgpt(codex_home: PathBuf) -> std::io::Result<()> {
let opts = ServerOptions::new(codex_home, CLIENT_ID.to_string());
let server = run_login_server(opts)?;
eprintln!(
"Starting local login server on http://localhost:{}.\nIf your browser did not open, navigate to this URL to authenticate:\n\n{}",
server.actual_port, server.auth_url,
);
server.block_until_done().await
}
pub async fn run_login_with_chatgpt(cli_config_overrides: CliConfigOverrides) -> ! {
let config = load_config_or_exit(cli_config_overrides);
match login_with_chatgpt(config.codex_home).await {
let capture_output = false;
match login_with_chatgpt(&config.codex_home, capture_output).await {
Ok(_) => {
eprintln!("Successfully logged in");
std::process::exit(0);
@@ -60,18 +47,18 @@ pub async fn run_login_with_api_key(
pub async fn run_login_status(cli_config_overrides: CliConfigOverrides) -> ! {
let config = load_config_or_exit(cli_config_overrides);
match CodexAuth::from_codex_home(&config.codex_home, config.preferred_auth_method) {
match CodexAuth::from_codex_home(&config.codex_home) {
Ok(Some(auth)) => match auth.mode {
AuthMode::ApiKey => match auth.get_token().await {
Ok(api_key) => {
eprintln!("Logged in using an API key - {}", safe_format_key(&api_key));
if let Ok(env_api_key) = env::var(OPENAI_API_KEY_ENV_VAR)
&& env_api_key == api_key
{
eprintln!(
" API loaded from OPENAI_API_KEY environment variable or .env file"
);
if let Ok(env_api_key) = env::var(OPENAI_API_KEY_ENV_VAR) {
if env_api_key == api_key {
eprintln!(
" API loaded from OPENAI_API_KEY environment variable or .env file"
);
}
}
std::process::exit(0);
}

View File

@@ -72,10 +72,6 @@ enum Subcommand {
/// Apply the latest diff produced by Codex agent as a `git apply` to your local working tree.
#[clap(visible_alias = "a")]
Apply(ApplyCommand),
/// Internal: generate TypeScript protocol bindings.
#[clap(hide = true)]
GenerateTs(GenerateTsCommand),
}
#[derive(Debug, Parser)]
@@ -124,17 +120,6 @@ struct LogoutCommand {
config_overrides: CliConfigOverrides,
}
#[derive(Debug, Parser)]
struct GenerateTsCommand {
/// Output directory where .ts files will be written
#[arg(short = 'o', long = "out", value_name = "DIR")]
out_dir: PathBuf,
/// Optional path to the Prettier executable to format generated files
#[arg(short = 'p', long = "prettier", value_name = "PRETTIER_BIN")]
prettier: Option<PathBuf>,
}
fn main() -> anyhow::Result<()> {
arg0_dispatch_or_else(|codex_linux_sandbox_exe| async move {
cli_main(codex_linux_sandbox_exe).await?;
@@ -209,9 +194,6 @@ async fn cli_main(codex_linux_sandbox_exe: Option<PathBuf>) -> anyhow::Result<()
prepend_config_flags(&mut apply_cli.config_overrides, cli.config_overrides);
run_apply_command(apply_cli, None).await?;
}
Some(Subcommand::GenerateTs(gen_cli)) => {
codex_protocol_ts::generate_ts(&gen_cli.out_dir, gen_cli.prettier.as_deref())?;
}
}
Ok(())

View File

@@ -1,9 +0,0 @@
allow-expect-in-tests = true
allow-unwrap-in-tests = true
disallowed-methods = [
{ path = "ratatui::style::Color::Rgb", reason = "Use ANSI colors, which work better in various terminal themes." },
{ path = "ratatui::style::Color::Indexed", reason = "Use ANSI colors, which work better in various terminal themes." },
{ path = "ratatui::style::Stylize::white", reason = "Avoid hardcoding white; prefer default fg or dim/bold. Exception: Disable this rule if rendering over a hardcoded ANSI background." },
{ path = "ratatui::style::Stylize::black", reason = "Avoid hardcoding black; prefer default fg or dim/bold. Exception: Disable this rule if rendering over a hardcoded ANSI background." },
{ path = "ratatui::style::Stylize::yellow", reason = "Avoid yellow; prefer other colors in `tui/styles.md`." },
]

View File

@@ -9,7 +9,6 @@ workspace = true
[dependencies]
clap = { version = "4", features = ["derive", "wrap_help"], optional = true }
codex-core = { path = "../core" }
codex-protocol = { path = "../protocol" }
serde = { version = "1", optional = true }
toml = { version = "0.9", optional = true }

View File

@@ -1,46 +0,0 @@
use codex_core::protocol::AskForApproval;
use codex_core::protocol::SandboxPolicy;
/// A simple preset pairing an approval policy with a sandbox policy.
#[derive(Debug, Clone)]
pub struct ApprovalPreset {
/// Stable identifier for the preset.
pub id: &'static str,
/// Display label shown in UIs.
pub label: &'static str,
/// Short human description shown next to the label in UIs.
pub description: &'static str,
/// Approval policy to apply.
pub approval: AskForApproval,
/// Sandbox policy to apply.
pub sandbox: SandboxPolicy,
}
/// Built-in list of approval presets that pair approval and sandbox policy.
///
/// Keep this UI-agnostic so it can be reused by both TUI and MCP server.
pub fn builtin_approval_presets() -> Vec<ApprovalPreset> {
vec![
ApprovalPreset {
id: "read-only",
label: "Read Only",
description: "Codex can read files and answer questions. Codex requires approval to make edits, run commands, or access network",
approval: AskForApproval::OnRequest,
sandbox: SandboxPolicy::ReadOnly,
},
ApprovalPreset {
id: "auto",
label: "Auto",
description: "Codex can read files, make edits, and run commands in the workspace. Codex requires approval to work outside the workspace or access network",
approval: AskForApproval::OnRequest,
sandbox: SandboxPolicy::new_workspace_write_policy(),
},
ApprovalPreset {
id: "full-access",
label: "Full Access",
description: "Codex can read files, make edits, and run commands with network access, without approval. Exercise caution",
approval: AskForApproval::Never,
sandbox: SandboxPolicy::DangerFullAccess,
},
]
}

View File

@@ -64,11 +64,7 @@ impl CliConfigOverrides {
// `-c model=o3` without the quotes.
let value: Value = match parse_toml_value(value_str) {
Ok(v) => v,
Err(_) => {
// Strip leading/trailing quotes if present
let trimmed = value_str.trim().trim_matches(|c| c == '"' || c == '\'');
Value::String(trimmed.to_string())
}
Err(_) => Value::String(value_str.to_string()),
};
Ok((key.to_string(), value))
@@ -142,6 +138,7 @@ fn parse_toml_value(raw: &str) -> Result<Value, toml::de::Error> {
}
#[cfg(all(test, feature = "cli"))]
#[allow(clippy::expect_used, clippy::unwrap_used)]
mod tests {
use super::*;

View File

@@ -29,8 +29,3 @@ mod config_summary;
pub use config_summary::create_config_summary_entries;
// Shared fuzzy matcher (used by TUI selection popups and other UI filtering)
pub mod fuzzy_match;
// Shared model presets used by TUI and MCP server
pub mod model_presets;
// Shared approval presets (AskForApproval + Sandbox) used by TUI and MCP server
// Not to be confused with AskForApproval, which we should probably rename to EscalationPolicy.
pub mod approval_presets;

View File

@@ -1,54 +0,0 @@
use codex_core::protocol_config_types::ReasoningEffort;
/// A simple preset pairing a model slug with a reasoning effort.
#[derive(Debug, Clone, Copy)]
pub struct ModelPreset {
/// Stable identifier for the preset.
pub id: &'static str,
/// Display label shown in UIs.
pub label: &'static str,
/// Short human description shown next to the label in UIs.
pub description: &'static str,
/// Model slug (e.g., "gpt-5").
pub model: &'static str,
/// Reasoning effort to apply for this preset.
pub effort: ReasoningEffort,
}
/// Built-in list of model presets that pair a model with a reasoning effort.
///
/// Keep this UI-agnostic so it can be reused by both TUI and MCP server.
pub fn builtin_model_presets() -> &'static [ModelPreset] {
// Order reflects effort from minimal to high.
const PRESETS: &[ModelPreset] = &[
ModelPreset {
id: "gpt-5-minimal",
label: "gpt-5 minimal",
description: "— fastest responses with limited reasoning; ideal for coding, instructions, or lightweight tasks",
model: "gpt-5",
effort: ReasoningEffort::Minimal,
},
ModelPreset {
id: "gpt-5-low",
label: "gpt-5 low",
description: "— balances speed with some reasoning; useful for straightforward queries and short explanations",
model: "gpt-5",
effort: ReasoningEffort::Low,
},
ModelPreset {
id: "gpt-5-medium",
label: "gpt-5 medium",
description: "— default setting; provides a solid balance of reasoning depth and latency for general-purpose tasks",
model: "gpt-5",
effort: ReasoningEffort::Medium,
},
ModelPreset {
id: "gpt-5-high",
label: "gpt-5 high",
description: "— maximizes reasoning depth for complex or ambiguous problems",
model: "gpt-5",
effort: ReasoningEffort::High,
},
];
PRESETS
}

View File

@@ -7,7 +7,7 @@
//! `config.toml`.
use clap::ValueEnum;
use codex_protocol::config_types::SandboxMode;
use codex_core::config_types::SandboxMode;
#[derive(Clone, Copy, Debug, ValueEnum)]
#[value(rename_all = "kebab-case")]

View File

@@ -149,7 +149,6 @@ approval_policy = "untrusted"
```
If you want to be notified whenever a command fails, use "on-failure":
```toml
# If the command fails when run in the sandbox, Codex asks for permission to
# retry the command outside the sandbox.
@@ -157,14 +156,12 @@ approval_policy = "on-failure"
```
If you want the model to run until it decides that it needs to ask you for escalated permissions, use "on-request":
```toml
# The model decides when to escalate
approval_policy = "on-request"
```
Alternatively, you can have the model run until it is done, and never ask to run a command with escalated permissions:
```toml
# User is never prompted: if the command fails, Codex will automatically try
# something out. Note the `exec` subcommand always uses this mode.
@@ -220,14 +217,17 @@ Users can specify config values at multiple levels. Order of precedence is as fo
## model_reasoning_effort
If the selected model is known to support reasoning (for example: `o3`, `o4-mini`, `codex-*`, `gpt-5`), reasoning is enabled by default when using the Responses API. As explained in the [OpenAI Platform documentation](https://platform.openai.com/docs/guides/reasoning?api-mode=responses#get-started-with-reasoning), this can be set to:
If the model name starts with `"o"` (as in `"o3"` or `"o4-mini"`) or `"codex"`, reasoning is enabled by default when using the Responses API. As explained in the [OpenAI Platform documentation](https://platform.openai.com/docs/guides/reasoning?api-mode=responses#get-started-with-reasoning), this can be set to:
- `"minimal"`
- `"low"`
- `"medium"` (default)
- `"high"`
Note: to minimize reasoning, choose `"minimal"`.
To disable reasoning, set `model_reasoning_effort` to `"none"` in your config:
```toml
model_reasoning_effort = "none" # disable reasoning
```
## model_reasoning_summary
@@ -281,9 +281,6 @@ sandbox_mode = "workspace-write"
exclude_tmpdir_env_var = false
exclude_slash_tmp = false
# Optional list of _additional_ writable roots beyond $TMPDIR and /tmp.
writable_roots = ["/Users/YOU/.pyenv/shims"]
# Allow the command being run inside the sandbox to make outbound network
# requests. Disabled by default.
network_access = false
@@ -300,16 +297,6 @@ This is reasonable to use if Codex is running in an environment that provides it
Though using this option may also be necessary if you try to use Codex in environments where its native sandboxing mechanisms are unsupported, such as older Linux kernels or on Windows.
## Approval presets
Codex provides three main Approval Presets:
- Read Only: Codex can read files and answer questions; edits, running commands, and network access require approval.
- Auto: Codex can read files, make edits, and run commands in the workspace without approval; asks for approval outside the workspace or for network access.
- Full Access: Full disk and network access without prompts; extremely risky.
You can further customize how Codex runs at the command line using the `--ask-for-approval` and `--sandbox` options.
## mcp_servers
Defines the list of MCP servers that Codex can consult for tool use. Currently, only servers that are launched by executing a program that communicate over stdio are supported. For servers that use the SSE transport, consider an adapter like [mcp-proxy](https://github.com/sparfenyuk/mcp-proxy).
@@ -511,12 +498,10 @@ hide_agent_reasoning = true # defaults to false
Surfaces the model's raw chain-of-thought ("raw reasoning content") when available.
Notes:
- Only takes effect if the selected model/provider actually emits raw reasoning content. Many models do not. When unsupported, this option has no visible effect.
- Raw reasoning may include intermediate thoughts or sensitive context. Enable only if acceptable for your workflow.
Example:
```toml
show_raw_agent_reasoning = true # defaults to false
```

View File

@@ -19,12 +19,12 @@ chrono = { version = "0.4", features = ["serde"] }
codex-apply-patch = { path = "../apply-patch" }
codex-login = { path = "../login" }
codex-mcp-client = { path = "../mcp-client" }
codex-protocol = { path = "../protocol" }
dirs = "6"
env-flags = "0.1.1"
eventsource-stream = "0.2.3"
fs2 = "0.4.3"
futures = "0.3"
libc = "0.2.175"
libc = "0.2.174"
mcp-types = { path = "../mcp-types" }
mime_guess = "2.0"
os_info = "3.12.0"

View File

@@ -1,5 +1,4 @@
use crate::codex::Session;
use crate::codex::TurnContext;
use crate::models::FunctionCallOutputPayload;
use crate::models::ResponseInputItem;
use crate::protocol::FileChange;
@@ -9,6 +8,7 @@ use crate::safety::assess_patch_safety;
use codex_apply_patch::ApplyPatchAction;
use codex_apply_patch::ApplyPatchFileChange;
use std::collections::HashMap;
use std::path::Path;
use std::path::PathBuf;
pub const CODEX_APPLY_PATCH_ARG1: &str = "--codex-run-as-apply-patch";
@@ -41,16 +41,17 @@ impl From<ResponseInputItem> for InternalApplyPatchInvocation {
pub(crate) async fn apply_patch(
sess: &Session,
turn_context: &TurnContext,
sub_id: &str,
call_id: &str,
action: ApplyPatchAction,
) -> InternalApplyPatchInvocation {
let writable_roots_snapshot = sess.get_writable_roots().to_vec();
match assess_patch_safety(
&action,
turn_context.approval_policy,
&turn_context.sandbox_policy,
&turn_context.cwd,
sess.get_approval_policy(),
&writable_roots_snapshot,
sess.get_cwd(),
) {
SafetyCheck::AutoApprove { .. } => {
InternalApplyPatchInvocation::DelegateToExec(ApplyPatchExec {
@@ -123,3 +124,30 @@ pub(crate) fn convert_apply_patch_to_protocol(
}
result
}
pub(crate) fn get_writable_roots(cwd: &Path) -> Vec<PathBuf> {
let mut writable_roots = Vec::new();
if cfg!(target_os = "macos") {
// On macOS, $TMPDIR is private to the user.
writable_roots.push(std::env::temp_dir());
// Allow pyenv to update its shims directory. Without this, any tool
// that happens to be managed by `pyenv` will fail with an error like:
//
// pyenv: cannot rehash: $HOME/.pyenv/shims isn't writable
//
// which is emitted every time `pyenv` tries to run `rehash` (for
// example, after installing a new Python package that drops an entry
// point). Although the sandbox is intentionally read-only by default,
// writing to the user's local `pyenv` directory is safe because it
// is already user-writable and scoped to the current user account.
if let Ok(home_dir) = std::env::var("HOME") {
let pyenv_dir = PathBuf::from(home_dir).join(".pyenv");
writable_roots.push(pyenv_dir);
}
}
writable_roots.push(cwd.to_path_buf());
writable_roots
}

View File

@@ -132,6 +132,7 @@ fn parse_plain_command_from_node(cmd: tree_sitter::Node, src: &str) -> Option<Ve
#[cfg(test)]
mod tests {
#![allow(clippy::unwrap_used)]
use super::*;
fn parse_seq(src: &str) -> Option<Vec<Vec<String>>> {

View File

@@ -290,12 +290,13 @@ async fn process_chat_sse<S>(
.get("delta")
.and_then(|d| d.get("content"))
.and_then(|c| c.as_str())
&& !content.is_empty()
{
assistant_text.push_str(content);
let _ = tx_event
.send(Ok(ResponseEvent::OutputTextDelta(content.to_string())))
.await;
if !content.is_empty() {
assistant_text.push_str(content);
let _ = tx_event
.send(Ok(ResponseEvent::OutputTextDelta(content.to_string())))
.await;
}
}
// Forward any reasoning/thinking deltas if present.
@@ -332,25 +333,27 @@ async fn process_chat_sse<S>(
.get("delta")
.and_then(|d| d.get("tool_calls"))
.and_then(|tc| tc.as_array())
&& let Some(tool_call) = tool_calls.first()
{
// Mark that we have an active function call in progress.
fn_call_state.active = true;
if let Some(tool_call) = tool_calls.first() {
// Mark that we have an active function call in progress.
fn_call_state.active = true;
// Extract call_id if present.
if let Some(id) = tool_call.get("id").and_then(|v| v.as_str()) {
fn_call_state.call_id.get_or_insert_with(|| id.to_string());
}
// Extract function details if present.
if let Some(function) = tool_call.get("function") {
if let Some(name) = function.get("name").and_then(|n| n.as_str()) {
fn_call_state.name.get_or_insert_with(|| name.to_string());
// Extract call_id if present.
if let Some(id) = tool_call.get("id").and_then(|v| v.as_str()) {
fn_call_state.call_id.get_or_insert_with(|| id.to_string());
}
if let Some(args_fragment) = function.get("arguments").and_then(|a| a.as_str())
{
fn_call_state.arguments.push_str(args_fragment);
// Extract function details if present.
if let Some(function) = tool_call.get("function") {
if let Some(name) = function.get("name").and_then(|n| n.as_str()) {
fn_call_state.name.get_or_insert_with(|| name.to_string());
}
if let Some(args_fragment) =
function.get("arguments").and_then(|a| a.as_str())
{
fn_call_state.arguments.push_str(args_fragment);
}
}
}
}
@@ -488,14 +491,15 @@ where
// Only use the final assistant message if we have not
// seen any deltas; otherwise, deltas already built the
// cumulative text and this would duplicate it.
if this.cumulative.is_empty()
&& let crate::models::ResponseItem::Message { content, .. } = &item
&& let Some(text) = content.iter().find_map(|c| match c {
crate::models::ContentItem::OutputText { text } => Some(text),
_ => None,
})
{
this.cumulative.push_str(text);
if this.cumulative.is_empty() {
if let crate::models::ResponseItem::Message { content, .. } = &item {
if let Some(text) = content.iter().find_map(|c| match c {
crate::models::ContentItem::OutputText { text } => Some(text),
_ => None,
}) {
this.cumulative.push_str(text);
}
}
}
// Swallow assistant message here; emit on Completed.

View File

@@ -29,11 +29,12 @@ use crate::client_common::ResponseStream;
use crate::client_common::ResponsesApiRequest;
use crate::client_common::create_reasoning_param_for_request;
use crate::config::Config;
use crate::config_types::ReasoningEffort as ReasoningEffortConfig;
use crate::config_types::ReasoningSummary as ReasoningSummaryConfig;
use crate::error::CodexErr;
use crate::error::Result;
use crate::error::UsageLimitReachedError;
use crate::flags::CODEX_RS_SSE_FIXTURE;
use crate::model_family::ModelFamily;
use crate::model_provider_info::ModelProviderInfo;
use crate::model_provider_info::WireApi;
use crate::models::ResponseItem;
@@ -41,8 +42,6 @@ use crate::openai_tools::create_tools_json_for_responses_api;
use crate::protocol::TokenUsage;
use crate::user_agent::get_codex_user_agent;
use crate::util::backoff;
use codex_protocol::config_types::ReasoningEffort as ReasoningEffortConfig;
use codex_protocol::config_types::ReasoningSummary as ReasoningSummaryConfig;
use std::sync::Arc;
#[derive(Debug, Deserialize)]
@@ -57,7 +56,7 @@ struct Error {
message: Option<String>,
}
#[derive(Debug, Clone)]
#[derive(Clone)]
pub struct ModelClient {
config: Arc<Config>,
auth: Option<CodexAuth>,
@@ -208,7 +207,11 @@ impl ModelClient {
req_builder = req_builder.header("chatgpt-account-id", account_id);
}
let originator = &self.config.responses_originator_header;
let originator = self
.config
.internal_originator
.as_deref()
.unwrap_or("codex_cli_rs");
req_builder = req_builder.header("originator", originator);
req_builder = req_builder.header("User-Agent", get_codex_user_agent(Some(originator)));
@@ -248,12 +251,6 @@ impl ModelClient {
.and_then(|v| v.to_str().ok())
.and_then(|s| s.parse::<u64>().ok());
if status == StatusCode::UNAUTHORIZED
&& let Some(a) = auth.as_ref()
{
let _ = a.refresh_token().await;
}
// The OpenAI Responses endpoint returns structured JSON bodies even for 4xx/5xx
// errors. When we bubble early with only the HTTP status the caller sees an opaque
// "unexpected status 400 Bad Request" which makes debugging nearly impossible.
@@ -261,10 +258,7 @@ impl ModelClient {
// exact error message (e.g. "Unknown parameter: 'input[0].metadata'"). The body is
// small and this branch only runs on error paths so the extra allocation is
// negligible.
if !(status == StatusCode::TOO_MANY_REQUESTS
|| status == StatusCode::UNAUTHORIZED
|| status.is_server_error())
{
if !(status == StatusCode::TOO_MANY_REQUESTS || status.is_server_error()) {
// Surface the error body to callers. Use `unwrap_or_default` per Clippy.
let body = res.text().await.unwrap_or_default();
return Err(CodexErr::UnexpectedStatus(status, body));
@@ -317,30 +311,6 @@ impl ModelClient {
pub fn get_provider(&self) -> ModelProviderInfo {
self.provider.clone()
}
/// Returns the currently configured model slug.
pub fn get_model(&self) -> String {
self.config.model.clone()
}
/// Returns the currently configured model family.
pub fn get_model_family(&self) -> ModelFamily {
self.config.model_family.clone()
}
/// Returns the current reasoning effort setting.
pub fn get_reasoning_effort(&self) -> ReasoningEffortConfig {
self.effort
}
/// Returns the current reasoning summary setting.
pub fn get_reasoning_summary(&self) -> ReasoningSummaryConfig {
self.summary
}
pub fn get_auth(&self) -> Option<CodexAuth> {
self.auth.clone()
}
}
#[derive(Debug, Deserialize, Serialize)]
@@ -639,6 +609,8 @@ fn try_parse_retry_after(err: &Error) -> Option<Duration> {
#[cfg(test)]
mod tests {
#![allow(clippy::expect_used, clippy::unwrap_used)]
use super::*;
use serde_json::json;
use tokio::sync::mpsc;

View File

@@ -1,15 +1,19 @@
use crate::config_types::ReasoningEffort as ReasoningEffortConfig;
use crate::config_types::ReasoningSummary as ReasoningSummaryConfig;
use crate::error::Result;
use crate::model_family::ModelFamily;
use crate::models::ContentItem;
use crate::models::ResponseItem;
use crate::openai_tools::OpenAiTool;
use crate::protocol::AskForApproval;
use crate::protocol::SandboxPolicy;
use crate::protocol::TokenUsage;
use codex_apply_patch::APPLY_PATCH_TOOL_INSTRUCTIONS;
use codex_protocol::config_types::ReasoningEffort as ReasoningEffortConfig;
use codex_protocol::config_types::ReasoningSummary as ReasoningSummaryConfig;
use futures::Stream;
use serde::Serialize;
use std::borrow::Cow;
use std::fmt::Display;
use std::path::PathBuf;
use std::pin::Pin;
use std::task::Context;
use std::task::Poll;
@@ -19,19 +23,62 @@ use tokio::sync::mpsc;
/// with this content.
const BASE_INSTRUCTIONS: &str = include_str!("../prompt.md");
/// Wraps the environment context message in a tag so the model can parse it more easily.
const ENVIRONMENT_CONTEXT_START: &str = "<environment_context>\n\n";
const ENVIRONMENT_CONTEXT_END: &str = "\n\n</environment_context>";
/// Wraps the user instructions message in a tag so the model can parse it more easily.
const USER_INSTRUCTIONS_START: &str = "<user_instructions>\n\n";
const USER_INSTRUCTIONS_END: &str = "\n\n</user_instructions>";
/// API request payload for a single model turn
#[derive(Debug, Clone)]
pub(crate) struct EnvironmentContext {
pub cwd: PathBuf,
pub approval_policy: AskForApproval,
pub sandbox_policy: SandboxPolicy,
}
impl Display for EnvironmentContext {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
writeln!(
f,
"Current working directory: {}",
self.cwd.to_string_lossy()
)?;
writeln!(f, "Approval policy: {}", self.approval_policy)?;
writeln!(f, "Sandbox policy: {}", self.sandbox_policy)?;
let network_access = match self.sandbox_policy.clone() {
SandboxPolicy::DangerFullAccess => "enabled",
SandboxPolicy::ReadOnly => "restricted",
SandboxPolicy::WorkspaceWrite { network_access, .. } => {
if network_access {
"enabled"
} else {
"restricted"
}
}
};
writeln!(f, "Network access: {network_access}")?;
Ok(())
}
}
/// API request payload for a single model turn.
#[derive(Default, Debug, Clone)]
pub struct Prompt {
/// Conversation context input items.
pub input: Vec<ResponseItem>,
/// Optional instructions from the user to amend to the built-in agent
/// instructions.
pub user_instructions: Option<String>,
/// Whether to store response on server side (disable_response_storage = !store).
pub store: bool,
/// Optional environment context (working directory, approval policy, and
/// sandbox policy) that is rendered as a tagged message for the model to use
pub environment_context: Option<EnvironmentContext>,
/// Tools available to the model, including additional tools sourced from
/// external MCP servers.
pub tools: Vec<OpenAiTool>,
@@ -53,19 +100,36 @@ impl Prompt {
Cow::Owned(sections.join("\n"))
}
pub(crate) fn get_formatted_input(&self) -> Vec<ResponseItem> {
self.input.clone()
fn get_formatted_user_instructions(&self) -> Option<String> {
self.user_instructions
.as_ref()
.map(|ui| format!("{USER_INSTRUCTIONS_START}{ui}{USER_INSTRUCTIONS_END}"))
}
/// Creates a formatted user instructions message from a string
pub(crate) fn format_user_instructions_message(ui: &str) -> ResponseItem {
ResponseItem::Message {
id: None,
role: "user".to_string(),
content: vec![ContentItem::InputText {
text: format!("{USER_INSTRUCTIONS_START}{ui}{USER_INSTRUCTIONS_END}"),
}],
fn get_formatted_environment_context(&self) -> Option<String> {
self.environment_context
.as_ref()
.map(|ec| format!("{ENVIRONMENT_CONTEXT_START}{ec}{ENVIRONMENT_CONTEXT_END}"))
}
pub(crate) fn get_formatted_input(&self) -> Vec<ResponseItem> {
let mut input_with_instructions = Vec::with_capacity(self.input.len() + 2);
if let Some(ec) = self.get_formatted_environment_context() {
input_with_instructions.push(ResponseItem::Message {
id: None,
role: "user".to_string(),
content: vec![ContentItem::InputText { text: ec }],
});
}
if let Some(ui) = self.get_formatted_user_instructions() {
input_with_instructions.push(ResponseItem::Message {
id: None,
role: "user".to_string(),
content: vec![ContentItem::InputText { text: ui }],
});
}
input_with_instructions.extend(self.input.clone());
input_with_instructions
}
}
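The net effect is an ordering guarantee: the tagged environment context comes first, then the tagged user instructions, then the conversation items. A sketch of that expectation using the crate-internal types shown above (values illustrative):
let prompt = Prompt {
    user_instructions: Some("be terse".to_string()),
    environment_context: Some(EnvironmentContext {
        cwd: PathBuf::from("/repo"),
        approval_policy: AskForApproval::Never,
        sandbox_policy: SandboxPolicy::ReadOnly,
    }),
    ..Default::default()
};
let items = prompt.get_formatted_input();
// items[0] is the <environment_context> message, items[1] the
// <user_instructions> message, and prompt.input follows.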
@@ -85,8 +149,53 @@ pub enum ResponseEvent {
#[derive(Debug, Serialize)]
pub(crate) struct Reasoning {
pub(crate) effort: ReasoningEffortConfig,
pub(crate) summary: ReasoningSummaryConfig,
pub(crate) effort: OpenAiReasoningEffort,
#[serde(skip_serializing_if = "Option::is_none")]
pub(crate) summary: Option<OpenAiReasoningSummary>,
}
/// See https://platform.openai.com/docs/guides/reasoning?api-mode=responses#get-started-with-reasoning
#[derive(Debug, Serialize, Default, Clone, Copy)]
#[serde(rename_all = "lowercase")]
pub(crate) enum OpenAiReasoningEffort {
Low,
#[default]
Medium,
High,
}
impl From<ReasoningEffortConfig> for Option<OpenAiReasoningEffort> {
fn from(effort: ReasoningEffortConfig) -> Self {
match effort {
ReasoningEffortConfig::Low => Some(OpenAiReasoningEffort::Low),
ReasoningEffortConfig::Medium => Some(OpenAiReasoningEffort::Medium),
ReasoningEffortConfig::High => Some(OpenAiReasoningEffort::High),
ReasoningEffortConfig::None => None,
}
}
}
/// A summary of the reasoning performed by the model. This can be useful for
/// debugging and understanding the model's reasoning process.
/// See https://platform.openai.com/docs/guides/reasoning?api-mode=responses#reasoning-summaries
#[derive(Debug, Serialize, Default, Clone, Copy)]
#[serde(rename_all = "lowercase")]
pub(crate) enum OpenAiReasoningSummary {
#[default]
Auto,
Concise,
Detailed,
}
impl From<ReasoningSummaryConfig> for Option<OpenAiReasoningSummary> {
fn from(summary: ReasoningSummaryConfig) -> Self {
match summary {
ReasoningSummaryConfig::Auto => Some(OpenAiReasoningSummary::Auto),
ReasoningSummaryConfig::Concise => Some(OpenAiReasoningSummary::Concise),
ReasoningSummaryConfig::Detailed => Some(OpenAiReasoningSummary::Detailed),
ReasoningSummaryConfig::None => None,
}
}
}
/// Request object that is serialized as JSON and POST'ed when using the
@@ -117,7 +226,12 @@ pub(crate) fn create_reasoning_param_for_request(
summary: ReasoningSummaryConfig,
) -> Option<Reasoning> {
if model_family.supports_reasoning_summaries {
Some(Reasoning { effort, summary })
let effort: Option<OpenAiReasoningEffort> = effort.into();
let effort = effort?;
Some(Reasoning {
effort,
summary: summary.into(),
})
} else {
None
}
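Two mechanisms combine here: ReasoningEffortConfig::None short-circuits through the `?`, so no reasoning block is sent at all, while a None summary merely drops that one field via skip_serializing_if. A standalone sketch of the latter behavior (local stand-in struct, not the crate's actual type):
use serde::Serialize;
use serde_json::json;
#[derive(Serialize)]
struct Reasoning {
    effort: &'static str,
    #[serde(skip_serializing_if = "Option::is_none")]
    summary: Option<&'static str>,
}
fn main() {
    let r = Reasoning { effort: "high", summary: None };
    assert_eq!(serde_json::to_value(&r).unwrap(), json!({"effort": "high"}));
}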
@@ -137,6 +251,7 @@ impl Stream for ResponseStream {
#[cfg(test)]
mod tests {
#![allow(clippy::expect_used)]
use crate::model_family::find_family_for_model;
use super::*;
@@ -144,6 +259,7 @@ mod tests {
#[test]
fn get_full_instructions_no_user_content() {
let prompt = Prompt {
user_instructions: Some("custom instruction".to_string()),
..Default::default()
};
let expected = format!("{BASE_INSTRUCTIONS}\n{APPLY_PATCH_TOOL_INSTRUCTIONS}");

File diff suppressed because it is too large

View File

@@ -1,6 +1,9 @@
use crate::config_profile::ConfigProfile;
use crate::config_types::History;
use crate::config_types::McpServerConfig;
use crate::config_types::ReasoningEffort;
use crate::config_types::ReasoningSummary;
use crate::config_types::SandboxMode;
use crate::config_types::SandboxWorkspaceWrite;
use crate::config_types::ShellEnvironmentPolicy;
use crate::config_types::ShellEnvironmentPolicyToml;
@@ -13,10 +16,6 @@ use crate::model_provider_info::built_in_model_providers;
use crate::openai_model_info::get_model_info;
use crate::protocol::AskForApproval;
use crate::protocol::SandboxPolicy;
use codex_login::AuthMode;
use codex_protocol::config_types::ReasoningEffort;
use codex_protocol::config_types::ReasoningSummary;
use codex_protocol::config_types::SandboxMode;
use dirs::home_dir;
use serde::Deserialize;
use std::collections::HashMap;
@@ -35,8 +34,6 @@ pub(crate) const PROJECT_DOC_MAX_BYTES: usize = 32 * 1024; // 32 KiB
const CONFIG_TOML_FILE: &str = "config.toml";
const DEFAULT_RESPONSES_ORIGINATOR_HEADER: &str = "codex_cli_rs";
/// Application configuration loaded from disk and merged with overrides.
#[derive(Debug, Clone, PartialEq)]
pub struct Config {
@@ -142,8 +139,8 @@ pub struct Config {
/// When this program is invoked, arg0 will be set to `codex-linux-sandbox`.
pub codex_linux_sandbox_exe: Option<PathBuf>,
/// Value to use for `reasoning.effort` when making a request using the
/// Responses API.
/// If not "none", the value to use for `reasoning.effort` when making a
/// request using the Responses API.
pub model_reasoning_effort: ReasoningEffort,
/// If not "none", the value to use for `reasoning.summary` when making a
@@ -153,22 +150,11 @@ pub struct Config {
/// Base URL for requests to ChatGPT (as opposed to the OpenAI API).
pub chatgpt_base_url: String,
/// Experimental rollout resume path (absolute path to .jsonl; undocumented).
pub experimental_resume: Option<PathBuf>,
/// Include an experimental plan tool that the model can use to update its current plan and the status of each step.
pub include_plan_tool: bool,
/// Include the `apply_patch` tool for models that benefit from invoking
/// file edits as a structured tool call. When unset, this falls back to the
/// model family's default preference.
pub include_apply_patch_tool: bool,
/// The value for the `originator` header included with Responses API requests.
pub responses_originator_header: String,
/// If set to `true`, the API key will be signed with the `originator` header.
pub preferred_auth_method: AuthMode,
pub internal_originator: Option<String>,
}
impl Config {
@@ -405,19 +391,13 @@ pub struct ConfigToml {
/// Base URL for requests to ChatGPT (as opposed to the OpenAI API).
pub chatgpt_base_url: Option<String>,
/// Experimental rollout resume path (absolute path to .jsonl; undocumented).
pub experimental_resume: Option<PathBuf>,
/// Experimental path to a file whose contents replace the built-in BASE_INSTRUCTIONS.
pub experimental_instructions_file: Option<PathBuf>,
/// The value for the `originator` header included with Responses API requests.
pub responses_originator_header_internal_override: Option<String>,
pub internal_originator: Option<String>,
pub projects: Option<HashMap<String, ProjectConfig>>,
/// If set to `true`, the API key will be signed with the `originator` header.
pub preferred_auth_method: Option<AuthMode>,
}
#[derive(Deserialize, Debug, Clone, PartialEq, Eq)]
@@ -494,7 +474,6 @@ pub struct ConfigOverrides {
pub codex_linux_sandbox_exe: Option<PathBuf>,
pub base_instructions: Option<String>,
pub include_plan_tool: Option<bool>,
pub include_apply_patch_tool: Option<bool>,
pub disable_response_storage: Option<bool>,
pub show_raw_agent_reasoning: Option<bool>,
}
@@ -520,7 +499,6 @@ impl Config {
codex_linux_sandbox_exe,
base_instructions,
include_plan_tool,
include_apply_patch_tool,
disable_response_storage,
show_raw_agent_reasoning,
} = overrides;
@@ -597,7 +575,6 @@ impl Config {
needs_special_apply_patch_instructions: false,
supports_reasoning_summaries,
uses_local_shell_tool: false,
uses_apply_patch_tool: false,
}
});
@@ -610,9 +587,6 @@ impl Config {
.as_ref()
.map(|info| info.max_output_tokens)
});
let experimental_resume = cfg.experimental_resume;
// Load base instructions override from a file if specified. If the
// path is relative, resolve it against the effective cwd so the
// behaviour matches other path-like config values.
@@ -623,14 +597,6 @@ impl Config {
let file_base_instructions =
Self::get_base_instructions(experimental_instructions_path, &resolved_cwd)?;
let base_instructions = base_instructions.or(file_base_instructions);
let include_apply_patch_tool_val =
include_apply_patch_tool.unwrap_or(model_family.uses_apply_patch_tool);
let responses_originator_header: String = cfg
.responses_originator_header_internal_override
.unwrap_or(DEFAULT_RESPONSES_ORIGINATOR_HEADER.to_owned());
let config = Self {
model,
model_family,
@@ -680,12 +646,8 @@ impl Config {
.chatgpt_base_url
.or(cfg.chatgpt_base_url)
.unwrap_or("https://chatgpt.com/backend-api/".to_string()),
experimental_resume,
include_plan_tool: include_plan_tool.unwrap_or(false),
include_apply_patch_tool: include_apply_patch_tool_val,
responses_originator_header,
preferred_auth_method: cfg.preferred_auth_method.unwrap_or(AuthMode::ChatGPT),
internal_originator: cfg.internal_originator,
};
Ok(config)
}
@@ -765,10 +727,10 @@ fn default_model() -> String {
pub fn find_codex_home() -> std::io::Result<PathBuf> {
// Honor the `CODEX_HOME` environment variable when it is set to allow users
// (and tests) to override the default location.
if let Ok(val) = std::env::var("CODEX_HOME")
&& !val.is_empty()
{
return PathBuf::from(val).canonicalize();
if let Ok(val) = std::env::var("CODEX_HOME") {
if !val.is_empty() {
return PathBuf::from(val).canonicalize();
}
}
let mut p = home_dir().ok_or_else(|| {
@@ -791,6 +753,7 @@ pub fn log_dir(cfg: &Config) -> std::io::Result<PathBuf> {
#[cfg(test)]
mod tests {
#![allow(clippy::expect_used, clippy::unwrap_used)]
use crate::config_types::HistoryPersistence;
use super::*;
@@ -1045,12 +1008,9 @@ disable_response_storage = true
model_reasoning_effort: ReasoningEffort::High,
model_reasoning_summary: ReasoningSummary::Detailed,
chatgpt_base_url: "https://chatgpt.com/backend-api/".to_string(),
experimental_resume: None,
base_instructions: None,
include_plan_tool: false,
include_apply_patch_tool: false,
responses_originator_header: "codex_cli_rs".to_string(),
preferred_auth_method: AuthMode::ChatGPT,
internal_originator: None,
},
o3_profile_config
);
@@ -1098,12 +1058,9 @@ disable_response_storage = true
model_reasoning_effort: ReasoningEffort::default(),
model_reasoning_summary: ReasoningSummary::default(),
chatgpt_base_url: "https://chatgpt.com/backend-api/".to_string(),
experimental_resume: None,
base_instructions: None,
include_plan_tool: false,
include_apply_patch_tool: false,
responses_originator_header: "codex_cli_rs".to_string(),
preferred_auth_method: AuthMode::ChatGPT,
internal_originator: None,
};
assert_eq!(expected_gpt3_profile_config, gpt3_profile_config);
@@ -1166,12 +1123,9 @@ disable_response_storage = true
model_reasoning_effort: ReasoningEffort::default(),
model_reasoning_summary: ReasoningSummary::default(),
chatgpt_base_url: "https://chatgpt.com/backend-api/".to_string(),
experimental_resume: None,
base_instructions: None,
include_plan_tool: false,
include_apply_patch_tool: false,
responses_originator_header: "codex_cli_rs".to_string(),
preferred_auth_method: AuthMode::ChatGPT,
internal_originator: None,
};
assert_eq!(expected_zdr_profile_config, zdr_profile_config);

View File

@@ -1,9 +1,9 @@
use serde::Deserialize;
use std::path::PathBuf;
use crate::config_types::ReasoningEffort;
use crate::config_types::ReasoningSummary;
use crate::protocol::AskForApproval;
use codex_protocol::config_types::ReasoningEffort;
use codex_protocol::config_types::ReasoningSummary;
/// Collection of common configuration options that a user can define as a unit
/// in `config.toml`.

View File

@@ -5,9 +5,11 @@
use std::collections::HashMap;
use std::path::PathBuf;
use strum_macros::Display;
use wildmatch::WildMatchPattern;
use serde::Deserialize;
use serde::Serialize;
#[derive(Deserialize, Debug, Clone, PartialEq)]
pub struct McpServerConfig {
@@ -76,6 +78,20 @@ pub enum HistoryPersistence {
#[derive(Deserialize, Debug, Clone, PartialEq, Default)]
pub struct Tui {}
#[derive(Deserialize, Debug, Clone, Copy, PartialEq, Default, Serialize)]
#[serde(rename_all = "kebab-case")]
pub enum SandboxMode {
#[serde(rename = "read-only")]
#[default]
ReadOnly,
#[serde(rename = "workspace-write")]
WorkspaceWrite,
#[serde(rename = "danger-full-access")]
DangerFullAccess,
}
#[derive(Deserialize, Debug, Clone, PartialEq, Default)]
pub struct SandboxWorkspaceWrite {
#[serde(default)]
@@ -183,3 +199,31 @@ impl From<ShellEnvironmentPolicyToml> for ShellEnvironmentPolicy {
}
}
}
/// See https://platform.openai.com/docs/guides/reasoning?api-mode=responses#get-started-with-reasoning
#[derive(Debug, Serialize, Deserialize, Default, Clone, Copy, PartialEq, Eq, Display)]
#[serde(rename_all = "lowercase")]
#[strum(serialize_all = "lowercase")]
pub enum ReasoningEffort {
Low,
#[default]
Medium,
High,
/// Option to disable reasoning.
None,
}
/// A summary of the reasoning performed by the model. This can be useful for
/// debugging and understanding the model's reasoning process.
/// See https://platform.openai.com/docs/guides/reasoning?api-mode=responses#reasoning-summaries
#[derive(Debug, Serialize, Deserialize, Default, Clone, Copy, PartialEq, Eq, Display)]
#[serde(rename_all = "lowercase")]
#[strum(serialize_all = "lowercase")]
pub enum ReasoningSummary {
#[default]
Auto,
Concise,
Detailed,
/// Option to disable reasoning summaries.
None,
}
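Because of the lowercase renames, these enums round-trip from the plain strings users write in config.toml (e.g. model_reasoning_effort = "high"). A self-contained sketch of the same derive behavior:
use serde::Deserialize;
#[derive(Deserialize, Debug, PartialEq)]
#[serde(rename_all = "lowercase")]
enum Effort { Low, Medium, High, None }
fn main() {
    // "none" is an explicit value, not a missing field: it disables reasoning.
    let e: Effort = serde_json::from_str("\"none\"").unwrap();
    assert_eq!(e, Effort::None);
}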

View File

@@ -7,7 +7,6 @@ use uuid::Uuid;
use crate::codex::Codex;
use crate::codex::CodexSpawnOk;
use crate::codex::INITIAL_SUBMIT_ID;
use crate::codex_conversation::CodexConversation;
use crate::config::Config;
use crate::error::CodexErr;
@@ -40,7 +39,7 @@ impl Default for ConversationManager {
impl ConversationManager {
pub async fn new_conversation(&self, config: Config) -> CodexResult<NewConversation> {
let auth = CodexAuth::from_codex_home(&config.codex_home, config.preferred_auth_method)?;
let auth = CodexAuth::from_codex_home(&config.codex_home)?;
self.new_conversation_with_auth(config, auth).await
}
@@ -53,6 +52,7 @@ impl ConversationManager {
) -> CodexResult<NewConversation> {
let CodexSpawnOk {
codex,
init_id,
session_id: conversation_id,
} = Codex::spawn(config, auth).await?;
@@ -64,7 +64,7 @@ impl ConversationManager {
Event {
id,
msg: EventMsg::SessionConfigured(session_configured),
} if id == INITIAL_SUBMIT_ID => session_configured,
} if id == init_id => session_configured,
_ => {
return Err(CodexErr::SessionConfiguredNotFirstEvent);
}

View File

@@ -1,86 +0,0 @@
use serde::Deserialize;
use serde::Serialize;
use strum_macros::Display as DeriveDisplay;
use crate::models::ContentItem;
use crate::models::ResponseItem;
use crate::protocol::AskForApproval;
use crate::protocol::SandboxPolicy;
use codex_protocol::config_types::SandboxMode;
use std::fmt::Display;
use std::path::PathBuf;
/// Wraps the environment context message in a tag so the model can parse it more easily.
pub(crate) const ENVIRONMENT_CONTEXT_START: &str = "<environment_context>\n";
pub(crate) const ENVIRONMENT_CONTEXT_END: &str = "</environment_context>";
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, DeriveDisplay)]
#[serde(rename_all = "kebab-case")]
#[strum(serialize_all = "kebab-case")]
pub enum NetworkAccess {
Restricted,
Enabled,
}
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
#[serde(rename = "environment_context", rename_all = "snake_case")]
pub(crate) struct EnvironmentContext {
pub cwd: PathBuf,
pub approval_policy: AskForApproval,
pub sandbox_mode: SandboxMode,
pub network_access: NetworkAccess,
}
impl EnvironmentContext {
pub fn new(
cwd: PathBuf,
approval_policy: AskForApproval,
sandbox_policy: SandboxPolicy,
) -> Self {
Self {
cwd,
approval_policy,
sandbox_mode: match sandbox_policy {
SandboxPolicy::DangerFullAccess => SandboxMode::DangerFullAccess,
SandboxPolicy::ReadOnly => SandboxMode::ReadOnly,
SandboxPolicy::WorkspaceWrite { .. } => SandboxMode::WorkspaceWrite,
},
network_access: match sandbox_policy {
SandboxPolicy::DangerFullAccess => NetworkAccess::Enabled,
SandboxPolicy::ReadOnly => NetworkAccess::Restricted,
SandboxPolicy::WorkspaceWrite { network_access, .. } => {
if network_access {
NetworkAccess::Enabled
} else {
NetworkAccess::Restricted
}
}
},
}
}
}
impl Display for EnvironmentContext {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
writeln!(
f,
"Current working directory: {}",
self.cwd.to_string_lossy()
)?;
writeln!(f, "Approval policy: {}", self.approval_policy)?;
writeln!(f, "Sandbox mode: {}", self.sandbox_mode)?;
writeln!(f, "Network access: {}", self.network_access)?;
Ok(())
}
}
impl From<EnvironmentContext> for ResponseItem {
fn from(ec: EnvironmentContext) -> Self {
ResponseItem::Message {
id: None,
role: "user".to_string(),
content: vec![ContentItem::InputText {
text: format!("{ENVIRONMENT_CONTEXT_START}{ec}{ENVIRONMENT_CONTEXT_END}"),
}],
}
}
}
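For reference, the message this (now-removed) type rendered looks roughly like the following; values are illustrative, and the exact spellings of the policy fields depend on their Display impls:
<environment_context>
Current working directory: /repo
Approval policy: never
Sandbox mode: read-only
Network access: restricted
</environment_context>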

View File

@@ -3,6 +3,7 @@ use std::os::unix::process::ExitStatusExt;
use std::collections::HashMap;
use std::io;
use std::path::Path;
use std::path::PathBuf;
use std::process::ExitStatus;
use std::time::Duration;
@@ -17,7 +18,6 @@ use tokio::process::Child;
use crate::error::CodexErr;
use crate::error::Result;
use crate::error::SandboxErr;
use crate::landlock::spawn_command_under_linux_sandbox;
use crate::protocol::Event;
use crate::protocol::EventMsg;
use crate::protocol::ExecCommandOutputDeltaEvent;
@@ -163,6 +163,65 @@ pub async fn process_exec_tool_call(
}
}
/// Spawn a shell tool command under the Linux Landlock+seccomp sandbox helper
/// (codex-linux-sandbox).
///
/// Unlike macOS Seatbelt, where we embed the policy text directly, the Linux
/// helper receives the working directory and the serialized [`SandboxPolicy`]
/// JSON as leading arguments, followed by `--` and then the original command.
pub async fn spawn_command_under_linux_sandbox<P>(
codex_linux_sandbox_exe: P,
command: Vec<String>,
sandbox_policy: &SandboxPolicy,
cwd: PathBuf,
stdio_policy: StdioPolicy,
env: HashMap<String, String>,
) -> std::io::Result<Child>
where
P: AsRef<Path>,
{
let args = create_linux_sandbox_command_args(command, sandbox_policy, &cwd);
let arg0 = Some("codex-linux-sandbox");
spawn_child_async(
codex_linux_sandbox_exe.as_ref().to_path_buf(),
args,
arg0,
cwd,
sandbox_policy,
stdio_policy,
env,
)
.await
}
/// Converts the sandbox policy into the CLI invocation for `codex-linux-sandbox`.
fn create_linux_sandbox_command_args(
command: Vec<String>,
sandbox_policy: &SandboxPolicy,
cwd: &Path,
) -> Vec<String> {
#[expect(clippy::expect_used)]
let sandbox_policy_cwd = cwd.to_str().expect("cwd must be valid UTF-8").to_string();
#[expect(clippy::expect_used)]
let sandbox_policy_json =
serde_json::to_string(sandbox_policy).expect("Failed to serialize SandboxPolicy to JSON");
let mut linux_cmd: Vec<String> = vec![
sandbox_policy_cwd,
sandbox_policy_json,
// Separator so that command arguments starting with `-` are not parsed as
// options of the helper itself.
"--".to_string(),
];
// Append the original tool command.
linux_cmd.extend(command);
linux_cmd
}
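Concretely, for a command like ["ls", "-l"] run in /repo, the helper process is spawned roughly as below (policy JSON elided; arg0 is set to "codex-linux-sandbox"):
codex-linux-sandbox /repo '<sandbox-policy-json>' -- ls -l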
/// We don't have a fully deterministic way to tell if our command failed
/// because of the sandbox - a command in the user's zshrc file might hit an
/// error, but the command itself might fail or succeed for other reasons.

View File

@@ -70,6 +70,8 @@ where
#[cfg(test)]
mod tests {
#![allow(clippy::unwrap_used, clippy::expect_used)]
use super::*;
use crate::config_types::ShellEnvironmentPolicyInherit;
use maplit::hashmap;

View File

@@ -1,16 +1,11 @@
use std::collections::HashSet;
use std::path::Path;
use codex_protocol::mcp_protocol::GitSha;
use futures::future::join_all;
use serde::Deserialize;
use serde::Serialize;
use tokio::process::Command;
use tokio::time::Duration as TokioDuration;
use tokio::time::timeout;
use crate::util::is_inside_git_repo;
/// Timeout for git commands to prevent freezing on large repositories
const GIT_COMMAND_TIMEOUT: TokioDuration = TokioDuration::from_secs(5);
@@ -27,12 +22,6 @@ pub struct GitInfo {
pub repository_url: Option<String>,
}
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct GitDiffToRemote {
pub sha: GitSha,
pub diff: String,
}
/// Collect git repository information from the given working directory using command-line git.
/// Returns None if no git repository is found or if git operations fail.
/// Uses timeouts to prevent freezing on large repositories.
@@ -62,52 +51,38 @@ pub async fn collect_git_info(cwd: &Path) -> Option<GitInfo> {
};
// Process commit hash
if let Some(output) = commit_result
&& output.status.success()
&& let Ok(hash) = String::from_utf8(output.stdout)
{
git_info.commit_hash = Some(hash.trim().to_string());
if let Some(output) = commit_result {
if output.status.success() {
if let Ok(hash) = String::from_utf8(output.stdout) {
git_info.commit_hash = Some(hash.trim().to_string());
}
}
}
// Process branch name
if let Some(output) = branch_result
&& output.status.success()
&& let Ok(branch) = String::from_utf8(output.stdout)
{
let branch = branch.trim();
if branch != "HEAD" {
git_info.branch = Some(branch.to_string());
if let Some(output) = branch_result {
if output.status.success() {
if let Ok(branch) = String::from_utf8(output.stdout) {
let branch = branch.trim();
if branch != "HEAD" {
git_info.branch = Some(branch.to_string());
}
}
}
}
// Process repository URL
if let Some(output) = url_result
&& output.status.success()
&& let Ok(url) = String::from_utf8(output.stdout)
{
git_info.repository_url = Some(url.trim().to_string());
if let Some(output) = url_result {
if output.status.success() {
if let Ok(url) = String::from_utf8(output.stdout) {
git_info.repository_url = Some(url.trim().to_string());
}
}
}
Some(git_info)
}
/// Returns the closest git sha to HEAD that is on a remote as well as the diff to that sha.
pub async fn git_diff_to_remote(cwd: &Path) -> Option<GitDiffToRemote> {
if !is_inside_git_repo(cwd) {
return None;
}
let remotes = get_git_remotes(cwd).await?;
let branches = branch_ancestry(cwd).await?;
let base_sha = find_closest_sha(cwd, &branches, &remotes).await?;
let diff = diff_against_sha(cwd, &base_sha).await?;
Some(GitDiffToRemote {
sha: base_sha,
diff,
})
}
/// Run a git command with a timeout to prevent blocking on large repositories
async fn run_git_command_with_timeout(args: &[&str], cwd: &Path) -> Option<std::process::Output> {
let result = timeout(
@@ -122,311 +97,11 @@ async fn run_git_command_with_timeout(args: &[&str], cwd: &Path) -> Option<std::
}
}
async fn get_git_remotes(cwd: &Path) -> Option<Vec<String>> {
let output = run_git_command_with_timeout(&["remote"], cwd).await?;
if !output.status.success() {
return None;
}
let mut remotes: Vec<String> = String::from_utf8(output.stdout)
.ok()?
.lines()
.map(|s| s.to_string())
.collect();
if let Some(pos) = remotes.iter().position(|r| r == "origin") {
let origin = remotes.remove(pos);
remotes.insert(0, origin);
}
Some(remotes)
}
/// Attempt to determine the repository's default branch name.
///
/// Preference order:
/// 1) The symbolic ref at `refs/remotes/<remote>/HEAD` for the first remote (origin prioritized)
/// 2) `git remote show <remote>` parsed for "HEAD branch: <name>"
/// 3) Local fallback to existing `main` or `master` if present
async fn get_default_branch(cwd: &Path) -> Option<String> {
// Prefer the first remote (with origin prioritized)
let remotes = get_git_remotes(cwd).await.unwrap_or_default();
for remote in remotes {
// Try symbolic-ref, which returns something like: refs/remotes/origin/main
if let Some(symref_output) = run_git_command_with_timeout(
&[
"symbolic-ref",
"--quiet",
&format!("refs/remotes/{remote}/HEAD"),
],
cwd,
)
.await
&& symref_output.status.success()
&& let Ok(sym) = String::from_utf8(symref_output.stdout)
{
let trimmed = sym.trim();
if let Some((_, name)) = trimmed.rsplit_once('/') {
return Some(name.to_string());
}
}
// Fall back to parsing `git remote show <remote>` output
if let Some(show_output) =
run_git_command_with_timeout(&["remote", "show", &remote], cwd).await
&& show_output.status.success()
&& let Ok(text) = String::from_utf8(show_output.stdout)
{
for line in text.lines() {
let line = line.trim();
if let Some(rest) = line.strip_prefix("HEAD branch:") {
let name = rest.trim();
if !name.is_empty() {
return Some(name.to_string());
}
}
}
}
}
// No remote-derived default; try common local defaults if they exist
for candidate in ["main", "master"] {
if let Some(verify) = run_git_command_with_timeout(
&[
"rev-parse",
"--verify",
"--quiet",
&format!("refs/heads/{candidate}"),
],
cwd,
)
.await
&& verify.status.success()
{
return Some(candidate.to_string());
}
}
None
}
/// Build an ancestry of branches starting at the current branch and ending at the
/// repository's default branch (if determinable).
async fn branch_ancestry(cwd: &Path) -> Option<Vec<String>> {
// Discover current branch (ignore detached HEAD by treating it as None)
let current_branch = run_git_command_with_timeout(&["rev-parse", "--abbrev-ref", "HEAD"], cwd)
.await
.and_then(|o| {
if o.status.success() {
String::from_utf8(o.stdout).ok()
} else {
None
}
})
.map(|s| s.trim().to_string())
.filter(|s| s != "HEAD");
// Discover default branch
let default_branch = get_default_branch(cwd).await;
let mut ancestry: Vec<String> = Vec::new();
let mut seen: HashSet<String> = HashSet::new();
if let Some(cb) = current_branch.clone() {
seen.insert(cb.clone());
ancestry.push(cb);
}
if let Some(db) = default_branch
&& !seen.contains(&db)
{
seen.insert(db.clone());
ancestry.push(db);
}
// Expand candidates: include any remote branches that already contain HEAD.
// This addresses cases where we're on a new local-only branch forked from a
// remote branch that isn't the repository default. We prioritize remotes in
// the order returned by get_git_remotes (origin first).
let remotes = get_git_remotes(cwd).await.unwrap_or_default();
for remote in remotes {
if let Some(output) = run_git_command_with_timeout(
&[
"for-each-ref",
"--format=%(refname:short)",
"--contains=HEAD",
&format!("refs/remotes/{remote}"),
],
cwd,
)
.await
&& output.status.success()
&& let Ok(text) = String::from_utf8(output.stdout)
{
for line in text.lines() {
let short = line.trim();
// Expect format like: "origin/feature"; extract the branch path after "remote/"
if let Some(stripped) = short.strip_prefix(&format!("{remote}/"))
&& !stripped.is_empty()
&& !seen.contains(stripped)
{
seen.insert(stripped.to_string());
ancestry.push(stripped.to_string());
}
}
}
}
// Ensure we return Some vector, even if empty, to allow caller logic to proceed
Some(ancestry)
}
// Helper for a single branch: return the remote SHA if present on any remote
// and the distance (commits ahead of HEAD) for that branch. The first item is
// None if the branch is not present on any remote. Returns None if distance
// could not be computed due to git errors/timeouts.
async fn branch_remote_and_distance(
cwd: &Path,
branch: &str,
remotes: &[String],
) -> Option<(Option<GitSha>, usize)> {
// Try to find the first remote ref that exists for this branch (origin prioritized by caller).
let mut found_remote_sha: Option<GitSha> = None;
let mut found_remote_ref: Option<String> = None;
for remote in remotes {
let remote_ref = format!("refs/remotes/{remote}/{branch}");
let Some(verify_output) =
run_git_command_with_timeout(&["rev-parse", "--verify", "--quiet", &remote_ref], cwd)
.await
else {
// Mirror previous behavior: if the verify call times out/fails at the process level,
// treat the entire branch as unusable.
return None;
};
if !verify_output.status.success() {
continue;
}
let Ok(sha) = String::from_utf8(verify_output.stdout) else {
// Mirror previous behavior and skip the entire branch on parse failure.
return None;
};
found_remote_sha = Some(GitSha::new(sha.trim()));
found_remote_ref = Some(remote_ref);
break;
}
// Compute distance as the number of commits HEAD is ahead of the branch.
// Prefer local branch name if it exists; otherwise fall back to the remote ref (if any).
let count_output = if let Some(local_count) =
run_git_command_with_timeout(&["rev-list", "--count", &format!("{branch}..HEAD")], cwd)
.await
{
if local_count.status.success() {
local_count
} else if let Some(remote_ref) = &found_remote_ref {
match run_git_command_with_timeout(
&["rev-list", "--count", &format!("{remote_ref}..HEAD")],
cwd,
)
.await
{
Some(remote_count) => remote_count,
None => return None,
}
} else {
return None;
}
} else if let Some(remote_ref) = &found_remote_ref {
match run_git_command_with_timeout(
&["rev-list", "--count", &format!("{remote_ref}..HEAD")],
cwd,
)
.await
{
Some(remote_count) => remote_count,
None => return None,
}
} else {
return None;
};
if !count_output.status.success() {
return None;
}
let Ok(distance_str) = String::from_utf8(count_output.stdout) else {
return None;
};
let Ok(distance) = distance_str.trim().parse::<usize>() else {
return None;
};
Some((found_remote_sha, distance))
}
// Finds the closest sha that exists on any of the branches and also exists on any of the remotes.
async fn find_closest_sha(cwd: &Path, branches: &[String], remotes: &[String]) -> Option<GitSha> {
// A sha and how many commits away from HEAD it is.
let mut closest_sha: Option<(GitSha, usize)> = None;
for branch in branches {
let Some((maybe_remote_sha, distance)) =
branch_remote_and_distance(cwd, branch, remotes).await
else {
continue;
};
let Some(remote_sha) = maybe_remote_sha else {
// Preserve existing behavior: skip branches that are not present on a remote.
continue;
};
match &closest_sha {
None => closest_sha = Some((remote_sha, distance)),
Some((_, best_distance)) if distance < *best_distance => {
closest_sha = Some((remote_sha, distance));
}
_ => {}
}
}
closest_sha.map(|(sha, _)| sha)
}
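The selection rule reduces to a minimum over distances. A self-contained sketch with hypothetical SHAs (ties keep the first candidate, matching the strict `<` comparison above):
fn closest(candidates: &[(&str, usize)]) -> Option<&str> {
    // Keep the candidate closest to HEAD (smallest ahead-count); first wins ties.
    candidates.iter().min_by_key(|(_, d)| *d).map(|(sha, _)| *sha)
}
fn main() {
    assert_eq!(closest(&[("abc123", 4), ("def456", 1)]), Some("def456"));
}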
async fn diff_against_sha(cwd: &Path, sha: &GitSha) -> Option<String> {
let output = run_git_command_with_timeout(&["diff", &sha.0], cwd).await?;
// 0 is success and no diff.
// 1 is success but there is a diff.
let exit_ok = output.status.code().is_some_and(|c| c == 0 || c == 1);
if !exit_ok {
return None;
}
let mut diff = String::from_utf8(output.stdout).ok()?;
if let Some(untracked_output) =
run_git_command_with_timeout(&["ls-files", "--others", "--exclude-standard"], cwd).await
&& untracked_output.status.success()
{
let untracked: Vec<String> = String::from_utf8(untracked_output.stdout)
.ok()?
.lines()
.map(|s| s.to_string())
.filter(|s| !s.is_empty())
.collect();
if !untracked.is_empty() {
let futures_iter = untracked.into_iter().map(|file| async move {
let file_owned = file;
let args_vec: Vec<&str> =
vec!["diff", "--binary", "--no-index", "/dev/null", &file_owned];
run_git_command_with_timeout(&args_vec, cwd).await
});
let results = join_all(futures_iter).await;
for extra in results.into_iter().flatten() {
if extra.status.code().is_some_and(|c| c == 0 || c == 1)
&& let Ok(s) = String::from_utf8(extra.stdout)
{
diff.push_str(&s);
}
}
}
}
Some(diff)
}
#[cfg(test)]
mod tests {
#![allow(clippy::expect_used)]
#![allow(clippy::unwrap_used)]
use super::*;
use std::fs;
@@ -435,8 +110,7 @@ mod tests {
// Helper function to create a test git repository
async fn create_test_git_repo(temp_dir: &TempDir) -> PathBuf {
let repo_path = temp_dir.path().join("repo");
fs::create_dir(&repo_path).expect("Failed to create repo dir");
let repo_path = temp_dir.path().to_path_buf();
let envs = vec![
("GIT_CONFIG_GLOBAL", "/dev/null"),
("GIT_CONFIG_NOSYSTEM", "1"),
@@ -491,41 +165,6 @@ mod tests {
repo_path
}
async fn create_test_git_repo_with_remote(temp_dir: &TempDir) -> (PathBuf, String) {
let repo_path = create_test_git_repo(temp_dir).await;
let remote_path = temp_dir.path().join("remote.git");
Command::new("git")
.args(["init", "--bare", remote_path.to_str().unwrap()])
.output()
.await
.expect("Failed to init bare remote");
Command::new("git")
.args(["remote", "add", "origin", remote_path.to_str().unwrap()])
.current_dir(&repo_path)
.output()
.await
.expect("Failed to add remote");
let output = Command::new("git")
.args(["rev-parse", "--abbrev-ref", "HEAD"])
.current_dir(&repo_path)
.output()
.await
.expect("Failed to get branch");
let branch = String::from_utf8(output.stdout).unwrap().trim().to_string();
Command::new("git")
.args(["push", "-u", "origin", &branch])
.current_dir(&repo_path)
.output()
.await
.expect("Failed to push initial commit");
(repo_path, branch)
}
#[tokio::test]
async fn test_collect_git_info_non_git_directory() {
let temp_dir = TempDir::new().expect("Failed to create temp dir");
@@ -639,136 +278,6 @@ mod tests {
assert_eq!(git_info.branch, Some("feature-branch".to_string()));
}
#[tokio::test]
async fn test_get_git_working_tree_state_clean_repo() {
let temp_dir = TempDir::new().expect("Failed to create temp dir");
let (repo_path, branch) = create_test_git_repo_with_remote(&temp_dir).await;
let remote_sha = Command::new("git")
.args(["rev-parse", &format!("origin/{branch}")])
.current_dir(&repo_path)
.output()
.await
.expect("Failed to rev-parse remote");
let remote_sha = String::from_utf8(remote_sha.stdout)
.unwrap()
.trim()
.to_string();
let state = git_diff_to_remote(&repo_path)
.await
.expect("Should collect working tree state");
assert_eq!(state.sha, GitSha::new(&remote_sha));
assert!(state.diff.is_empty());
}
#[tokio::test]
async fn test_get_git_working_tree_state_with_changes() {
let temp_dir = TempDir::new().expect("Failed to create temp dir");
let (repo_path, branch) = create_test_git_repo_with_remote(&temp_dir).await;
let tracked = repo_path.join("test.txt");
fs::write(&tracked, "modified").unwrap();
fs::write(repo_path.join("untracked.txt"), "new").unwrap();
let remote_sha = Command::new("git")
.args(["rev-parse", &format!("origin/{branch}")])
.current_dir(&repo_path)
.output()
.await
.expect("Failed to rev-parse remote");
let remote_sha = String::from_utf8(remote_sha.stdout)
.unwrap()
.trim()
.to_string();
let state = git_diff_to_remote(&repo_path)
.await
.expect("Should collect working tree state");
assert_eq!(state.sha, GitSha::new(&remote_sha));
assert!(state.diff.contains("test.txt"));
assert!(state.diff.contains("untracked.txt"));
}
#[tokio::test]
async fn test_get_git_working_tree_state_branch_fallback() {
let temp_dir = TempDir::new().expect("Failed to create temp dir");
let (repo_path, _branch) = create_test_git_repo_with_remote(&temp_dir).await;
Command::new("git")
.args(["checkout", "-b", "feature"])
.current_dir(&repo_path)
.output()
.await
.expect("Failed to create feature branch");
Command::new("git")
.args(["push", "-u", "origin", "feature"])
.current_dir(&repo_path)
.output()
.await
.expect("Failed to push feature branch");
Command::new("git")
.args(["checkout", "-b", "local-branch"])
.current_dir(&repo_path)
.output()
.await
.expect("Failed to create local branch");
let remote_sha = Command::new("git")
.args(["rev-parse", "origin/feature"])
.current_dir(&repo_path)
.output()
.await
.expect("Failed to rev-parse remote");
let remote_sha = String::from_utf8(remote_sha.stdout)
.unwrap()
.trim()
.to_string();
let state = git_diff_to_remote(&repo_path)
.await
.expect("Should collect working tree state");
assert_eq!(state.sha, GitSha::new(&remote_sha));
}
#[tokio::test]
async fn test_get_git_working_tree_state_unpushed_commit() {
let temp_dir = TempDir::new().expect("Failed to create temp dir");
let (repo_path, branch) = create_test_git_repo_with_remote(&temp_dir).await;
let remote_sha = Command::new("git")
.args(["rev-parse", &format!("origin/{branch}")])
.current_dir(&repo_path)
.output()
.await
.expect("Failed to rev-parse remote");
let remote_sha = String::from_utf8(remote_sha.stdout)
.unwrap()
.trim()
.to_string();
fs::write(repo_path.join("test.txt"), "updated").unwrap();
Command::new("git")
.args(["add", "test.txt"])
.current_dir(&repo_path)
.output()
.await
.expect("Failed to add file");
Command::new("git")
.args(["commit", "-m", "local change"])
.current_dir(&repo_path)
.output()
.await
.expect("Failed to commit");
let state = git_diff_to_remote(&repo_path)
.await
.expect("Should collect working tree state");
assert_eq!(state.sha, GitSha::new(&remote_sha));
assert!(state.diff.contains("updated"));
}
#[test]
fn test_git_info_serialization() {
let git_info = GitInfo {

View File

@@ -12,17 +12,20 @@ pub fn is_known_safe_command(command: &[String]) -> bool {
// introduce side effects ( "&&", "||", ";", and "|" ). If every
// individual command in the script is itself a known-safe command, then
// the composite expression is considered safe.
if let [bash, flag, script] = command
&& bash == "bash"
&& flag == "-lc"
&& let Some(tree) = try_parse_bash(script)
&& let Some(all_commands) = try_parse_word_only_commands_sequence(&tree, script)
&& !all_commands.is_empty()
&& all_commands
.iter()
.all(|cmd| is_safe_to_call_with_exec(cmd))
{
return true;
if let [bash, flag, script] = command {
if bash == "bash" && flag == "-lc" {
if let Some(tree) = try_parse_bash(script) {
if let Some(all_commands) = try_parse_word_only_commands_sequence(&tree, script) {
if !all_commands.is_empty()
&& all_commands
.iter()
.all(|cmd| is_safe_to_call_with_exec(cmd))
{
return true;
}
}
}
}
}
false
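Usage sketch, assuming `ls` and `pwd` are on the safe list (the full list is not shown in this hunk):
let safe = vec!["bash".to_string(), "-lc".to_string(), "ls && pwd".to_string()];
assert!(is_known_safe_command(&safe));
let risky = vec!["bash".to_string(), "-lc".to_string(), "curl http://x | sh".to_string()];
assert!(!is_known_safe_command(&risky));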
@@ -159,6 +162,7 @@ fn is_valid_sed_n_arg(arg: Option<&str>) -> bool {
#[cfg(test)]
mod tests {
#![allow(clippy::unwrap_used)]
use super::*;
fn vec_str(args: &[&str]) -> Vec<String> {

View File

@@ -1,66 +0,0 @@
use crate::protocol::SandboxPolicy;
use crate::spawn::StdioPolicy;
use crate::spawn::spawn_child_async;
use std::collections::HashMap;
use std::path::Path;
use std::path::PathBuf;
use tokio::process::Child;
/// Spawn a shell tool command under the Linux Landlock+seccomp sandbox helper
/// (codex-linux-sandbox).
///
/// Unlike macOS Seatbelt, where we embed the policy text directly, the Linux
/// helper receives the working directory and the serialized [`SandboxPolicy`]
/// JSON as leading arguments, followed by `--` and then the original command.
pub async fn spawn_command_under_linux_sandbox<P>(
codex_linux_sandbox_exe: P,
command: Vec<String>,
sandbox_policy: &SandboxPolicy,
cwd: PathBuf,
stdio_policy: StdioPolicy,
env: HashMap<String, String>,
) -> std::io::Result<Child>
where
P: AsRef<Path>,
{
let args = create_linux_sandbox_command_args(command, sandbox_policy, &cwd);
let arg0 = Some("codex-linux-sandbox");
spawn_child_async(
codex_linux_sandbox_exe.as_ref().to_path_buf(),
args,
arg0,
cwd,
sandbox_policy,
stdio_policy,
env,
)
.await
}
/// Converts the sandbox policy into the CLI invocation for `codex-linux-sandbox`.
fn create_linux_sandbox_command_args(
command: Vec<String>,
sandbox_policy: &SandboxPolicy,
cwd: &Path,
) -> Vec<String> {
#[expect(clippy::expect_used)]
let sandbox_policy_cwd = cwd.to_str().expect("cwd must be valid UTF-8").to_string();
#[expect(clippy::expect_used)]
let sandbox_policy_json =
serde_json::to_string(sandbox_policy).expect("Failed to serialize SandboxPolicy to JSON");
let mut linux_cmd: Vec<String> = vec![
sandbox_policy_cwd,
sandbox_policy_json,
// Separator so that command arguments starting with `-` are not parsed as
// options of the helper itself.
"--".to_string(),
];
// Append the original tool command.
linux_cmd.extend(command);
linux_cmd
}

View File

@@ -17,14 +17,12 @@ pub mod config;
pub mod config_profile;
pub mod config_types;
mod conversation_history;
mod environment_context;
pub mod error;
pub mod exec;
pub mod exec_env;
mod flags;
pub mod git_info;
mod is_safe_command;
pub mod landlock;
mod mcp_connection_manager;
mod mcp_tool_call;
mod message_history;
@@ -44,21 +42,15 @@ mod openai_model_info;
mod openai_tools;
pub mod plan_tool;
mod project_doc;
pub mod protocol;
mod rollout;
pub(crate) mod safety;
pub mod seatbelt;
pub mod shell;
pub mod spawn;
pub mod terminal;
pub mod turn_diff_tracker;
pub mod user_agent;
mod user_notification;
pub mod util;
pub use apply_patch::CODEX_APPLY_PATCH_ARG1;
pub use safety::get_platform_sandbox;
// Re-export the protocol types from the standalone `codex-protocol` crate so existing
// `codex_core::protocol::...` references continue to work across the workspace.
pub use codex_protocol::protocol;
// Re-export protocol config enums to ensure call sites can use the same types
// as those in the protocol crate when constructing protocol messages.
pub use codex_protocol::config_types as protocol_config_types;

View File

@@ -281,6 +281,7 @@ fn is_valid_mcp_server_name(server_name: &str) -> bool {
}
#[cfg(test)]
#[allow(clippy::unwrap_used)]
mod tests {
use super::*;
use mcp_types::ToolInputSchema;

View File

@@ -34,8 +34,6 @@ use crate::config_types::HistoryPersistence;
use std::os::unix::fs::OpenOptionsExt;
#[cfg(unix)]
use std::os::unix::fs::PermissionsExt;
#[cfg(unix)]
use std::os::unix::io::AsRawFd;
/// Filename that stores the message history inside `~/.codex`.
const HISTORY_FILENAME: &str = "history.jsonl";
@@ -127,12 +125,11 @@ pub(crate) async fn append_entry(text: &str, session_id: &Uuid, config: &Config)
/// times if the lock is currently held by another process. This prevents a
/// potential indefinite wait while still giving other writers some time to
/// finish their operation.
#[cfg(unix)]
async fn acquire_exclusive_lock_with_retry(file: &File) -> Result<()> {
async fn acquire_exclusive_lock_with_retry(file: &std::fs::File) -> Result<()> {
use tokio::time::sleep;
for _ in 0..MAX_RETRIES {
match try_flock_exclusive(file) {
match fs2::FileExt::try_lock_exclusive(file) {
Ok(()) => return Ok(()),
Err(e) if e.kind() == std::io::ErrorKind::WouldBlock => {
sleep(RETRY_SLEEP).await;
@@ -147,12 +144,6 @@ async fn acquire_exclusive_lock_with_retry(file: &File) -> Result<()> {
))
}
#[cfg(not(unix))]
async fn acquire_exclusive_lock_with_retry(_file: &File) -> Result<()> {
// On non-Unix, skip locking; appends are still atomic with O_APPEND.
Ok(())
}
/// Asynchronously fetch the history file's *identifier* (inode on Unix) and
/// the current number of entries by counting newline characters.
pub(crate) async fn history_metadata(config: &Config) -> (u64, usize) {
@@ -268,7 +259,7 @@ pub(crate) fn lookup(log_id: u64, offset: usize, config: &Config) -> Option<Hist
#[cfg(unix)]
fn acquire_shared_lock_with_retry(file: &File) -> Result<()> {
for _ in 0..MAX_RETRIES {
match try_flock_shared(file) {
match fs2::FileExt::try_lock_shared(file) {
Ok(()) => return Ok(()),
Err(e) if e.kind() == std::io::ErrorKind::WouldBlock => {
std::thread::sleep(RETRY_SLEEP);
@@ -283,45 +274,6 @@ fn acquire_shared_lock_with_retry(file: &File) -> Result<()> {
))
}
#[cfg(not(unix))]
fn acquire_shared_lock_with_retry(_file: &File) -> Result<()> {
Ok(())
}
#[cfg(unix)]
fn try_flock_exclusive(file: &File) -> Result<()> {
let fd = file.as_raw_fd();
let rc = unsafe { libc::flock(fd, libc::LOCK_EX | libc::LOCK_NB) };
if rc == 0 {
Ok(())
} else {
let err = std::io::Error::last_os_error();
match err.raw_os_error() {
Some(code) if code == libc::EWOULDBLOCK || code == libc::EAGAIN => Err(
std::io::Error::new(std::io::ErrorKind::WouldBlock, "lock would block"),
),
_ => Err(err),
}
}
}
#[cfg(unix)]
fn try_flock_shared(file: &File) -> Result<()> {
let fd = file.as_raw_fd();
let rc = unsafe { libc::flock(fd, libc::LOCK_SH | libc::LOCK_NB) };
if rc == 0 {
Ok(())
} else {
let err = std::io::Error::last_os_error();
match err.raw_os_error() {
Some(code) if code == libc::EWOULDBLOCK || code == libc::EAGAIN => Err(
std::io::Error::new(std::io::ErrorKind::WouldBlock, "lock would block"),
),
_ => Err(err),
}
}
}
/// On Unix systems ensure the file permissions are `0o600` (rw-------). If the
/// permissions cannot be changed the error is propagated to the caller.
#[cfg(unix)]

View File

@@ -23,10 +23,6 @@ pub struct ModelFamily {
// the model such that its description can be omitted.
// See https://platform.openai.com/docs/guides/tools-local-shell
pub uses_local_shell_tool: bool,
/// True if the model performs better when `apply_patch` is provided as
/// a tool call instead of just a bash command.
pub uses_apply_patch_tool: bool,
}
macro_rules! model_family {
@@ -40,7 +36,6 @@ macro_rules! model_family {
needs_special_apply_patch_instructions: false,
supports_reasoning_summaries: false,
uses_local_shell_tool: false,
uses_apply_patch_tool: false,
};
// apply overrides
$(
@@ -60,7 +55,6 @@ macro_rules! simple_model_family {
needs_special_apply_patch_instructions: false,
supports_reasoning_summaries: false,
uses_local_shell_tool: false,
uses_apply_patch_tool: false,
})
}};
}
@@ -94,10 +88,10 @@ pub fn find_family_for_model(slug: &str) -> Option<ModelFamily> {
slug, "gpt-4.1",
needs_special_apply_patch_instructions: true,
)
} else if slug.starts_with("gpt-oss") {
model_family!(slug, "gpt-oss", uses_apply_patch_tool: true)
} else if slug.starts_with("gpt-4o") {
simple_model_family!(slug, "gpt-4o")
} else if slug.starts_with("gpt-oss") {
simple_model_family!(slug, "gpt-oss")
} else if slug.starts_with("gpt-3.5") {
simple_model_family!(slug, "gpt-3.5")
} else if slug.starts_with("gpt-5") {

View File

@@ -167,10 +167,10 @@ impl ModelProviderInfo {
if let Some(env_headers) = &self.env_http_headers {
for (header, env_var) in env_headers {
if let Ok(val) = std::env::var(env_var)
&& !val.trim().is_empty()
{
builder = builder.header(header, val);
if let Ok(val) = std::env::var(env_var) {
if !val.trim().is_empty() {
builder = builder.header(header, val);
}
}
}
}
@@ -322,6 +322,7 @@ pub fn create_oss_provider_with_base_url(base_url: &str) -> ModelProviderInfo {
#[cfg(test)]
mod tests {
#![allow(clippy::unwrap_used)]
use super::*;
use pretty_assertions::assert_eq;

View File

@@ -183,7 +183,6 @@ impl From<Vec<InputItem>> for ResponseInputItem {
None
}
},
_ => None,
})
.collect::<Vec<ContentItem>>(),
}
@@ -267,6 +266,7 @@ impl std::ops::Deref for FunctionCallOutputPayload {
#[cfg(test)]
mod tests {
#![allow(clippy::unwrap_used)]
use super::*;
#[test]

View File

@@ -43,7 +43,6 @@ pub enum ConfigShellToolType {
pub struct ToolsConfig {
pub shell_type: ConfigShellToolType,
pub plan_tool: bool,
pub apply_patch_tool: bool,
}
impl ToolsConfig {
@@ -52,7 +51,6 @@ impl ToolsConfig {
approval_policy: AskForApproval,
sandbox_policy: SandboxPolicy,
include_plan_tool: bool,
include_apply_patch_tool: bool,
) -> Self {
let mut shell_type = if model_family.uses_local_shell_tool {
ConfigShellToolType::LocalShell
@@ -68,7 +66,6 @@ impl ToolsConfig {
Self {
shell_type,
plan_tool: include_plan_tool,
apply_patch_tool: include_apply_patch_tool || model_family.uses_apply_patch_tool,
}
}
}
@@ -238,87 +235,6 @@ The shell tool is used to execute shell commands.
})
}
#[derive(Serialize, Deserialize)]
pub(crate) struct ApplyPatchToolArgs {
pub(crate) input: String,
}
fn create_apply_patch_tool() -> OpenAiTool {
// Minimal schema: one required string argument containing the patch body
let mut properties = BTreeMap::new();
properties.insert(
"input".to_string(),
JsonSchema::String {
description: Some(r#"The entire contents of the apply_patch command"#.to_string()),
},
);
OpenAiTool::Function(ResponsesApiTool {
name: "apply_patch".to_string(),
description: r#"Use this tool to edit files.
Your patch language is a strippeddown, fileoriented diff format designed to be easy to parse and safe to apply. You can think of it as a highlevel envelope:
**_ Begin Patch
[ one or more file sections ]
_** End Patch
Within that envelope, you get a sequence of file operations.
You MUST include a header to specify the action you are taking.
Each operation starts with one of three headers:
**_ Add File: <path> - create a new file. Every following line is a + line (the initial contents).
_** Delete File: <path> - remove an existing file. Nothing follows.
\*\*\* Update File: <path> - patch an existing file in place (optionally with a rename).
May be immediately followed by \*\*\* Move to: <new path> if you want to rename the file.
Then one or more “hunks”, each introduced by @@ (optionally followed by a hunk header).
Within a hunk each line starts with:
- for inserted text,
* for removed text, or
space ( ) for context.
At the end of a truncated hunk you can emit \*\*\* End of File.
Patch := Begin { FileOp } End
Begin := "**_ Begin Patch" NEWLINE
End := "_** End Patch" NEWLINE
FileOp := AddFile | DeleteFile | UpdateFile
AddFile := "**_ Add File: " path NEWLINE { "+" line NEWLINE }
DeleteFile := "_** Delete File: " path NEWLINE
UpdateFile := "**_ Update File: " path NEWLINE [ MoveTo ] { Hunk }
MoveTo := "_** Move to: " newPath NEWLINE
Hunk := "@@" [ header ] NEWLINE { HunkLine } [ "*** End of File" NEWLINE ]
HunkLine := (" " | "-" | "+") text NEWLINE
A full patch can combine several operations:
*** Begin Patch
*** Add File: hello.txt
+Hello world
*** Update File: src/app.py
*** Move to: src/main.py
@@ def greet():
-print("Hi")
+print("Hello, world!")
*** Delete File: obsolete.txt
*** End Patch
It is important to remember:
- You must include a header with your intended action (Add/Delete/Update)
- You must prefix new lines with `+` even when creating a new file
"#
.to_string(),
strict: false,
parameters: JsonSchema::Object {
properties,
required: Some(vec!["input".to_string()]),
additional_properties: Some(false),
},
})
}
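
To make the schema above concrete, here is a hedged sketch of the arguments a model would send when calling `apply_patch`: a single required string field named `input` holding the whole patch envelope. The local struct mirrors `ApplyPatchToolArgs` only for illustration and assumes `serde`/`serde_json`.

```rust
use serde::Deserialize;

#[derive(Deserialize)]
struct ApplyPatchToolArgs {
    input: String,
}

fn main() {
    // Hypothetical tool-call arguments matching the schema above.
    let args = serde_json::json!({
        "input": "*** Begin Patch\n*** Add File: hello.txt\n+Hello world\n*** End Patch\n"
    });
    let parsed: ApplyPatchToolArgs = serde_json::from_value(args).unwrap();
    assert!(parsed.input.starts_with("*** Begin Patch"));
}
```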
/// Returns JSON values that are compatible with Function Calling in the
/// Responses API:
/// https://platform.openai.com/docs/guides/function-calling?api-mode=responses
@@ -420,11 +336,11 @@ fn sanitize_json_schema(value: &mut JsonValue) {
}
JsonValue::Object(map) => {
// First, recursively sanitize known nested schema holders
if let Some(props) = map.get_mut("properties")
&& let Some(props_map) = props.as_object_mut()
{
for (_k, v) in props_map.iter_mut() {
sanitize_json_schema(v);
if let Some(props) = map.get_mut("properties") {
if let Some(props_map) = props.as_object_mut() {
for (_k, v) in props_map.iter_mut() {
sanitize_json_schema(v);
}
}
}
if let Some(items) = map.get_mut("items") {
@@ -444,18 +360,18 @@ fn sanitize_json_schema(value: &mut JsonValue) {
.map(|s| s.to_string());
// If type is an array (union), pick first supported; else leave to inference
if ty.is_none()
&& let Some(JsonValue::Array(types)) = map.get("type")
{
for t in types {
if let Some(tt) = t.as_str()
&& matches!(
tt,
"object" | "array" | "string" | "number" | "integer" | "boolean"
)
{
ty = Some(tt.to_string());
break;
if ty.is_none() {
if let Some(JsonValue::Array(types)) = map.get("type") {
for t in types {
if let Some(tt) = t.as_str() {
if matches!(
tt,
"object" | "array" | "string" | "number" | "integer" | "boolean"
) {
ty = Some(tt.to_string());
break;
}
}
}
}
}
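
The union-type fallback restored above can be read in isolation: when `"type"` is an array, the first member naming a supported JSON Schema type wins. A minimal sketch (function name is illustrative):

```rust
use serde_json::{Value, json};

// Pick a usable "type" from a schema node: a plain string wins outright;
// for an array (union), take the first supported member.
fn pick_supported_type(schema: &Value) -> Option<String> {
    match schema.get("type") {
        Some(Value::String(s)) => Some(s.clone()),
        Some(Value::Array(types)) => types.iter().find_map(|t| {
            t.as_str()
                .filter(|tt| {
                    matches!(
                        *tt,
                        "object" | "array" | "string" | "number" | "integer" | "boolean"
                    )
                })
                .map(str::to_string)
        }),
        _ => None,
    }
}

fn main() {
    assert_eq!(
        pick_supported_type(&json!({ "type": ["null", "string"] })).as_deref(),
        Some("string")
    );
}
```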
@@ -539,10 +455,6 @@ pub(crate) fn get_openai_tools(
tools.push(PLAN_TOOL.clone());
}
if config.apply_patch_tool {
tools.push(create_apply_patch_tool());
}
if let Some(mcp_tools) = mcp_tools {
for (name, tool) in mcp_tools {
match mcp_tool_to_openai_tool(name.clone(), tool.clone()) {
@@ -558,6 +470,7 @@ pub(crate) fn get_openai_tools(
}
#[cfg(test)]
#[allow(clippy::expect_used)]
mod tests {
use crate::model_family::find_family_for_model;
use mcp_types::ToolInputSchema;
@@ -596,7 +509,6 @@ mod tests {
AskForApproval::Never,
SandboxPolicy::ReadOnly,
true,
model_family.uses_apply_patch_tool,
);
let tools = get_openai_tools(&config, Some(HashMap::new()));
@@ -611,7 +523,6 @@ mod tests {
AskForApproval::Never,
SandboxPolicy::ReadOnly,
true,
model_family.uses_apply_patch_tool,
);
let tools = get_openai_tools(&config, Some(HashMap::new()));
@@ -626,7 +537,6 @@ mod tests {
AskForApproval::Never,
SandboxPolicy::ReadOnly,
false,
model_family.uses_apply_patch_tool,
);
let tools = get_openai_tools(
&config,
@@ -720,7 +630,6 @@ mod tests {
AskForApproval::Never,
SandboxPolicy::ReadOnly,
false,
model_family.uses_apply_patch_tool,
);
let tools = get_openai_tools(
@@ -776,7 +685,6 @@ mod tests {
AskForApproval::Never,
SandboxPolicy::ReadOnly,
false,
model_family.uses_apply_patch_tool,
);
let tools = get_openai_tools(
@@ -827,7 +735,6 @@ mod tests {
AskForApproval::Never,
SandboxPolicy::ReadOnly,
false,
model_family.uses_apply_patch_tool,
);
let tools = get_openai_tools(
@@ -881,7 +788,6 @@ mod tests {
AskForApproval::Never,
SandboxPolicy::ReadOnly,
false,
model_family.uses_apply_patch_tool,
);
let tools = get_openai_tools(

File diff suppressed because it is too large

View File

@@ -1,6 +1,9 @@
use std::collections::BTreeMap;
use std::sync::LazyLock;
use serde::Deserialize;
use serde::Serialize;
use crate::codex::Session;
use crate::models::FunctionCallOutputPayload;
use crate::models::ResponseInputItem;
@@ -10,13 +13,29 @@ use crate::openai_tools::ResponsesApiTool;
use crate::protocol::Event;
use crate::protocol::EventMsg;
// Use the canonical plan tool types from the protocol crate to ensure
// type-identity matches events transported via `codex_protocol`.
pub use codex_protocol::plan_tool::PlanItemArg;
pub use codex_protocol::plan_tool::StepStatus;
pub use codex_protocol::plan_tool::UpdatePlanArgs;
// Types for the TODO tool arguments matching codex-vscode/todo-mcp/src/main.rs
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum StepStatus {
Pending,
InProgress,
Completed,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(deny_unknown_fields)]
pub struct PlanItemArg {
pub step: String,
pub status: StepStatus,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(deny_unknown_fields)]
pub struct UpdatePlanArgs {
#[serde(default)]
pub explanation: Option<String>,
pub plan: Vec<PlanItemArg>,
}
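
Since the plan-tool argument types are now defined locally, a hedged sketch of the JSON payload they accept may help; field values are invented for illustration, and the structs are re-declared only to keep the snippet self-contained.

```rust
use serde::Deserialize;

#[allow(dead_code)]
#[derive(Deserialize)]
#[serde(rename_all = "snake_case")]
enum StepStatus {
    Pending,
    InProgress,
    Completed,
}

#[allow(dead_code)]
#[derive(Deserialize)]
struct PlanItemArg {
    step: String,
    status: StepStatus,
}

#[allow(dead_code)]
#[derive(Deserialize)]
struct UpdatePlanArgs {
    #[serde(default)]
    explanation: Option<String>,
    plan: Vec<PlanItemArg>,
}

fn main() {
    // Example update_plan arguments; statuses use the snake_case encoding above.
    let raw = r#"{
        "explanation": "initial plan",
        "plan": [
            { "step": "write failing test", "status": "pending" },
            { "step": "implement fix", "status": "in_progress" }
        ]
    }"#;
    let args: UpdatePlanArgs = serde_json::from_str(raw).unwrap();
    assert_eq!(args.plan.len(), 2);
}
```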
pub(crate) static PLAN_TOOL: LazyLock<OpenAiTool> = LazyLock::new(|| {
let mut plan_item_props = BTreeMap::new();

View File

@@ -134,6 +134,8 @@ async fn load_first_candidate(
#[cfg(test)]
mod tests {
#![allow(clippy::expect_used, clippy::unwrap_used)]
use super::*;
use crate::config::ConfigOverrides;
use crate::config::ConfigToml;

View File

@@ -11,17 +11,16 @@ use std::str::FromStr;
use std::time::Duration;
use mcp_types::CallToolResult;
use mcp_types::Tool as McpTool;
use serde::Deserialize;
use serde::Serialize;
use serde_bytes::ByteBuf;
use strum_macros::Display;
use ts_rs::TS;
use uuid::Uuid;
use crate::config_types::ReasoningEffort as ReasoningEffortConfig;
use crate::config_types::ReasoningSummary as ReasoningSummaryConfig;
use crate::message_history::HistoryEntry;
use crate::model_provider_info::ModelProviderInfo;
use crate::parse_command::ParsedCommand;
use crate::plan_tool::UpdatePlanArgs;
@@ -40,8 +39,50 @@ pub struct Submission {
#[allow(clippy::large_enum_variant)]
#[non_exhaustive]
pub enum Op {
/// Configure the model session.
ConfigureSession {
/// Provider identifier ("openai", "openrouter", ...).
provider: ModelProviderInfo,
/// If not specified, server will use its default model.
model: String,
model_reasoning_effort: ReasoningEffortConfig,
model_reasoning_summary: ReasoningSummaryConfig,
/// Model instructions that are appended to the base instructions.
user_instructions: Option<String>,
/// Base instructions override.
base_instructions: Option<String>,
/// When to escalate for approval for execution
approval_policy: AskForApproval,
/// How to sandbox commands executed in the system
sandbox_policy: SandboxPolicy,
/// Disable server-side response storage (send full context each request)
#[serde(default)]
disable_response_storage: bool,
/// Optional external notifier command tokens. Present only when the
/// client wants the agent to spawn a program after each completed
/// turn.
#[serde(skip_serializing_if = "Option::is_none")]
#[serde(default)]
notify: Option<Vec<String>>,
/// Working directory that should be treated as the *root* of the
/// session. All relative paths supplied by the model as well as the
/// execution sandbox are resolved against this directory **instead**
/// of the process-wide current working directory. CLI front-ends are
/// expected to expand this to an absolute path before sending the
/// `ConfigureSession` operation so that the business-logic layer can
/// operate deterministically.
cwd: std::path::PathBuf,
},
/// Abort current task.
/// This server sends [`EventMsg::TurnAborted`] in response.
/// This server sends no corresponding Event
Interrupt,
/// Input from the user
@@ -50,65 +91,6 @@ pub enum Op {
items: Vec<InputItem>,
},
/// Similar to [`Op::UserInput`], but contains additional context required
/// for a turn of a [`crate::codex_conversation::CodexConversation`].
UserTurn {
/// User input items, see `InputItem`
items: Vec<InputItem>,
/// `cwd` to use with the [`SandboxPolicy`] and potentially tool calls
/// such as `local_shell`.
cwd: PathBuf,
/// Policy to use for command approval.
approval_policy: AskForApproval,
/// Policy to use for tool calls such as `local_shell`.
sandbox_policy: SandboxPolicy,
/// Must be a valid model slug for the [`crate::client::ModelClient`]
/// associated with this conversation.
model: String,
/// Will only be honored if the model is configured to use reasoning.
effort: ReasoningEffortConfig,
/// Will only be honored if the model is configured to use reasoning.
summary: ReasoningSummaryConfig,
},
/// Override parts of the persistent turn context for subsequent turns.
///
/// All fields are optional; when omitted, the existing value is preserved.
/// This does not enqueue any input; it only updates defaults used for
/// future `UserInput` turns.
OverrideTurnContext {
/// Updated `cwd` for sandbox/tool calls.
#[serde(skip_serializing_if = "Option::is_none")]
cwd: Option<PathBuf>,
/// Updated command approval policy.
#[serde(skip_serializing_if = "Option::is_none")]
approval_policy: Option<AskForApproval>,
/// Updated sandbox policy for tool calls.
#[serde(skip_serializing_if = "Option::is_none")]
sandbox_policy: Option<SandboxPolicy>,
/// Updated model slug. When set, the model family is derived
/// automatically.
#[serde(skip_serializing_if = "Option::is_none")]
model: Option<String>,
/// Updated reasoning effort (honored only for reasoning-capable models).
#[serde(skip_serializing_if = "Option::is_none")]
effort: Option<ReasoningEffortConfig>,
/// Updated reasoning summary preference (honored only for reasoning-capable models).
#[serde(skip_serializing_if = "Option::is_none")]
summary: Option<ReasoningSummaryConfig>,
},
/// Approve a command execution
ExecApproval {
/// The id of the submission we are approving
@@ -137,10 +119,6 @@ pub enum Op {
/// Request a single history entry identified by `log_id` + `offset`.
GetHistoryEntryRequest { offset: usize, log_id: u64 },
/// Request the list of MCP tools available across all configured servers.
/// Reply is delivered via `EventMsg::McpListToolsResponse`.
ListMcpTools,
/// Request the agent to summarize the current conversation context.
/// The agent will use its existing context (either conversation history or previous response id)
/// to generate a summary which will be returned as an AgentMessage event.
@@ -151,7 +129,7 @@ pub enum Op {
/// Determines the conditions under which the user is consulted to approve
/// running the command proposed by Codex.
#[derive(Debug, Clone, Copy, Default, PartialEq, Eq, Hash, Serialize, Deserialize, Display, TS)]
#[derive(Debug, Clone, Copy, Default, PartialEq, Eq, Hash, Serialize, Deserialize, Display)]
#[serde(rename_all = "kebab-case")]
#[strum(serialize_all = "kebab-case")]
pub enum AskForApproval {
@@ -178,7 +156,7 @@ pub enum AskForApproval {
}
/// Determines execution restrictions for model shell commands.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Display, TS)]
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Display)]
#[strum(serialize_all = "kebab-case")]
#[serde(tag = "mode", rename_all = "kebab-case")]
pub enum SandboxPolicy {
@@ -223,31 +201,10 @@ pub enum SandboxPolicy {
/// not modified by the agent.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct WritableRoot {
/// Absolute path, by construction.
pub root: PathBuf,
/// Also absolute paths, by construction.
pub read_only_subpaths: Vec<PathBuf>,
}
impl WritableRoot {
pub fn is_path_writable(&self, path: &Path) -> bool {
// Check if the path is under the root.
if !path.starts_with(&self.root) {
return false;
}
// Check if the path is under any of the read-only subpaths.
for subpath in &self.read_only_subpaths {
if path.starts_with(subpath) {
return false;
}
}
true
}
}
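
The containment rule removed above reduces to two `starts_with` checks; a sketch with illustrative paths:

```rust
use std::path::PathBuf;

fn main() {
    // A path is writable iff it is under the root and under none of the
    // read-only subpaths (paths are illustrative).
    let root = PathBuf::from("/workspace");
    let read_only = vec![PathBuf::from("/workspace/.git")];

    let candidate = PathBuf::from("/workspace/.git/config");
    let writable = candidate.starts_with(&root)
        && !read_only.iter().any(|ro| candidate.starts_with(ro));
    assert!(!writable);
}
```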
impl FromStr for SandboxPolicy {
type Err = serde_json::Error;
@@ -458,13 +415,8 @@ pub enum EventMsg {
/// Response to GetHistoryEntryRequest.
GetHistoryEntryResponse(GetHistoryEntryResponseEvent),
/// List of MCP tools available to the agent.
McpListToolsResponse(McpListToolsResponseEvent),
PlanUpdate(UpdatePlanArgs),
TurnAborted(TurnAbortedEvent),
/// Notification that the agent is shutting down.
ShutdownComplete,
}
@@ -516,33 +468,6 @@ impl TokenUsage {
self.total_tokens
.saturating_sub(self.reasoning_output_tokens.unwrap_or(0))
}
/// Estimate the remaining user-controllable percentage of the model's context window.
///
/// `context_window` is the total size of the model's context window.
/// `baseline_used_tokens` should capture tokens that are always present in
/// the context (e.g., system prompt and fixed tool instructions) so that
/// the percentage reflects the portion the user can influence.
///
/// This normalizes both the numerator and denominator by subtracting the
/// baseline, so immediately after the first prompt the UI shows 100% left
/// and trends toward 0% as the user fills the effective window.
pub fn percent_of_context_window_remaining(
&self,
context_window: u64,
baseline_used_tokens: u64,
) -> u8 {
if context_window <= baseline_used_tokens {
return 0;
}
let effective_window = context_window - baseline_used_tokens;
let used = self
.tokens_in_context_window()
.saturating_sub(baseline_used_tokens);
let remaining = effective_window.saturating_sub(used);
((remaining as f32 / effective_window as f32) * 100.0).clamp(0.0, 100.0) as u8
}
}
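
A worked example of the removed percentage helper, with made-up numbers: window 100, baseline 20, and 60 tokens in context leaves 40 of the effective 80, i.e. 50% remaining.

```rust
fn main() {
    let (context_window, baseline, in_context): (u64, u64, u64) = (100, 20, 60);
    let effective = context_window - baseline; // 80
    let used = in_context.saturating_sub(baseline); // 40
    let remaining = effective.saturating_sub(used); // 40
    let pct = ((remaining as f32 / effective as f32) * 100.0).clamp(0.0, 100.0) as u8;
    assert_eq!(pct, 50); // 40 / 80
}
```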
#[derive(Debug, Clone, Deserialize, Serialize)]
@@ -757,13 +682,6 @@ pub struct GetHistoryEntryResponseEvent {
pub entry: Option<HistoryEntry>,
}
/// Response payload for `Op::ListMcpTools`.
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct McpListToolsResponseEvent {
/// Fully qualified tool name -> tool definition.
pub tools: std::collections::HashMap<String, McpTool>,
}
#[derive(Debug, Default, Clone, Deserialize, Serialize)]
pub struct SessionConfiguredEvent {
/// Unique id for this session.
@@ -780,7 +698,7 @@ pub struct SessionConfiguredEvent {
}
/// User's decision in response to an ExecApprovalRequest.
#[derive(Debug, Default, Clone, Copy, Deserialize, Serialize, PartialEq, Eq, TS)]
#[derive(Debug, Default, Clone, Copy, Deserialize, Serialize, PartialEq, Eq)]
#[serde(rename_all = "snake_case")]
pub enum ReviewDecision {
/// User has approved this command and the agent should execute it.
@@ -801,7 +719,7 @@ pub enum ReviewDecision {
Abort,
}
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, TS)]
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq)]
#[serde(rename_all = "snake_case")]
pub enum FileChange {
Add {
@@ -822,20 +740,9 @@ pub struct Chunk {
pub inserted_lines: Vec<String>,
}
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct TurnAbortedEvent {
pub reason: TurnAbortReason,
}
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, TS)]
#[serde(rename_all = "snake_case")]
pub enum TurnAbortReason {
Interrupted,
Replaced,
}
#[cfg(test)]
mod tests {
#![allow(clippy::unwrap_used)]
use super::*;
/// Serialize Event to verify that its JSON representation has the expected

View File

@@ -1,13 +1,11 @@
//! Persist Codex session rollouts (.jsonl) so sessions can be replayed or inspected later.
//! Functionality to persist a Codex conversation rollout to disk.
use std::fs::File;
use std::fs::{self};
use std::io::Error as IoError;
use std::path::Path;
use serde::Deserialize;
use serde::Serialize;
use serde_json::Value;
use time::OffsetDateTime;
use time::format_description::FormatItem;
use time::macros::format_description;
@@ -15,7 +13,6 @@ use tokio::io::AsyncWriteExt;
use tokio::sync::mpsc::Sender;
use tokio::sync::mpsc::{self};
use tokio::sync::oneshot;
use tracing::info;
use tracing::warn;
use uuid::Uuid;
@@ -24,6 +21,7 @@ use crate::git_info::GitInfo;
use crate::git_info::collect_git_info;
use crate::models::ResponseItem;
/// Folder inside `~/.codex` that holds saved rollouts.
const SESSIONS_SUBDIR: &str = "sessions";
#[derive(Serialize, Deserialize, Clone, Default)]
@@ -41,28 +39,6 @@ struct SessionMetaWithGit {
git: Option<GitInfo>,
}
#[derive(Serialize, Deserialize, Default, Clone)]
pub struct SessionStateSnapshot {}
#[derive(Serialize, Deserialize, Default, Clone)]
pub struct SavedSession {
pub session: SessionMeta,
#[serde(default)]
pub items: Vec<ResponseItem>,
#[serde(default)]
pub state: SessionStateSnapshot,
pub session_id: Uuid,
}
/// Records all [`ResponseItem`]s for a session and flushes them to disk after
/// every update.
///
/// Rollouts are recorded as JSONL and can be inspected with tools such as:
///
/// ```ignore
/// $ jq -C . ~/.codex/sessions/rollout-2025-05-07T17-24-21-5973b6c0-94b8-487b-a530-2aeb6098ae0e.jsonl
/// $ fx ~/.codex/sessions/rollout-2025-05-07T17-24-21-5973b6c0-94b8-487b-a530-2aeb6098ae0e.jsonl
/// ```
#[derive(Clone)]
pub(crate) struct RolloutRecorder {
tx: Sender<RolloutCmd>,
@@ -70,7 +46,6 @@ pub(crate) struct RolloutRecorder {
enum RolloutCmd {
AddItems(Vec<ResponseItem>),
UpdateState(SessionStateSnapshot),
Shutdown { ack: oneshot::Sender<()> },
}
@@ -89,6 +64,7 @@ impl RolloutRecorder {
timestamp,
} = create_log_file(config, uuid)?;
// Build the static session metadata JSON first.
let timestamp_format: &[FormatItem] = format_description!(
"[year]-[month]-[day]T[hour]:[minute]:[second].[subsecond digits:3]Z"
);
@@ -121,8 +97,10 @@ impl RolloutRecorder {
Ok(Self { tx })
}
/// Append `items` to the rollout file.
pub(crate) async fn record_items(&self, items: &[ResponseItem]) -> std::io::Result<()> {
let mut filtered = Vec::new();
// Filter out items we should not serialize.
let mut filtered: Vec<ResponseItem> = Vec::new();
for item in items {
match item {
// Note that function calls may look a bit strange if they are
@@ -131,12 +109,8 @@ impl RolloutRecorder {
ResponseItem::Message { .. }
| ResponseItem::LocalShellCall { .. }
| ResponseItem::FunctionCall { .. }
| ResponseItem::FunctionCallOutput { .. }
| ResponseItem::Reasoning { .. } => filtered.push(item.clone()),
ResponseItem::Other => {
// These should never be serialized.
continue;
}
| ResponseItem::FunctionCallOutput { .. } => filtered.push(item.clone()),
ResponseItem::Reasoning { .. } | ResponseItem::Other => {}
}
}
if filtered.is_empty() {
@@ -148,84 +122,6 @@ impl RolloutRecorder {
.map_err(|e| IoError::other(format!("failed to queue rollout items: {e}")))
}
pub(crate) async fn record_state(&self, state: SessionStateSnapshot) -> std::io::Result<()> {
self.tx
.send(RolloutCmd::UpdateState(state))
.await
.map_err(|e| IoError::other(format!("failed to queue rollout state: {e}")))
}
pub async fn resume(
path: &Path,
cwd: std::path::PathBuf,
) -> std::io::Result<(Self, SavedSession)> {
info!("Resuming rollout from {path:?}");
let text = tokio::fs::read_to_string(path).await?;
let mut lines = text.lines();
let meta_line = lines
.next()
.ok_or_else(|| IoError::other("empty session file"))?;
let session: SessionMeta = serde_json::from_str(meta_line)
.map_err(|e| IoError::other(format!("failed to parse session meta: {e}")))?;
let mut items = Vec::new();
let mut state = SessionStateSnapshot::default();
for line in lines {
if line.trim().is_empty() {
continue;
}
let v: Value = match serde_json::from_str(line) {
Ok(v) => v,
Err(_) => continue,
};
if v.get("record_type")
.and_then(|rt| rt.as_str())
.map(|s| s == "state")
.unwrap_or(false)
{
if let Ok(s) = serde_json::from_value::<SessionStateSnapshot>(v.clone()) {
state = s
}
continue;
}
match serde_json::from_value::<ResponseItem>(v.clone()) {
Ok(item) => match item {
ResponseItem::Message { .. }
| ResponseItem::LocalShellCall { .. }
| ResponseItem::FunctionCall { .. }
| ResponseItem::FunctionCallOutput { .. }
| ResponseItem::Reasoning { .. } => items.push(item),
ResponseItem::Other => {}
},
Err(e) => {
warn!("failed to parse item: {v:?}, error: {e}");
}
}
}
let saved = SavedSession {
session: session.clone(),
items: items.clone(),
state: state.clone(),
session_id: session.id,
};
let file = std::fs::OpenOptions::new()
.append(true)
.read(true)
.open(path)?;
let (tx, rx) = mpsc::channel::<RolloutCmd>(256);
tokio::task::spawn(rollout_writer(
tokio::fs::File::from_std(file),
rx,
None,
cwd,
));
info!("Resumed rollout successfully from {path:?}");
Ok((Self { tx }, saved))
}
pub async fn shutdown(&self) -> std::io::Result<()> {
let (tx_done, rx_done) = oneshot::channel();
match self.tx.send(RolloutCmd::Shutdown { ack: tx_done }).await {
@@ -316,28 +212,13 @@ async fn rollout_writer(
ResponseItem::Message { .. }
| ResponseItem::LocalShellCall { .. }
| ResponseItem::FunctionCall { .. }
| ResponseItem::FunctionCallOutput { .. }
| ResponseItem::Reasoning { .. } => {
| ResponseItem::FunctionCallOutput { .. } => {
writer.write_line(&item).await?;
}
ResponseItem::Other => {}
ResponseItem::Reasoning { .. } | ResponseItem::Other => {}
}
}
}
RolloutCmd::UpdateState(state) => {
#[derive(Serialize)]
struct StateLine<'a> {
record_type: &'static str,
#[serde(flatten)]
state: &'a SessionStateSnapshot,
}
writer
.write_line(&StateLine {
record_type: "state",
state: &state,
})
.await?;
}
RolloutCmd::Shutdown { ack } => {
let _ = ack.send(());
}

View File

@@ -21,7 +21,7 @@ pub enum SafetyCheck {
pub fn assess_patch_safety(
action: &ApplyPatchAction,
policy: AskForApproval,
sandbox_policy: &SandboxPolicy,
writable_roots: &[PathBuf],
cwd: &Path,
) -> SafetyCheck {
if action.is_empty() {
@@ -45,7 +45,7 @@ pub fn assess_patch_safety(
// is possible that paths in the patch are hard links to files outside the
// writable roots, so we should still run `apply_patch` in a sandbox in that
// case.
if is_write_patch_constrained_to_writable_paths(action, sandbox_policy, cwd)
if is_write_patch_constrained_to_writable_paths(action, writable_roots, cwd)
|| policy == AskForApproval::OnFailure
{
// Only autoapprove when we can actually enforce a sandbox. Otherwise
@@ -171,19 +171,13 @@ pub fn get_platform_sandbox() -> Option<SandboxType> {
fn is_write_patch_constrained_to_writable_paths(
action: &ApplyPatchAction,
sandbox_policy: &SandboxPolicy,
writable_roots: &[PathBuf],
cwd: &Path,
) -> bool {
// Early-exit if there are no declared writable roots.
let writable_roots = match sandbox_policy {
SandboxPolicy::ReadOnly => {
return false;
}
SandboxPolicy::DangerFullAccess => {
return true;
}
SandboxPolicy::WorkspaceWrite { .. } => sandbox_policy.get_writable_roots_with_cwd(cwd),
};
if writable_roots.is_empty() {
return false;
}
// Normalize a path by removing `.` and resolving `..` without touching the
// filesystem (works even if the file does not exist).
@@ -215,9 +209,15 @@ fn is_write_patch_constrained_to_writable_paths(
None => return false,
};
writable_roots
.iter()
.any(|writable_root| writable_root.is_path_writable(&abs))
writable_roots.iter().any(|root| {
let root_abs = if root.is_absolute() {
root.clone()
} else {
normalize(&cwd.join(root)).unwrap_or_else(|| cwd.join(root))
};
abs.starts_with(&root_abs)
})
};
for (path, change) in action.changes() {
@@ -231,10 +231,10 @@ fn is_write_patch_constrained_to_writable_paths(
if !is_path_writable(path) {
return false;
}
if let Some(dest) = move_path
&& !is_path_writable(dest)
{
return false;
if let Some(dest) = move_path {
if !is_path_writable(dest) {
return false;
}
}
}
}
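
The "normalize without touching the filesystem" step referenced above can be sketched as a purely lexical walk over path components; `None` signals a `..` that would escape the accumulated prefix. This mirrors the described behavior, not the exact implementation.

```rust
use std::path::{Component, Path, PathBuf};

// Resolve `.` and `..` lexically; bail out if `..` underflows.
fn normalize(path: &Path) -> Option<PathBuf> {
    let mut out = PathBuf::new();
    for c in path.components() {
        match c {
            Component::CurDir => {}
            Component::ParentDir => {
                if !out.pop() {
                    return None;
                }
            }
            other => out.push(other.as_os_str()),
        }
    }
    Some(out)
}

fn main() {
    assert_eq!(
        normalize(Path::new("/tmp/a/./b/../c")),
        Some(PathBuf::from("/tmp/a/c"))
    );
}
```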
@@ -245,57 +245,40 @@ fn is_write_patch_constrained_to_writable_paths(
#[cfg(test)]
mod tests {
#![allow(clippy::unwrap_used)]
use super::*;
use tempfile::TempDir;
#[test]
fn test_writable_roots_constraint() {
// Use a temporary directory as our workspace to avoid touching
// the real current working directory.
let tmp = TempDir::new().unwrap();
let cwd = tmp.path().to_path_buf();
let cwd = std::env::current_dir().unwrap();
let parent = cwd.parent().unwrap().to_path_buf();
// Helper to build a single-entry patch that adds a file at `p`.
// Helper to build a single-entry map representing a patch that adds a
// file at `p`.
let make_add_change = |p: PathBuf| ApplyPatchAction::new_add_for_test(&p, "".to_string());
let add_inside = make_add_change(cwd.join("inner.txt"));
let add_outside = make_add_change(parent.join("outside.txt"));
// Policy limited to the workspace only; exclude system temp roots so
// only `cwd` is writable by default.
let policy_workspace_only = SandboxPolicy::WorkspaceWrite {
writable_roots: vec![],
network_access: false,
exclude_tmpdir_env_var: true,
exclude_slash_tmp: true,
};
assert!(is_write_patch_constrained_to_writable_paths(
&add_inside,
&policy_workspace_only,
&[PathBuf::from(".")],
&cwd,
));
let add_outside_2 = make_add_change(parent.join("outside.txt"));
assert!(!is_write_patch_constrained_to_writable_paths(
&add_outside,
&policy_workspace_only,
&add_outside_2,
&[PathBuf::from(".")],
&cwd,
));
// With the parent dir explicitly added as a writable root, the
// outside write should be permitted.
let policy_with_parent = SandboxPolicy::WorkspaceWrite {
writable_roots: vec![parent.clone()],
network_access: false,
exclude_tmpdir_env_var: true,
exclude_slash_tmp: true,
};
// With parent dir added as writable root, it should pass.
assert!(is_write_patch_constrained_to_writable_paths(
&add_outside,
&policy_with_parent,
&[PathBuf::from("..")],
&cwd,
));
))
}
#[test]

View File

@@ -122,6 +122,7 @@ fn create_seatbelt_command_args(
#[cfg(test)]
mod tests {
#![expect(clippy::expect_used)]
use super::MACOS_SEATBELT_BASE_POLICY;
use super::create_seatbelt_command_args;
use crate::protocol::SandboxPolicy;

View File

@@ -70,13 +70,13 @@ pub async fn default_user_shell() -> Shell {
}
let stdout = String::from_utf8_lossy(&o.stdout);
for line in stdout.lines() {
if let Some(shell_path) = line.strip_prefix("UserShell: ")
&& shell_path.ends_with("/zsh")
{
return Shell::Zsh(ZshShell {
shell_path: shell_path.to_string(),
zshrc_path: format!("{home}/.zshrc"),
});
if let Some(shell_path) = line.strip_prefix("UserShell: ") {
if shell_path.ends_with("/zsh") {
return Shell::Zsh(ZshShell {
shell_path: shell_path.to_string(),
zshrc_path: format!("{home}/.zshrc"),
});
}
}
}
@@ -98,6 +98,7 @@ mod tests {
use std::process::Command;
#[tokio::test]
#[expect(clippy::unwrap_used)]
async fn test_current_shell_detects_zsh() {
let shell = Command::new("sh")
.arg("-c")
@@ -128,6 +129,7 @@ mod tests {
assert_eq!(actual_cmd, None);
}
#[expect(clippy::unwrap_used)]
#[tokio::test]
async fn test_run_with_profile_escaping_and_execution() {
let shell_path = "/bin/zsh";

View File

@@ -1,72 +0,0 @@
use std::sync::OnceLock;
static TERMINAL: OnceLock<String> = OnceLock::new();
pub fn user_agent() -> String {
TERMINAL.get_or_init(detect_terminal).to_string()
}
/// Sanitize a header value to be used in a User-Agent string.
///
/// This function replaces any characters that are not allowed in a User-Agent string with an underscore.
///
/// # Arguments
///
/// * `value` - The value to sanitize.
fn is_valid_header_value_char(c: char) -> bool {
c.is_ascii_alphanumeric() || c == '-' || c == '_' || c == '.' || c == '/'
}
fn sanitize_header_value(value: String) -> String {
value.replace(|c| !is_valid_header_value_char(c), "_")
}
fn detect_terminal() -> String {
sanitize_header_value(
if let Ok(tp) = std::env::var("TERM_PROGRAM")
&& !tp.trim().is_empty()
{
let ver = std::env::var("TERM_PROGRAM_VERSION").ok();
match ver {
Some(v) if !v.trim().is_empty() => format!("{tp}/{v}"),
_ => tp,
}
} else if let Ok(v) = std::env::var("WEZTERM_VERSION") {
if !v.trim().is_empty() {
format!("WezTerm/{v}")
} else {
"WezTerm".to_string()
}
} else if std::env::var("KITTY_WINDOW_ID").is_ok()
|| std::env::var("TERM")
.map(|t| t.contains("kitty"))
.unwrap_or(false)
{
"kitty".to_string()
} else if std::env::var("ALACRITTY_SOCKET").is_ok()
|| std::env::var("TERM")
.map(|t| t == "alacritty")
.unwrap_or(false)
{
"Alacritty".to_string()
} else if let Ok(v) = std::env::var("KONSOLE_VERSION") {
if !v.trim().is_empty() {
format!("Konsole/{v}")
} else {
"Konsole".to_string()
}
} else if std::env::var("GNOME_TERMINAL_SCREEN").is_ok() {
return "gnome-terminal".to_string();
} else if let Ok(v) = std::env::var("VTE_VERSION") {
if !v.trim().is_empty() {
format!("VTE/{v}")
} else {
"VTE".to_string()
}
} else if std::env::var("WT_SESSION").is_ok() {
return "WindowsTerminal".to_string();
} else {
std::env::var("TERM").unwrap_or_else(|_| "unknown".to_string())
},
)
}
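
The deleted sanitizer kept User-Agent values header-safe by replacing anything outside `[A-Za-z0-9._/-]` with an underscore; a behavior sketch:

```rust
fn sanitize_header_value(value: String) -> String {
    value.replace(
        |c: char| !(c.is_ascii_alphanumeric() || "-_./".contains(c)),
        "_",
    )
}

fn main() {
    // Space and parentheses are not valid header-value characters here.
    assert_eq!(
        sanitize_header_value("iTerm.app/3.5 (beta)".to_string()),
        "iTerm.app/3.5__beta_"
    );
}
```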

View File

@@ -466,6 +466,7 @@ fn is_windows_drive_or_unc_root(p: &std::path::Path) -> bool {
#[cfg(test)]
mod tests {
#![allow(clippy::unwrap_used)]
use super::*;
use pretty_assertions::assert_eq;
use tempfile::tempdir;

View File

@@ -4,16 +4,16 @@ pub fn get_codex_user_agent(originator: Option<&str>) -> String {
let build_version = env!("CARGO_PKG_VERSION");
let os_info = os_info::get();
format!(
"{}/{build_version} ({} {}; {}) {}",
"{}/{build_version} ({} {}; {})",
originator.unwrap_or(DEFAULT_ORIGINATOR),
os_info.os_type(),
os_info.version(),
os_info.architecture().unwrap_or("unknown"),
crate::terminal::user_agent()
)
}
#[cfg(test)]
#[allow(clippy::unwrap_used)]
mod tests {
use super::*;
@@ -28,10 +28,9 @@ mod tests {
fn test_macos() {
use regex_lite::Regex;
let user_agent = get_codex_user_agent(None);
let re = Regex::new(
r"^codex_cli_rs/\d+\.\d+\.\d+ \(Mac OS \d+\.\d+\.\d+; (x86_64|arm64)\) (\S+)$",
)
.unwrap();
let re =
Regex::new(r"^codex_cli_rs/\d+\.\d+\.\d+ \(Mac OS \d+\.\d+\.\d+; (x86_64|arm64)\)$")
.unwrap();
assert!(re.is_match(&user_agent));
}
}

View File

@@ -20,6 +20,7 @@ pub(crate) enum UserNotification {
#[cfg(test)]
mod tests {
#![allow(clippy::unwrap_used)]
use super::*;
#[test]

View File

@@ -1,5 +1,8 @@
#![expect(clippy::unwrap_used)]
use assert_cmd::Command as AssertCommand;
use codex_core::spawn::CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR;
use serde_json::Value;
use std::time::Duration;
use std::time::Instant;
use tempfile::TempDir;
@@ -210,7 +213,6 @@ async fn responses_api_stream_cli() {
assert!(stdout.contains("fixture hello"));
}
/// End-to-end: create a session (writes rollout), verify the file, then resume and confirm append.
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn integration_creates_and_checks_session_file() {
// Honor sandbox network restrictions for CI parity with the other tests.
@@ -258,65 +260,45 @@ async fn integration_creates_and_checks_session_file() {
String::from_utf8_lossy(&output.stderr)
);
// Wait for sessions dir to appear.
// 5. Sessions are written asynchronously; wait briefly for the directory to appear.
let sessions_dir = home.path().join("sessions");
let dir_deadline = Instant::now() + Duration::from_secs(5);
while !sessions_dir.exists() && Instant::now() < dir_deadline {
let start = Instant::now();
while !sessions_dir.exists() && start.elapsed() < Duration::from_secs(3) {
std::thread::sleep(Duration::from_millis(50));
}
assert!(sessions_dir.exists(), "sessions directory never appeared");
// Find the session file that contains `marker`.
let deadline = Instant::now() + Duration::from_secs(10);
let mut matching_path: Option<std::path::PathBuf> = None;
while Instant::now() < deadline && matching_path.is_none() {
for entry in WalkDir::new(&sessions_dir) {
let entry = match entry {
Ok(e) => e,
Err(_) => continue,
};
if !entry.file_type().is_file() {
continue;
}
if !entry.file_name().to_string_lossy().ends_with(".jsonl") {
continue;
}
// 6. Scan all session files and find the one that contains our marker.
let mut matching_files = vec![];
for entry in WalkDir::new(&sessions_dir) {
let entry = entry.unwrap();
if entry.file_type().is_file() && entry.file_name().to_string_lossy().ends_with(".jsonl") {
let path = entry.path();
let Ok(content) = std::fs::read_to_string(path) else {
continue;
};
let content = std::fs::read_to_string(path).unwrap();
let mut lines = content.lines();
if lines.next().is_none() {
continue;
}
// Skip SessionMeta (first line)
let _ = lines.next();
for line in lines {
if line.trim().is_empty() {
continue;
}
let item: serde_json::Value = match serde_json::from_str(line) {
Ok(v) => v,
Err(_) => continue,
};
if item.get("type").and_then(|t| t.as_str()) == Some("message")
&& let Some(c) = item.get("content")
&& c.to_string().contains(&marker)
{
matching_path = Some(path.to_path_buf());
break;
let item: Value = serde_json::from_str(line).unwrap();
if let Some("message") = item.get("type").and_then(|t| t.as_str()) {
if let Some(content) = item.get("content") {
if content.to_string().contains(&marker) {
matching_files.push(path.to_owned());
break;
}
}
}
}
}
if matching_path.is_none() {
std::thread::sleep(Duration::from_millis(50));
}
}
assert_eq!(
matching_files.len(),
1,
"Expected exactly one session file containing the marker, found {}",
matching_files.len()
);
let path = &matching_files[0];
let path = match matching_path {
Some(p) => p,
None => panic!("No session file containing the marker was found"),
};
// Basic sanity checks on location and metadata.
// 7. Verify directory structure: sessions/YYYY/MM/DD/filename.jsonl
let rel = match path.strip_prefix(&sessions_dir) {
Ok(r) => r,
Err(_) => panic!("session file should live under sessions/"),
@@ -345,6 +327,7 @@ async fn integration_creates_and_checks_session_file() {
day.len() == 2 && day.chars().all(|c| c.is_ascii_digit()),
"Day dir not zero-padded 2-digit numeric: {day}"
);
// Range checks (best-effort; won't fail on leading zeros)
if let Ok(m) = month.parse::<u8>() {
assert!((1..=12).contains(&m), "Month out of range: {m}");
}
@@ -352,99 +335,34 @@ async fn integration_creates_and_checks_session_file() {
assert!((1..=31).contains(&d), "Day out of range: {d}");
}
let content =
std::fs::read_to_string(&path).unwrap_or_else(|_| panic!("Failed to read session file"));
// 8. Parse SessionMeta line and basic sanity checks.
let content = std::fs::read_to_string(path).unwrap();
let mut lines = content.lines();
let meta_line = lines
.next()
.ok_or("missing session meta line")
.unwrap_or_else(|_| panic!("missing session meta line"));
let meta: serde_json::Value = serde_json::from_str(meta_line)
.unwrap_or_else(|_| panic!("Failed to parse session meta line as JSON"));
let meta: Value = serde_json::from_str(lines.next().unwrap()).unwrap();
assert!(meta.get("id").is_some(), "SessionMeta missing id");
assert!(
meta.get("timestamp").is_some(),
"SessionMeta missing timestamp"
);
// 9. Confirm at least one message contains the marker.
let mut found_message = false;
for line in lines {
if line.trim().is_empty() {
continue;
}
let Ok(item) = serde_json::from_str::<serde_json::Value>(line) else {
continue;
};
if item.get("type").and_then(|t| t.as_str()) == Some("message")
&& let Some(c) = item.get("content")
&& c.to_string().contains(&marker)
{
found_message = true;
break;
let item: Value = serde_json::from_str(line).unwrap();
if item.get("type").map(|t| t == "message").unwrap_or(false) {
if let Some(content) = item.get("content") {
if content.to_string().contains(&marker) {
found_message = true;
break;
}
}
}
}
assert!(
found_message,
"No message found in session file containing the marker"
);
// Second run: resume and append.
let orig_len = content.lines().count();
let marker2 = format!("integration-resume-{}", Uuid::new_v4());
let prompt2 = format!("echo {marker2}");
// Cross-platform safe resume override. On Windows, backslashes in a TOML string must be escaped
// or the parse will fail and the raw literal (including quotes) may be preserved all the way down
// to Config, which in turn breaks resume because the path is invalid. Normalize to forward slashes
// to sidestep the issue.
let resume_path_str = path.to_string_lossy().replace('\\', "/");
let resume_override = format!("experimental_resume=\"{resume_path_str}\"");
let mut cmd2 = AssertCommand::new("cargo");
cmd2.arg("run")
.arg("-p")
.arg("codex-cli")
.arg("--quiet")
.arg("--")
.arg("exec")
.arg("--skip-git-repo-check")
.arg("-c")
.arg(&resume_override)
.arg("-C")
.arg(env!("CARGO_MANIFEST_DIR"))
.arg(&prompt2);
cmd2.env("CODEX_HOME", home.path())
.env("OPENAI_API_KEY", "dummy")
.env("CODEX_RS_SSE_FIXTURE", &fixture)
.env("OPENAI_BASE_URL", "http://unused.local");
let output2 = cmd2.output().unwrap();
assert!(output2.status.success(), "resume codex-cli run failed");
// The rollout writer runs on a background async task; give it a moment to flush.
let mut new_len = orig_len;
let deadline = Instant::now() + Duration::from_secs(5);
let mut content2 = String::new();
while Instant::now() < deadline {
if let Ok(c) = std::fs::read_to_string(&path) {
let count = c.lines().count();
if count > orig_len {
content2 = c;
new_len = count;
break;
}
}
std::thread::sleep(Duration::from_millis(50));
}
if content2.is_empty() {
// last attempt
content2 = std::fs::read_to_string(&path).unwrap();
new_len = content2.lines().count();
}
assert!(new_len > orig_len, "rollout file did not grow after resume");
assert!(content2.contains(&marker), "rollout lost original marker");
assert!(
content2.contains(&marker2),
"rollout missing resumed marker"
);
// No resume on second run; resume feature removed.
}
/// Integration test to verify git info is collected and recorded in session files.

View File

@@ -1,3 +1,5 @@
#![allow(clippy::expect_used, clippy::unwrap_used)]
use codex_core::ConversationManager;
use codex_core::ModelProviderInfo;
use codex_core::NewConversation;
@@ -7,7 +9,6 @@ use codex_core::protocol::EventMsg;
use codex_core::protocol::InputItem;
use codex_core::protocol::Op;
use codex_core::spawn::CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR;
use codex_login::AuthMode;
use codex_login::CodexAuth;
use core_test_support::load_default_config_for_test;
use core_test_support::load_sse_fixture_with_id;
@@ -26,12 +27,10 @@ fn sse_completed(id: &str) -> String {
load_sse_fixture_with_id("tests/fixtures/completed_template.json", id)
}
#[expect(clippy::unwrap_used)]
fn assert_message_role(request_body: &serde_json::Value, role: &str) {
assert_eq!(request_body["role"].as_str().unwrap(), role);
}
#[expect(clippy::expect_used)]
fn assert_message_starts_with(request_body: &serde_json::Value, text: &str) {
let content = request_body["content"][0]["text"]
.as_str()
@@ -43,7 +42,6 @@ fn assert_message_starts_with(request_body: &serde_json::Value, text: &str) {
);
}
#[expect(clippy::expect_used)]
fn assert_message_ends_with(request_body: &serde_json::Value, text: &str) {
let content = request_body["content"][0]["text"]
.as_str()
@@ -55,61 +53,10 @@ fn assert_message_ends_with(request_body: &serde_json::Value, text: &str) {
);
}
/// Writes an `auth.json` into the provided `codex_home` with the specified parameters.
/// Returns the fake JWT string written to `tokens.id_token`.
#[expect(clippy::unwrap_used)]
fn write_auth_json(
codex_home: &TempDir,
openai_api_key: Option<&str>,
chatgpt_plan_type: &str,
access_token: &str,
account_id: Option<&str>,
) -> String {
use base64::Engine as _;
use serde_json::json;
let header = json!({ "alg": "none", "typ": "JWT" });
let payload = json!({
"email": "user@example.com",
"https://api.openai.com/auth": {
"chatgpt_plan_type": chatgpt_plan_type,
"chatgpt_account_id": account_id.unwrap_or("acc-123")
}
});
let b64 = |b: &[u8]| base64::engine::general_purpose::URL_SAFE_NO_PAD.encode(b);
let header_b64 = b64(&serde_json::to_vec(&header).unwrap());
let payload_b64 = b64(&serde_json::to_vec(&payload).unwrap());
let signature_b64 = b64(b"sig");
let fake_jwt = format!("{header_b64}.{payload_b64}.{signature_b64}");
let mut tokens = json!({
"id_token": fake_jwt,
"access_token": access_token,
"refresh_token": "refresh-test",
});
if let Some(acc) = account_id {
tokens["account_id"] = json!(acc);
}
let auth_json = json!({
"OPENAI_API_KEY": openai_api_key,
"tokens": tokens,
// RFC3339 datetime; value doesn't matter for these tests
"last_refresh": "2025-08-06T20:41:36.232376Z",
});
std::fs::write(
codex_home.path().join("auth.json"),
serde_json::to_string_pretty(&auth_json).unwrap(),
)
.unwrap();
fake_jwt
}
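
The removed helper builds an unsigned ("alg": "none") JWT by base64url-encoding a header, a payload, and a dummy signature. A sketch of decoding such a token's payload, assuming the same `base64` crate API used above; the token below encodes only an illustrative `email` claim.

```rust
use base64::Engine as _;

fn main() {
    // header: {"alg":"none"}  payload: {"email":"user@example.com"}  sig: "sig"
    let fake_jwt = "eyJhbGciOiJub25lIn0.eyJlbWFpbCI6InVzZXJAZXhhbXBsZS5jb20ifQ.c2ln";
    let payload_b64 = fake_jwt.split('.').nth(1).unwrap();
    let bytes = base64::engine::general_purpose::URL_SAFE_NO_PAD
        .decode(payload_b64)
        .unwrap();
    let payload: serde_json::Value = serde_json::from_slice(&bytes).unwrap();
    assert_eq!(payload["email"], "user@example.com");
}
```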
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn includes_session_id_and_model_headers_in_request() {
#![allow(clippy::unwrap_used)]
if std::env::var(CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR).is_ok() {
println!(
"Skipping test because it cannot execute when network is disabled in a Codex sandbox."
@@ -182,6 +129,8 @@ async fn includes_session_id_and_model_headers_in_request() {
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn includes_base_instructions_override_in_request() {
#![allow(clippy::unwrap_used)]
// Mock server
let server = MockServer::start().await;
@@ -238,6 +187,8 @@ async fn includes_base_instructions_override_in_request() {
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn originator_config_override_is_used() {
#![allow(clippy::unwrap_used)]
// Mock server
let server = MockServer::start().await;
@@ -260,7 +211,7 @@ async fn originator_config_override_is_used() {
let codex_home = TempDir::new().unwrap();
let mut config = load_default_config_for_test(&codex_home);
config.model_provider = model_provider;
config.responses_originator_header = "my_override".to_owned();
config.internal_originator = Some("my_override".to_string());
let conversation_manager = ConversationManager::default();
let codex = conversation_manager
@@ -287,6 +238,8 @@ async fn originator_config_override_is_used() {
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn chatgpt_auth_sends_correct_request() {
#![allow(clippy::unwrap_used)]
if std::env::var(CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR).is_ok() {
println!(
"Skipping test because it cannot execute when network is disabled in a Codex sandbox."
@@ -365,158 +318,10 @@ async fn chatgpt_auth_sends_correct_request() {
);
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn prefers_chatgpt_token_when_config_prefers_chatgpt() {
if std::env::var(CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR).is_ok() {
println!(
"Skipping test because it cannot execute when network is disabled in a Codex sandbox."
);
return;
}
// Mock server
let server = MockServer::start().await;
let first = ResponseTemplate::new(200)
.insert_header("content-type", "text/event-stream")
.set_body_raw(sse_completed("resp1"), "text/event-stream");
// Expect ChatGPT base path and correct headers
Mock::given(method("POST"))
.and(path("/v1/responses"))
.and(header_regex("Authorization", r"Bearer Access-123"))
.and(header_regex("chatgpt-account-id", r"acc-123"))
.respond_with(first)
.expect(1)
.mount(&server)
.await;
let model_provider = ModelProviderInfo {
base_url: Some(format!("{}/v1", server.uri())),
..built_in_model_providers()["openai"].clone()
};
// Init session
let codex_home = TempDir::new().unwrap();
// Write auth.json that contains both API key and ChatGPT tokens for a plan that should prefer ChatGPT.
let _jwt = write_auth_json(
&codex_home,
Some("sk-test-key"),
"pro",
"Access-123",
Some("acc-123"),
);
let mut config = load_default_config_for_test(&codex_home);
config.model_provider = model_provider;
config.preferred_auth_method = AuthMode::ChatGPT;
let conversation_manager = ConversationManager::default();
let NewConversation {
conversation: codex,
..
} = conversation_manager
.new_conversation(config)
.await
.expect("create new conversation");
codex
.submit(Op::UserInput {
items: vec![InputItem::Text {
text: "hello".into(),
}],
})
.await
.unwrap();
wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await;
// verify request body flags
let request = &server.received_requests().await.unwrap()[0];
let request_body = request.body_json::<serde_json::Value>().unwrap();
assert!(
!request_body["store"].as_bool().unwrap(),
"store should be false for ChatGPT auth"
);
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn prefers_apikey_when_config_prefers_apikey_even_with_chatgpt_tokens() {
if std::env::var(CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR).is_ok() {
println!(
"Skipping test because it cannot execute when network is disabled in a Codex sandbox."
);
return;
}
// Mock server
let server = MockServer::start().await;
let first = ResponseTemplate::new(200)
.insert_header("content-type", "text/event-stream")
.set_body_raw(sse_completed("resp1"), "text/event-stream");
// Expect API key header, no ChatGPT account header required.
Mock::given(method("POST"))
.and(path("/v1/responses"))
.and(header_regex("Authorization", r"Bearer sk-test-key"))
.respond_with(first)
.expect(1)
.mount(&server)
.await;
let model_provider = ModelProviderInfo {
base_url: Some(format!("{}/v1", server.uri())),
..built_in_model_providers()["openai"].clone()
};
// Init session
let codex_home = TempDir::new().unwrap();
// Write auth.json that contains both API key and ChatGPT tokens for a plan that should prefer ChatGPT,
// but config will force API key preference.
let _jwt = write_auth_json(
&codex_home,
Some("sk-test-key"),
"pro",
"Access-123",
Some("acc-123"),
);
let mut config = load_default_config_for_test(&codex_home);
config.model_provider = model_provider;
config.preferred_auth_method = AuthMode::ApiKey;
let conversation_manager = ConversationManager::default();
let NewConversation {
conversation: codex,
..
} = conversation_manager
.new_conversation(config)
.await
.expect("create new conversation");
codex
.submit(Op::UserInput {
items: vec![InputItem::Text {
text: "hello".into(),
}],
})
.await
.unwrap();
wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await;
// verify request body flags
let request = &server.received_requests().await.unwrap()[0];
let request_body = request.body_json::<serde_json::Value>().unwrap();
assert!(
request_body["store"].as_bool().unwrap(),
"store should be true for API key auth"
);
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn includes_user_instructions_message_in_request() {
#![allow(clippy::unwrap_used)]
let server = MockServer::start().await;
let first = ResponseTemplate::new(200)
@@ -568,15 +373,17 @@ async fn includes_user_instructions_message_in_request() {
.contains("be nice")
);
assert_message_role(&request_body["input"][0], "user");
assert_message_starts_with(&request_body["input"][0], "<user_instructions>");
assert_message_ends_with(&request_body["input"][0], "</user_instructions>");
assert_message_starts_with(&request_body["input"][0], "<environment_context>\n\n");
assert_message_ends_with(&request_body["input"][0], "</environment_context>");
assert_message_role(&request_body["input"][1], "user");
assert_message_starts_with(&request_body["input"][1], "<environment_context>");
assert_message_ends_with(&request_body["input"][1], "</environment_context>");
assert_message_starts_with(&request_body["input"][1], "<user_instructions>\n\n");
assert_message_ends_with(&request_body["input"][1], "</user_instructions>");
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn azure_overrides_assign_properties_used_for_responses_url() {
#![allow(clippy::unwrap_used)]
let existing_env_var_with_random_value = if cfg!(windows) { "USERNAME" } else { "USER" };
// Mock server
@@ -653,6 +460,8 @@ async fn azure_overrides_assign_properties_used_for_responses_url() {
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn env_var_overrides_loaded_auth() {
#![allow(clippy::unwrap_used)]
let existing_env_var_with_random_value = if cfg!(windows) { "USERNAME" } else { "USER" };
// Mock server

View File

@@ -1,4 +1,4 @@
#![expect(clippy::expect_used)]
#![allow(clippy::expect_used)]
use tempfile::TempDir;
@@ -47,26 +47,6 @@ pub fn load_sse_fixture(path: impl AsRef<std::path::Path>) -> String {
.collect()
}
pub fn load_sse_fixture_with_id_from_str(raw: &str, id: &str) -> String {
let replaced = raw.replace("__ID__", id);
let events: Vec<serde_json::Value> =
serde_json::from_str(&replaced).expect("parse JSON fixture");
events
.into_iter()
.map(|e| {
let kind = e
.get("type")
.and_then(|v| v.as_str())
.expect("fixture event missing type");
if e.as_object().map(|o| o.len() == 1).unwrap_or(false) {
format!("event: {kind}\n\n")
} else {
format!("event: {kind}\ndata: {e}\n\n")
}
})
.collect()
}
/// Same as [`load_sse_fixture`], but replaces the placeholder `__ID__` in the
/// fixture template with the supplied identifier before parsing. This lets a
/// single JSON template be reused by multiple tests that each need a unique

View File

@@ -1,4 +1,5 @@
#![cfg(target_os = "macos")]
#![expect(clippy::unwrap_used, clippy::expect_used)]
use std::collections::HashMap;
@@ -23,7 +24,6 @@ fn skip_test() -> bool {
false
}
#[expect(clippy::expect_used)]
async fn run_test_cmd(tmp: TempDir, cmd: Vec<&str>) -> Result<ExecToolCallOutput> {
let sandbox_type = get_platform_sandbox().expect("should be able to get sandbox type");
assert_eq!(sandbox_type, SandboxType::MacosSeatbelt);
@@ -68,6 +68,7 @@ async fn truncates_output_lines() {
let tmp = TempDir::new().expect("should be able to create temp dir");
let cmd = vec!["seq", "300"];
#[expect(clippy::unwrap_used)]
let output = run_test_cmd(tmp, cmd).await.unwrap();
let expected_output = (1..=256)

View File

@@ -17,7 +17,7 @@ fn require_api_key() -> String {
/// Helper that spawns the binary inside a TempDir with minimal flags. Returns (Assert, TempDir).
fn run_live(prompt: &str) -> (assert_cmd::assert::Assert, TempDir) {
#![expect(clippy::unwrap_used)]
#![allow(clippy::unwrap_used)]
use std::io::Read;
use std::io::Write;
use std::thread;
@@ -113,6 +113,7 @@ fn run_live(prompt: &str) -> (assert_cmd::assert::Assert, TempDir) {
#[ignore]
#[test]
fn live_create_file_hello_txt() {
#![allow(clippy::unwrap_used)]
if std::env::var("OPENAI_API_KEY").is_err() {
eprintln!("skipping live_create_file_hello_txt OPENAI_API_KEY not set");
return;

View File

@@ -1,13 +1,11 @@
#![allow(clippy::expect_used, clippy::unwrap_used)]
use codex_core::ConversationManager;
use codex_core::ModelProviderInfo;
use codex_core::built_in_model_providers;
use codex_core::protocol::AskForApproval;
use codex_core::protocol::EventMsg;
use codex_core::protocol::InputItem;
use codex_core::protocol::Op;
use codex_core::protocol::SandboxPolicy;
use codex_core::protocol_config_types::ReasoningEffort;
use codex_core::protocol_config_types::ReasoningSummary;
use codex_login::CodexAuth;
use core_test_support::load_default_config_for_test;
use core_test_support::load_sse_fixture_with_id;
@@ -26,6 +24,7 @@ fn sse_completed(id: &str) -> String {
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn prefixes_context_and_instructions_once_and_consistently_across_requests() {
#![allow(clippy::unwrap_used)]
use pretty_assertions::assert_eq;
let server = MockServer::start().await;
@@ -86,7 +85,7 @@ async fn prefixes_context_and_instructions_once_and_consistently_across_requests
assert_eq!(requests.len(), 2, "expected two POST requests");
let expected_env_text = format!(
"<environment_context>\nCurrent working directory: {}\nApproval policy: on-request\nSandbox mode: read-only\nNetwork access: restricted\n</environment_context>",
"<environment_context>\n\nCurrent working directory: {}\nApproval policy: on-request\nSandbox policy: read-only\nNetwork access: restricted\n\n\n</environment_context>",
cwd.path().to_string_lossy()
);
let expected_ui_text =
@@ -114,7 +113,7 @@ async fn prefixes_context_and_instructions_once_and_consistently_across_requests
let body1 = requests[0].body_json::<serde_json::Value>().unwrap();
assert_eq!(
body1["input"],
serde_json::json!([expected_ui_msg, expected_env_msg, expected_user_message_1])
serde_json::json!([expected_env_msg, expected_ui_msg, expected_user_message_1])
);
let expected_user_message_2 = serde_json::json!({
@@ -133,230 +132,3 @@ async fn prefixes_context_and_instructions_once_and_consistently_across_requests
);
assert_eq!(body2["input"], expected_body2);
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn overrides_turn_context_but_keeps_cached_prefix_and_key_constant() {
use pretty_assertions::assert_eq;
let server = MockServer::start().await;
let sse = sse_completed("resp");
let template = ResponseTemplate::new(200)
.insert_header("content-type", "text/event-stream")
.set_body_raw(sse, "text/event-stream");
// Expect two POSTs to /v1/responses
Mock::given(method("POST"))
.and(path("/v1/responses"))
.respond_with(template)
.expect(2)
.mount(&server)
.await;
let model_provider = ModelProviderInfo {
base_url: Some(format!("{}/v1", server.uri())),
..built_in_model_providers()["openai"].clone()
};
let cwd = TempDir::new().unwrap();
let codex_home = TempDir::new().unwrap();
let mut config = load_default_config_for_test(&codex_home);
config.cwd = cwd.path().to_path_buf();
config.model_provider = model_provider;
config.user_instructions = Some("be consistent and helpful".to_string());
let conversation_manager = ConversationManager::default();
let codex = conversation_manager
.new_conversation_with_auth(config, Some(CodexAuth::from_api_key("Test API Key")))
.await
.expect("create new conversation")
.conversation;
// First turn
codex
.submit(Op::UserInput {
items: vec![InputItem::Text {
text: "hello 1".into(),
}],
})
.await
.unwrap();
wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await;
// Change everything about the turn context.
let new_cwd = TempDir::new().unwrap();
let writable = TempDir::new().unwrap();
codex
.submit(Op::OverrideTurnContext {
cwd: Some(new_cwd.path().to_path_buf()),
approval_policy: Some(AskForApproval::Never),
sandbox_policy: Some(SandboxPolicy::WorkspaceWrite {
writable_roots: vec![writable.path().to_path_buf()],
network_access: true,
exclude_tmpdir_env_var: true,
exclude_slash_tmp: true,
}),
model: Some("o3".to_string()),
effort: Some(ReasoningEffort::High),
summary: Some(ReasoningSummary::Detailed),
})
.await
.unwrap();
// Second turn after overrides
codex
.submit(Op::UserInput {
items: vec![InputItem::Text {
text: "hello 2".into(),
}],
})
.await
.unwrap();
wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await;
// Verify we issued exactly two requests, and the cached prefix stayed identical.
let requests = server.received_requests().await.unwrap();
assert_eq!(requests.len(), 2, "expected two POST requests");
let body1 = requests[0].body_json::<serde_json::Value>().unwrap();
let body2 = requests[1].body_json::<serde_json::Value>().unwrap();
// prompt_cache_key should remain constant across overrides
assert_eq!(
body1["prompt_cache_key"], body2["prompt_cache_key"],
"prompt_cache_key should not change across overrides"
);
// The entire prefix from the first request should be identical and reused
// as the prefix of the second request, ensuring cache hit potential.
let expected_user_message_2 = serde_json::json!({
"type": "message",
"id": serde_json::Value::Null,
"role": "user",
"content": [ { "type": "input_text", "text": "hello 2" } ]
});
// After overriding the turn context, the environment context should be emitted again
// reflecting the new cwd, approval policy and sandbox settings.
let expected_env_text_2 = format!(
"<environment_context>\nCurrent working directory: {}\nApproval policy: never\nSandbox mode: workspace-write\nNetwork access: enabled\n</environment_context>",
new_cwd.path().to_string_lossy()
);
let expected_env_msg_2 = serde_json::json!({
"type": "message",
"id": serde_json::Value::Null,
"role": "user",
"content": [ { "type": "input_text", "text": expected_env_text_2 } ]
});
let expected_body2 = serde_json::json!(
[
body1["input"].as_array().unwrap().as_slice(),
[expected_env_msg_2, expected_user_message_2].as_slice(),
]
.concat()
);
assert_eq!(body2["input"], expected_body2);
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn per_turn_overrides_keep_cached_prefix_and_key_constant() {
use pretty_assertions::assert_eq;
let server = MockServer::start().await;
let sse = sse_completed("resp");
let template = ResponseTemplate::new(200)
.insert_header("content-type", "text/event-stream")
.set_body_raw(sse, "text/event-stream");
// Expect two POSTs to /v1/responses
Mock::given(method("POST"))
.and(path("/v1/responses"))
.respond_with(template)
.expect(2)
.mount(&server)
.await;
let model_provider = ModelProviderInfo {
base_url: Some(format!("{}/v1", server.uri())),
..built_in_model_providers()["openai"].clone()
};
let cwd = TempDir::new().unwrap();
let codex_home = TempDir::new().unwrap();
let mut config = load_default_config_for_test(&codex_home);
config.cwd = cwd.path().to_path_buf();
config.model_provider = model_provider;
config.user_instructions = Some("be consistent and helpful".to_string());
let conversation_manager = ConversationManager::default();
let codex = conversation_manager
.new_conversation_with_auth(config, Some(CodexAuth::from_api_key("Test API Key")))
.await
.expect("create new conversation")
.conversation;
// First turn
codex
.submit(Op::UserInput {
items: vec![InputItem::Text {
text: "hello 1".into(),
}],
})
.await
.unwrap();
wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await;
// Second turn using per-turn overrides via UserTurn
let new_cwd = TempDir::new().unwrap();
let writable = TempDir::new().unwrap();
codex
.submit(Op::UserTurn {
items: vec![InputItem::Text {
text: "hello 2".into(),
}],
cwd: new_cwd.path().to_path_buf(),
approval_policy: AskForApproval::Never,
sandbox_policy: SandboxPolicy::WorkspaceWrite {
writable_roots: vec![writable.path().to_path_buf()],
network_access: true,
exclude_tmpdir_env_var: true,
exclude_slash_tmp: true,
},
model: "o3".to_string(),
effort: ReasoningEffort::High,
summary: ReasoningSummary::Detailed,
})
.await
.unwrap();
wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await;
// Verify we issued exactly two requests, and the cached prefix stayed identical.
let requests = server.received_requests().await.unwrap();
assert_eq!(requests.len(), 2, "expected two POST requests");
let body1 = requests[0].body_json::<serde_json::Value>().unwrap();
let body2 = requests[1].body_json::<serde_json::Value>().unwrap();
// prompt_cache_key should remain constant across per-turn overrides
assert_eq!(
body1["prompt_cache_key"], body2["prompt_cache_key"],
"prompt_cache_key should not change across per-turn overrides"
);
// The entire prefix from the first request should be identical and reused
// as the prefix of the second request.
let expected_user_message_2 = serde_json::json!({
"type": "message",
"id": serde_json::Value::Null,
"role": "user",
"content": [ { "type": "input_text", "text": "hello 2" } ]
});
let expected_body2 = serde_json::json!(
[
body1["input"].as_array().unwrap().as_slice(),
[expected_user_message_2].as_slice(),
]
.concat()
);
assert_eq!(body2["input"], expected_body2);
}

View File

@@ -1,7 +1,5 @@
#![cfg(target_os = "macos")]
//! Tests for the macOS sandboxing that are specific to Seatbelt.
//! Tests that apply to both Mac and Linux sandboxing should go in sandbox.rs.
#![expect(clippy::expect_used)]
use std::collections::HashMap;
use std::path::Path;
@@ -159,7 +157,6 @@ async fn read_only_forbids_all_writes() {
.await;
}
#[expect(clippy::expect_used)]
fn create_test_scenario(tmp: &TempDir) -> TestScenario {
let repo_parent = tmp.path().to_path_buf();
let repo_root = repo_parent.join("repo");
@@ -177,7 +174,6 @@ fn create_test_scenario(tmp: &TempDir) -> TestScenario {
}
}
#[expect(clippy::expect_used)]
/// Note that `path` must be absolute.
async fn touch(path: &Path, policy: &SandboxPolicy) -> bool {
assert!(path.is_absolute(), "Path must be absolute: {path:?}");

View File

@@ -25,6 +25,7 @@ fn sse_completed(id: &str) -> String {
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn continue_after_stream_error() {
#![allow(clippy::unwrap_used)]
if std::env::var(CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR).is_ok() {
println!(
"Skipping test because it cannot execute when network is disabled in a Codex sandbox."

View File

@@ -33,6 +33,8 @@ fn sse_completed(id: &str) -> String {
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn retries_on_early_close() {
#![allow(clippy::unwrap_used)]
if std::env::var(CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR).is_ok() {
println!(
"Skipping test because it cannot execute when network is disabled in a Codex sandbox."

View File

@@ -26,7 +26,6 @@ codex-common = { path = "../common", features = [
] }
codex-core = { path = "../core" }
codex-ollama = { path = "../ollama" }
codex-protocol = { path = "../protocol" }
owo-colors = "4.2.0"
serde_json = "1"
shlex = "1.3.0"
@@ -42,8 +41,5 @@ tracing-subscriber = { version = "0.3.19", features = ["env-filter"] }
[dev-dependencies]
assert_cmd = "2"
core_test_support = { path = "../core/tests/common" }
libc = "0.2"
predicates = "3"
tempfile = "3.13.0"
wiremock = "0.6"

View File

@@ -29,9 +29,9 @@ pub(crate) fn handle_last_message(last_agent_message: Option<&str>, output_file:
}
fn write_last_message_file(contents: &str, last_message_path: Option<&Path>) {
if let Some(path) = last_message_path
&& let Err(e) = std::fs::write(path, contents)
{
eprintln!("Failed to write last message file {path:?}: {e}");
if let Some(path) = last_message_path {
if let Err(e) = std::fs::write(path, contents) {
eprintln!("Failed to write last message file {path:?}: {e}");
}
}
}
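This hunk shows a pattern repeated throughout the diff: let-chain syntax (`if let ... && cond`) unrolled back into nested `if let` blocks. The two forms are behaviorally identical; only the minimum toolchain differs. A small illustration of the equivalence (ours, not from the codebase):

fn describe(opt: Option<i32>) {
    // Let-chain form (newer toolchains), as removed by this rollback:
    // if let Some(n) = opt && n > 0 {
    //     println!("positive: {n}");
    // }

    // Nested form, as restored by this rollback:
    if let Some(n) = opt {
        if n > 0 {
            println!("positive: {n}");
        }
    }
}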

View File

@@ -21,7 +21,6 @@ use codex_core::protocol::PatchApplyBeginEvent;
use codex_core::protocol::PatchApplyEndEvent;
use codex_core::protocol::SessionConfiguredEvent;
use codex_core::protocol::TaskCompleteEvent;
use codex_core::protocol::TurnAbortReason;
use codex_core::protocol::TurnDiffEvent;
use owo_colors::OwoColorize;
use owo_colors::Style;
@@ -192,7 +191,7 @@ impl EventProcessor for EventProcessorWithHumanOutput {
self.answer_started = true;
}
print!("{delta}");
#[expect(clippy::expect_used)]
#[allow(clippy::expect_used)]
std::io::stdout().flush().expect("could not flush stdout");
}
EventMsg::AgentReasoningDelta(AgentReasoningDeltaEvent { delta }) => {
@@ -208,7 +207,7 @@ impl EventProcessor for EventProcessorWithHumanOutput {
self.reasoning_started = true;
}
print!("{delta}");
#[expect(clippy::expect_used)]
#[allow(clippy::expect_used)]
std::io::stdout().flush().expect("could not flush stdout");
}
EventMsg::AgentReasoningSectionBreak(_) => {
@@ -216,7 +215,7 @@ impl EventProcessor for EventProcessorWithHumanOutput {
return CodexStatus::Running;
}
println!();
#[expect(clippy::expect_used)]
#[allow(clippy::expect_used)]
std::io::stdout().flush().expect("could not flush stdout");
}
EventMsg::AgentReasoningRawContent(AgentReasoningRawContentEvent { text }) => {
@@ -225,7 +224,7 @@ impl EventProcessor for EventProcessorWithHumanOutput {
}
if !self.raw_reasoning_started {
print!("{text}");
#[expect(clippy::expect_used)]
#[allow(clippy::expect_used)]
std::io::stdout().flush().expect("could not flush stdout");
} else {
println!();
@@ -242,7 +241,7 @@ impl EventProcessor for EventProcessorWithHumanOutput {
self.raw_reasoning_started = true;
}
print!("{delta}");
#[expect(clippy::expect_used)]
#[allow(clippy::expect_used)]
std::io::stdout().flush().expect("could not flush stdout");
}
EventMsg::AgentMessage(AgentMessageEvent { message }) => {
@@ -523,17 +522,6 @@ impl EventProcessor for EventProcessorWithHumanOutput {
EventMsg::GetHistoryEntryResponse(_) => {
// Currently ignored in exec output.
}
EventMsg::McpListToolsResponse(_) => {
// Currently ignored in exec output.
}
EventMsg::TurnAborted(abort_reason) => match abort_reason.reason {
TurnAbortReason::Interrupted => {
ts_println!(self, "task interrupted");
}
TurnAbortReason::Replaced => {
ts_println!(self, "task aborted: replaced by a new task");
}
},
EventMsg::ShutdownComplete => return CodexStatus::Shutdown,
}
CodexStatus::Running
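The `#[expect(...)]` → `#[allow(...)]` swaps in this file are part of the same rollback. Both attributes suppress a lint, but `#[expect]` additionally warns when the suppressed lint never fires, so it cannot silently go stale. A tiny illustration (ours; `#[expect]` requires Rust 1.81+):

#[allow(clippy::unwrap_used)] // silent whether or not unwrap() is used below
fn with_allow() -> i32 {
    Some(1).unwrap()
}

#[expect(clippy::unwrap_used)] // warns if this function stops using unwrap()
fn with_expect() -> i32 {
    Some(1).unwrap()
}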

View File

@@ -28,7 +28,7 @@ impl EventProcessor for EventProcessorWithJsonOutput {
.into_iter()
.map(|(key, value)| (key.to_string(), value))
.collect::<HashMap<String, String>>();
#[expect(clippy::expect_used)]
#[allow(clippy::expect_used)]
let config_json =
serde_json::to_string(&entries).expect("Failed to serialize config summary to JSON");
println!("{config_json}");

View File

@@ -13,6 +13,7 @@ use codex_core::ConversationManager;
use codex_core::NewConversation;
use codex_core::config::Config;
use codex_core::config::ConfigOverrides;
use codex_core::config_types::SandboxMode;
use codex_core::protocol::AskForApproval;
use codex_core::protocol::Event;
use codex_core::protocol::EventMsg;
@@ -21,7 +22,6 @@ use codex_core::protocol::Op;
use codex_core::protocol::TaskCompleteEvent;
use codex_core::util::is_inside_git_repo;
use codex_ollama::DEFAULT_OSS_MODEL;
use codex_protocol::config_types::SandboxMode;
use event_processor_with_human_output::EventProcessorWithHumanOutput;
use event_processor_with_json_output::EventProcessorWithJsonOutput;
use tracing::debug;
@@ -146,7 +146,6 @@ pub async fn run_main(cli: Cli, codex_linux_sandbox_exe: Option<PathBuf>) -> any
codex_linux_sandbox_exe,
base_instructions: None,
include_plan_tool: None,
include_apply_patch_tool: None,
disable_response_storage: oss.then_some(true),
show_raw_agent_reasoning: oss.then_some(true),
};

View File

@@ -1,5 +1,3 @@
#![allow(clippy::expect_used, clippy::unwrap_used)]
use anyhow::Context;
use assert_cmd::prelude::*;
use codex_core::CODEX_APPLY_PATCH_ARG1;
@@ -39,152 +37,3 @@ fn test_standalone_exec_cli_can_use_apply_patch() -> anyhow::Result<()> {
);
Ok(())
}
#[cfg(not(target_os = "windows"))]
#[tokio::test]
async fn test_apply_patch_tool() -> anyhow::Result<()> {
use core_test_support::load_sse_fixture_with_id_from_str;
use tempfile::TempDir;
use wiremock::Mock;
use wiremock::MockServer;
use wiremock::ResponseTemplate;
use wiremock::matchers::method;
use wiremock::matchers::path;
const SSE_TOOL_CALL_ADD: &str = r#"[
{
"type": "response.output_item.done",
"item": {
"type": "function_call",
"name": "apply_patch",
"arguments": "{\n \"input\": \"*** Begin Patch\\n*** Add File: test.md\\n+Hello world\\n*** End Patch\"\n}",
"call_id": "__ID__"
}
},
{
"type": "response.completed",
"response": {
"id": "__ID__",
"usage": {
"input_tokens": 0,
"input_tokens_details": null,
"output_tokens": 0,
"output_tokens_details": null,
"total_tokens": 0
},
"output": []
}
}
]"#;
const SSE_TOOL_CALL_UPDATE: &str = r#"[
{
"type": "response.output_item.done",
"item": {
"type": "function_call",
"name": "apply_patch",
"arguments": "{\n \"input\": \"*** Begin Patch\\n*** Update File: test.md\\n@@\\n-Hello world\\n+Final text\\n*** End Patch\"\n}",
"call_id": "__ID__"
}
},
{
"type": "response.completed",
"response": {
"id": "__ID__",
"usage": {
"input_tokens": 0,
"input_tokens_details": null,
"output_tokens": 0,
"output_tokens_details": null,
"total_tokens": 0
},
"output": []
}
}
]"#;
const SSE_TOOL_CALL_COMPLETED: &str = r#"[
{
"type": "response.completed",
"response": {
"id": "__ID__",
"usage": {
"input_tokens": 0,
"input_tokens_details": null,
"output_tokens": 0,
"output_tokens_details": null,
"total_tokens": 0
},
"output": []
}
}
]"#;
// Start a mock model server
let server = MockServer::start().await;
// First response: model calls apply_patch to create test.md
let first = ResponseTemplate::new(200)
.insert_header("content-type", "text/event-stream")
.set_body_raw(
load_sse_fixture_with_id_from_str(SSE_TOOL_CALL_ADD, "call1"),
"text/event-stream",
);
Mock::given(method("POST"))
// .and(path("/v1/responses"))
.respond_with(first)
.up_to_n_times(1)
.mount(&server)
.await;
// Second response: model calls apply_patch to update test.md
let second = ResponseTemplate::new(200)
.insert_header("content-type", "text/event-stream")
.set_body_raw(
load_sse_fixture_with_id_from_str(SSE_TOOL_CALL_UPDATE, "call2"),
"text/event-stream",
);
Mock::given(method("POST"))
.and(path("/v1/responses"))
.respond_with(second)
.up_to_n_times(1)
.mount(&server)
.await;
let final_completed = ResponseTemplate::new(200)
.insert_header("content-type", "text/event-stream")
.set_body_raw(
load_sse_fixture_with_id_from_str(SSE_TOOL_CALL_COMPLETED, "resp3"),
"text/event-stream",
);
Mock::given(method("POST"))
// .and(path("/v1/responses"))
.respond_with(final_completed)
.expect(1)
.mount(&server)
.await;
let tmp_cwd = TempDir::new().unwrap();
Command::cargo_bin("codex-exec")
.context("should find binary for codex-exec")?
.current_dir(tmp_cwd.path())
.env("CODEX_HOME", tmp_cwd.path())
.env("OPENAI_API_KEY", "dummy")
.env("OPENAI_BASE_URL", format!("{}/v1", server.uri()))
.arg("--skip-git-repo-check")
.arg("-s")
.arg("workspace-write")
.arg("foo")
.assert()
.success();
// Verify final file contents
let final_path = tmp_cwd.path().join("test.md");
let contents = std::fs::read_to_string(&final_path)
.unwrap_or_else(|e| panic!("failed reading {}: {e}", final_path.display()));
assert_eq!(contents, "Final text\n");
Ok(())
}
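The SSE fixtures above drive the `apply_patch` tool with its plain-text patch envelope: a payload delimited by `*** Begin Patch` / `*** End Patch`, with `*** Add File:` and `*** Update File:` sections whose body lines carry `+`/`-` prefixes. A sketch of building the add-file envelope used in the first fixture (the helper is ours, for illustration only):

/// Build an apply_patch envelope that adds a single file.
fn add_file_patch(path: &str, contents: &str) -> String {
    let mut patch = String::from("*** Begin Patch\n");
    patch.push_str(&format!("*** Add File: {path}\n"));
    for line in contents.lines() {
        // Added lines carry a '+' prefix, mirroring the fixtures above.
        patch.push_str(&format!("+{line}\n"));
    }
    patch.push_str("*** End Patch");
    patch
}

Calling add_file_patch("test.md", "Hello world") reproduces the patch embedded in SSE_TOOL_CALL_ADD.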

View File

@@ -1,219 +0,0 @@
#![cfg(unix)]
use codex_core::protocol::SandboxPolicy;
use codex_core::spawn::StdioPolicy;
use std::collections::HashMap;
use std::future::Future;
use std::io;
use std::path::PathBuf;
use std::process::ExitStatus;
use tokio::process::Child;
#[cfg(target_os = "macos")]
async fn spawn_command_under_sandbox(
command: Vec<String>,
sandbox_policy: &SandboxPolicy,
cwd: PathBuf,
stdio_policy: StdioPolicy,
env: HashMap<String, String>,
) -> std::io::Result<Child> {
use codex_core::seatbelt::spawn_command_under_seatbelt;
spawn_command_under_seatbelt(command, sandbox_policy, cwd, stdio_policy, env).await
}
#[cfg(target_os = "linux")]
async fn spawn_command_under_sandbox(
command: Vec<String>,
sandbox_policy: &SandboxPolicy,
cwd: PathBuf,
stdio_policy: StdioPolicy,
env: HashMap<String, String>,
) -> std::io::Result<Child> {
use codex_core::landlock::spawn_command_under_linux_sandbox;
let codex_linux_sandbox_exe = assert_cmd::cargo::cargo_bin("codex-exec");
spawn_command_under_linux_sandbox(
codex_linux_sandbox_exe,
command,
sandbox_policy,
cwd,
stdio_policy,
env,
)
.await
}
#[tokio::test]
async fn python_multiprocessing_lock_works_under_sandbox() {
#[cfg(target_os = "macos")]
let writable_roots = Vec::<PathBuf>::new();
// From https://man7.org/linux/man-pages/man7/sem_overview.7.html
//
// > On Linux, named semaphores are created in a virtual filesystem,
// > normally mounted under /dev/shm.
#[cfg(target_os = "linux")]
let writable_roots = vec![PathBuf::from("/dev/shm")];
let policy = SandboxPolicy::WorkspaceWrite {
writable_roots,
network_access: false,
exclude_tmpdir_env_var: false,
exclude_slash_tmp: false,
};
let python_code = r#"import multiprocessing
from multiprocessing import Lock, Process
def f(lock):
with lock:
print("Lock acquired in child process")
if __name__ == '__main__':
lock = Lock()
p = Process(target=f, args=(lock,))
p.start()
p.join()
"#;
let mut child = spawn_command_under_sandbox(
vec![
"python3".to_string(),
"-c".to_string(),
python_code.to_string(),
],
&policy,
std::env::current_dir().expect("should be able to get current dir"),
StdioPolicy::Inherit,
HashMap::new(),
)
.await
.expect("should be able to spawn python under sandbox");
let status = child.wait().await.expect("should wait for child process");
assert!(status.success(), "python exited with {status:?}");
}
fn unix_sock_body() {
unsafe {
let mut fds = [0i32; 2];
let r = libc::socketpair(libc::AF_UNIX, libc::SOCK_DGRAM, 0, fds.as_mut_ptr());
assert_eq!(
r,
0,
"socketpair(AF_UNIX, SOCK_DGRAM) failed: {}",
io::Error::last_os_error()
);
let msg = b"hello_unix";
// write() from one end (generic write is allowed)
let sent = libc::write(fds[0], msg.as_ptr() as *const libc::c_void, msg.len());
assert!(sent >= 0, "write() failed: {}", io::Error::last_os_error());
// recvfrom() on the other end. We don't need the address for socketpair,
// so we pass null pointers for the source address.
let mut buf = [0u8; 64];
let recvd = libc::recvfrom(
fds[1],
buf.as_mut_ptr() as *mut libc::c_void,
buf.len(),
0,
std::ptr::null_mut(),
std::ptr::null_mut(),
);
assert!(
recvd >= 0,
"recvfrom() failed: {}",
io::Error::last_os_error()
);
let recvd_slice = &buf[..(recvd as usize)];
assert_eq!(
recvd_slice,
&msg[..],
"payload mismatch: sent {} bytes, got {} bytes",
msg.len(),
recvd
);
// Also exercise AF_UNIX stream socketpair quickly to ensure AF_UNIX in general works.
let mut sfds = [0i32; 2];
let sr = libc::socketpair(libc::AF_UNIX, libc::SOCK_STREAM, 0, sfds.as_mut_ptr());
assert_eq!(
sr,
0,
"socketpair(AF_UNIX, SOCK_STREAM) failed: {}",
io::Error::last_os_error()
);
let snt2 = libc::write(sfds[0], msg.as_ptr() as *const libc::c_void, msg.len());
assert!(
snt2 >= 0,
"write(stream) failed: {}",
io::Error::last_os_error()
);
let mut b2 = [0u8; 64];
let rcv2 = libc::recv(sfds[1], b2.as_mut_ptr() as *mut libc::c_void, b2.len(), 0);
assert!(
rcv2 >= 0,
"recv(stream) failed: {}",
io::Error::last_os_error()
);
// Clean up
let _ = libc::close(sfds[0]);
let _ = libc::close(sfds[1]);
let _ = libc::close(fds[0]);
let _ = libc::close(fds[1]);
}
}
#[tokio::test]
async fn allow_unix_socketpair_recvfrom() {
run_code_under_sandbox(
"allow_unix_socketpair_recvfrom",
&SandboxPolicy::ReadOnly,
|| async { unix_sock_body() },
)
.await
.expect("should be able to reexec");
}
const IN_SANDBOX_ENV_VAR: &str = "IN_SANDBOX";
#[expect(clippy::expect_used)]
pub async fn run_code_under_sandbox<F, Fut>(
test_selector: &str,
policy: &SandboxPolicy,
child_body: F,
) -> io::Result<Option<ExitStatus>>
where
F: FnOnce() -> Fut + Send + 'static,
Fut: Future<Output = ()> + Send + 'static,
{
if std::env::var(IN_SANDBOX_ENV_VAR).is_err() {
let exe = std::env::current_exe()?;
let mut cmds = vec![exe.to_string_lossy().into_owned(), "--exact".into()];
let mut stdio_policy = StdioPolicy::RedirectForShellTool;
// Forward --nocapture when present and pick the matching stdio policy.
if std::env::args().any(|a| a == "--nocapture") {
cmds.push("--nocapture".into());
stdio_policy = StdioPolicy::Inherit;
}
cmds.push(test_selector.into());
// Re-exec the current test binary under the sandbox:
let mut child = spawn_command_under_sandbox(
cmds,
policy,
std::env::current_dir().expect("should be able to get current dir"),
stdio_policy,
HashMap::from([("IN_SANDBOX".into(), "1".into())]),
)
.await?;
let status = child.wait().await?;
Ok(Some(status))
} else {
// Child branch: run the provided body.
child_body().await;
Ok(None)
}
}
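The harness above relies on a re-exec trick: the test binary spawns itself under the sandbox, and an environment variable tells the two invocations apart. Reduced to its skeleton, without the sandbox spawn itself (our sketch):

const MARKER: &str = "IN_SANDBOX";

fn main() {
    if std::env::var(MARKER).is_err() {
        // Parent branch: relaunch this same binary with the marker set.
        let exe = std::env::current_exe().expect("should locate current exe");
        let status = std::process::Command::new(exe)
            .env(MARKER, "1")
            .status()
            .expect("should spawn child");
        assert!(status.success());
    } else {
        // Child branch: runs inside the re-exec'd process.
        println!("running inside the sandbox");
    }
}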

View File

@@ -140,6 +140,7 @@ fn is_executable_file(path: &str) -> bool {
#[cfg(test)]
mod tests {
#![allow(clippy::unwrap_used)]
use tempfile::TempDir;
use super::*;
@@ -214,12 +215,7 @@ system_path=[{fake_cp:?}]
// Only readable folders specified.
assert_eq!(
checker.check(
valid_exec.clone(),
&cwd,
std::slice::from_ref(&root_path),
&[]
),
checker.check(valid_exec.clone(), &cwd, &[root_path.clone()], &[]),
Err(WriteablePathNotInWriteableFolders {
file: dest_path.clone(),
folders: vec![]
@@ -231,8 +227,8 @@ system_path=[{fake_cp:?}]
checker.check(
valid_exec.clone(),
&cwd,
std::slice::from_ref(&root_path),
std::slice::from_ref(&root_path)
&[root_path.clone()],
&[root_path.clone()]
),
Ok(cp.clone()),
);
@@ -251,8 +247,8 @@ system_path=[{fake_cp:?}]
checker.check(
valid_exec_call_folders_as_args,
&cwd,
std::slice::from_ref(&root_path),
std::slice::from_ref(&root_path)
&[root_path.clone()],
&[root_path.clone()]
),
Ok(cp.clone()),
);
@@ -274,8 +270,8 @@ system_path=[{fake_cp:?}]
checker.check(
exec_with_parent_of_readable_folder,
&cwd,
std::slice::from_ref(&root_path),
std::slice::from_ref(&dest_path)
&[root_path.clone()],
&[dest_path.clone()]
),
Err(ReadablePathNotInReadableFolders {
file: root_path.parent().unwrap().to_path_buf(),

View File

@@ -56,16 +56,16 @@ impl Policy {
}
for arg in args {
if let Some(regex) = &self.forbidden_substrings_pattern
&& regex.is_match(arg)
{
return Ok(MatchedExec::Forbidden {
cause: Forbidden::Arg {
arg: arg.clone(),
exec_call: exec_call.clone(),
},
reason: format!("arg `{arg}` contains forbidden substring"),
});
if let Some(regex) = &self.forbidden_substrings_pattern {
if regex.is_match(arg) {
return Ok(MatchedExec::Forbidden {
cause: Forbidden::Arg {
arg: arg.clone(),
exec_call: exec_call.clone(),
},
reason: format!("arg `{arg}` contains forbidden substring"),
});
}
}
}

View File

@@ -3,12 +3,12 @@ use crate::error::Result;
pub fn parse_sed_command(sed_command: &str) -> Result<()> {
// For now, we parse only commands like `122,202p`.
if let Some(stripped) = sed_command.strip_suffix("p")
&& let Some((first, rest)) = stripped.split_once(",")
&& first.parse::<u64>().is_ok()
&& rest.parse::<u64>().is_ok()
{
return Ok(());
if let Some(stripped) = sed_command.strip_suffix("p") {
if let Some((first, rest)) = stripped.split_once(",") {
if first.parse::<u64>().is_ok() && rest.parse::<u64>().is_ok() {
return Ok(());
}
}
}
Err(Error::SedCommandNotProvablySafe {
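parse_sed_command deliberately accepts only range-print commands of the shape `N,Mp`; everything else is rejected as not provably safe. A couple of hedged usage examples (assuming the signature shown above):

#[test]
fn sed_range_print_examples() {
    // Accepted: a pure "print lines N through M" range command.
    assert!(parse_sed_command("122,202p").is_ok());
    // Rejected: anything the parser cannot prove safe, e.g. a substitution.
    assert!(parse_sed_command("s/foo/bar/g").is_err());
}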

View File

@@ -1,3 +1,4 @@
#![expect(clippy::expect_used)]
use codex_execpolicy::NegativeExamplePassedCheck;
use codex_execpolicy::get_default_policy;

View File

@@ -1,3 +1,4 @@
#![expect(clippy::expect_used)]
extern crate codex_execpolicy;
use codex_execpolicy::ArgMatcher;
@@ -11,7 +12,6 @@ use codex_execpolicy::Result;
use codex_execpolicy::ValidExec;
use codex_execpolicy::get_default_policy;
#[expect(clippy::expect_used)]
fn setup() -> Policy {
get_default_policy().expect("failed to load default policy")
}

View File

@@ -1,3 +1,4 @@
#![expect(clippy::expect_used)]
use codex_execpolicy::PositiveExampleFailedCheck;
use codex_execpolicy::get_default_policy;

View File

@@ -1,3 +1,4 @@
#![expect(clippy::expect_used)]
use codex_execpolicy::ArgMatcher;
use codex_execpolicy::ArgType;
use codex_execpolicy::Error;
@@ -12,7 +13,6 @@ use codex_execpolicy::get_default_policy;
extern crate codex_execpolicy;
#[expect(clippy::expect_used)]
fn setup() -> Policy {
get_default_policy().expect("failed to load default policy")
}

View File

@@ -1,3 +1,4 @@
#![expect(clippy::expect_used)]
use codex_execpolicy::ArgType;
use codex_execpolicy::Error;
use codex_execpolicy::ExecCall;

View File

@@ -1,3 +1,4 @@
#![expect(clippy::expect_used)]
extern crate codex_execpolicy;
use codex_execpolicy::ArgType;
@@ -11,7 +12,6 @@ use codex_execpolicy::Result;
use codex_execpolicy::ValidExec;
use codex_execpolicy::get_default_policy;
#[expect(clippy::expect_used)]
fn setup() -> Policy {
get_default_policy().expect("failed to load default policy")
}

View File

@@ -1,3 +1,4 @@
#![expect(clippy::expect_used)]
extern crate codex_execpolicy;
use std::vec;
@@ -11,7 +12,6 @@ use codex_execpolicy::PositionalArg;
use codex_execpolicy::ValidExec;
use codex_execpolicy::get_default_policy;
#[expect(clippy::expect_used)]
fn setup() -> Policy {
get_default_policy().expect("failed to load default policy")
}

View File

@@ -1,3 +1,4 @@
#![expect(clippy::expect_used)]
extern crate codex_execpolicy;
use codex_execpolicy::ArgType;
@@ -12,7 +13,6 @@ use codex_execpolicy::Result;
use codex_execpolicy::ValidExec;
use codex_execpolicy::get_default_policy;
#[expect(clippy::expect_used)]
fn setup() -> Policy {
get_default_policy().expect("failed to load default policy")
}

View File

@@ -228,11 +228,11 @@ pub fn run(
for &Reverse((score, ref line)) in best_list.binary_heap.iter() {
if global_heap.len() < limit.get() {
global_heap.push(Reverse((score, line.clone())));
} else if let Some(min_element) = global_heap.peek()
&& score > min_element.0.0
{
global_heap.pop();
global_heap.push(Reverse((score, line.clone())));
} else if let Some(min_element) = global_heap.peek() {
if score > min_element.0.0 {
global_heap.pop();
global_heap.push(Reverse((score, line.clone())));
}
}
}
}
@@ -320,11 +320,11 @@ impl BestMatchesList {
if self.binary_heap.len() < self.max_count {
self.binary_heap.push(Reverse((score, line.to_string())));
} else if let Some(min_element) = self.binary_heap.peek()
&& score > min_element.0.0
{
self.binary_heap.pop();
self.binary_heap.push(Reverse((score, line.to_string())));
} else if let Some(min_element) = self.binary_heap.peek() {
if score > min_element.0.0 {
self.binary_heap.pop();
self.binary_heap.push(Reverse((score, line.to_string())));
}
}
}
}
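Both hunks in this file restore the nested-`if` form of the same top-k idiom: a `BinaryHeap<Reverse<(Score, String)>>` is a min-heap whose root is the worst retained match, so a candidate only displaces it when it scores strictly higher. A self-contained sketch of the idiom (ours, with `u32` scores for simplicity):

use std::cmp::Reverse;
use std::collections::BinaryHeap;

/// Keep the `limit` highest-scoring lines seen so far.
fn push_top_k(
    heap: &mut BinaryHeap<Reverse<(u32, String)>>,
    limit: usize,
    score: u32,
    line: &str,
) {
    if heap.len() < limit {
        heap.push(Reverse((score, line.to_string())));
    } else if let Some(min_element) = heap.peek() {
        // `Reverse` flips the ordering, so `peek()` yields the lowest score.
        if score > min_element.0.0 {
            heap.pop();
            heap.push(Reverse((score, line.to_string())));
        }
    }
}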

View File

@@ -24,8 +24,8 @@ file-search *args:
fmt:
cargo fmt -- --config imports_granularity=Item
fix *args:
cargo clippy --fix --all-features --tests --allow-dirty "$@"
fix:
cargo clippy --fix --all-features --tests --allow-dirty
install:
rustup show active-toolchain

View File

@@ -20,7 +20,7 @@ clap = { version = "4", features = ["derive"] }
codex-common = { path = "../common", features = ["cli"] }
codex-core = { path = "../core" }
landlock = "0.4.1"
libc = "0.2.175"
libc = "0.2.172"
seccompiler = "0.5.0"
[target.'cfg(target_os = "linux")'.dev-dependencies]

View File

@@ -104,9 +104,7 @@ fn install_network_seccomp_filter_on_current_thread() -> std::result::Result<(),
deny_syscall(libc::SYS_sendto);
deny_syscall(libc::SYS_sendmsg);
deny_syscall(libc::SYS_sendmmsg);
// NOTE: allowing recvfrom allows some tools like `cargo clippy` to run
// with their socketpair + child processes for sub-proc management
// deny_syscall(libc::SYS_recvfrom);
deny_syscall(libc::SYS_recvfrom);
deny_syscall(libc::SYS_recvmsg);
deny_syscall(libc::SYS_recvmmsg);
deny_syscall(libc::SYS_getsockopt);
@@ -117,12 +115,12 @@ fn install_network_seccomp_filter_on_current_thread() -> std::result::Result<(),
let unix_only_rule = SeccompRule::new(vec![SeccompCondition::new(
0, // first argument (domain)
SeccompCmpArgLen::Dword,
SeccompCmpOp::Ne,
SeccompCmpOp::Eq,
libc::AF_UNIX as u64,
)?])?;
rules.insert(libc::SYS_socket, vec![unix_only_rule.clone()]);
rules.insert(libc::SYS_socketpair, vec![unix_only_rule]); // always deny (Unix can use socketpair but fine, keep open?)
rules.insert(libc::SYS_socket, vec![unix_only_rule]);
rules.insert(libc::SYS_socketpair, vec![]); // always deny (Unix can use socketpair but fine, keep open?)
let filter = SeccompFilter::new(
rules,

View File

@@ -1,4 +1,6 @@
#![cfg(target_os = "linux")]
#![expect(clippy::unwrap_used, clippy::expect_used)]
use codex_core::config_types::ShellEnvironmentPolicy;
use codex_core::error::CodexErr;
use codex_core::error::SandboxErr;
@@ -33,7 +35,7 @@ fn create_env_from_core_vars() -> HashMap<String, String> {
create_env(&policy)
}
#[expect(clippy::print_stdout, clippy::expect_used, clippy::unwrap_used)]
#[allow(clippy::print_stdout)]
async fn run_cmd(cmd: &[&str], writable_roots: &[PathBuf], timeout_ms: u64) {
let params = ExecParams {
command: cmd.iter().map(|elm| elm.to_string()).collect(),
@@ -130,7 +132,6 @@ async fn test_timeout() {
/// does NOT succeed (i.e. returns a nonzero exit code) **unless** the binary
/// is missing, in which case we silently treat it as an accepted skip so the
/// suite remains green on leaner CI images.
#[expect(clippy::expect_used)]
async fn assert_network_blocked(cmd: &[&str]) {
let cwd = std::env::current_dir().expect("cwd should exist");
let params = ExecParams {

View File

@@ -9,14 +9,11 @@ workspace = true
[dependencies]
base64 = "0.22"
chrono = { version = "0.4", features = ["serde"] }
rand = "0.8"
reqwest = { version = "0.12", features = ["json", "blocking"] }
reqwest = { version = "0.12", features = ["json"] }
serde = { version = "1", features = ["derive"] }
serde_json = "1"
sha2 = "0.10"
tempfile = "3"
thiserror = "2.0.12"
tiny_http = "0.12"
tokio = { version = "1", features = [
"io-std",
"macros",
@@ -24,9 +21,6 @@ tokio = { version = "1", features = [
"rt-multi-thread",
"signal",
] }
url = "2"
urlencoding = "2.1"
webbrowser = "1.0"
[dev-dependencies]
pretty_assertions = "1.4.1"

Some files were not shown because too many files have changed in this diff.