Compare commits


1 Commit

Author: Michael Bolin
SHA1: 2c3d906f19
Message: exploration: new protocol format
Here we explore an evolution of the internal protocol that can be mapped to JSON-RPC in a straightforward way.
Date: 2025-08-12 11:28:50 -07:00
233 changed files with 8929 additions and 47792 deletions
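
As a rough illustration of the mapping the commit message describes (the method name and payload below are hypothetical, not the actual Codex protocol; assumes a `serde_json` dependency), an internal event could be wrapped in a JSON-RPC 2.0 notification like this:

```rust
use serde_json::json;

// Hypothetical sketch: wrap an internal protocol event in a JSON-RPC 2.0
// notification. Method name and payload shape are illustrative only.
fn to_jsonrpc_notification(method: &str, params: serde_json::Value) -> serde_json::Value {
    json!({
        "jsonrpc": "2.0",
        "method": method,
        "params": params,
    })
}

fn main() {
    let event = to_jsonrpc_notification("sessionConfigured", json!({ "sessionId": "" }));
    println!("{event}");
}
```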

View File

@@ -1,6 +1,6 @@
[codespell]
# Ref: https://github.com/codespell-project/codespell#using-a-config-file
skip = .git*,vendor,*-lock.yaml,*.lock,.codespellrc,*test.ts,*.jsonl
skip = .git*,vendor,*-lock.yaml,*.lock,.codespellrc,*test.ts
check-hidden = true
ignore-regex = ^\s*"image/\S+": ".*|\b(afterAll)\b
ignore-words-list = ratatui,ser

View File

@@ -1,31 +0,0 @@
name: 🎁 Feature Request
description: Propose a new feature for Codex
labels:
- enhancement
- needs triage
body:
- type: markdown
attributes:
value: |
Is Codex missing a feature that you'd like to see? Feel free to propose it here.
Before you submit a feature:
1. Search existing issues for similar features. If you find one, 👍 it rather than opening a new one.
2. The Codex team will try to balance the varying needs of the community when prioritizing or rejecting new features. Not all features will be accepted. See [Contributing](https://github.com/openai/codex#contributing) for more details.
- type: textarea
id: feature
attributes:
label: What feature would you like to see?
validations:
required: true
- type: textarea
id: author
attributes:
label: Are you interested in implementing this feature?
description: Please wait for acknowledgement before implementing or opening a PR.
- type: textarea
id: notes
attributes:
label: Additional information
description: Is there anything else you think we should know?

View File

@@ -9,7 +9,7 @@
},
"devDependencies": {
"@types/bun": "^1.2.20",
"@types/node": "^24.3.0",
"@types/node": "^24.2.1",
"prettier": "^3.6.2",
"typescript": "^5.9.2",
},
@@ -50,7 +50,7 @@
"@types/bun": ["@types/bun@1.2.20", "", { "dependencies": { "bun-types": "1.2.20" } }, "sha512-dX3RGzQ8+KgmMw7CsW4xT5ITBSCrSbfHc36SNT31EOUg/LA9JWq0VDdEXDRSe1InVWpd2yLUM1FUF/kEOyTzYA=="],
"@types/node": ["@types/node@24.3.0", "", { "dependencies": { "undici-types": "~7.10.0" } }, "sha512-aPTXCrfwnDLj4VvXrm+UUCQjNEvJgNA8s5F1cvwQU+3KNltTOkBm1j30uNLyqqPNe7gE3KFzImYoZEfLhp4Yow=="],
"@types/node": ["@types/node@24.2.1", "", { "dependencies": { "undici-types": "~7.10.0" } }, "sha512-DRh5K+ka5eJic8CjH7td8QpYEV6Zo10gfRkjHCO3weqZHWDtAaSTFtl4+VMqOJ4N5jcuhZ9/l+yy8rVgw7BQeQ=="],
"@types/react": ["@types/react@19.1.8", "", { "dependencies": { "csstype": "^3.0.2" } }, "sha512-AwAfQ2Wa5bCx9WP8nZL2uMZWod7J7/JSplxbTmBQ5ms6QpqNYm672H0Vu9ZVKVngQ+ii4R/byguVEUZQyeg44g=="],
@@ -82,8 +82,6 @@
"@octokit/plugin-rest-endpoint-methods/@octokit/types": ["@octokit/types@12.6.0", "", { "dependencies": { "@octokit/openapi-types": "^20.0.0" } }, "sha512-1rhSOfRa6H9w4YwK0yrf5faDaDTb+yLyBUKOCV4xtCDB5VmIPqd/v9yr9o6SAzOAlRxMiRiCic6JVM1/kunVkw=="],
"bun-types/@types/node": ["@types/node@24.2.1", "", { "dependencies": { "undici-types": "~7.10.0" } }, "sha512-DRh5K+ka5eJic8CjH7td8QpYEV6Zo10gfRkjHCO3weqZHWDtAaSTFtl4+VMqOJ4N5jcuhZ9/l+yy8rVgw7BQeQ=="],
"@octokit/plugin-paginate-rest/@octokit/types/@octokit/openapi-types": ["@octokit/openapi-types@20.0.0", "", {}, "sha512-EtqRBEjp1dL/15V7WiX5LJMIxxkdiGJnabzYx5Apx4FkQIFgAfKumXeYAqqJCj1s+BMX4cPFIFC4OLCR6stlnA=="],
"@octokit/plugin-rest-endpoint-methods/@octokit/types/@octokit/openapi-types": ["@octokit/openapi-types@20.0.0", "", {}, "sha512-EtqRBEjp1dL/15V7WiX5LJMIxxkdiGJnabzYx5Apx4FkQIFgAfKumXeYAqqJCj1s+BMX4cPFIFC4OLCR6stlnA=="],

View File

@@ -14,7 +14,7 @@
},
"devDependencies": {
"@types/bun": "^1.2.20",
"@types/node": "^24.3.0",
"@types/node": "^24.2.1",
"prettier": "^3.6.2",
"typescript": "^5.9.2"
}

View File

@@ -24,7 +24,3 @@ updates:
directory: /
schedule:
interval: weekly
- package-ecosystem: rust-toolchain
directory: codex-rs
schedule:
interval: weekly

View File

@@ -17,10 +17,6 @@
"linux-aarch64": {
"regex": "^codex-aarch64-unknown-linux-musl\\.zst$",
"path": "codex"
},
"windows-x86_64": {
"regex": "^codex-x86_64-pc-windows-msvc\\.exe\\.zst$",
"path": "codex.exe"
}
}
}

View File

@@ -1,6 +0,0 @@
# External (non-OpenAI) Pull Request Requirements
Before opening this Pull Request, please read the "Contributing" section of the README or your PR may be closed:
https://github.com/openai/codex#contributing
If your PR conforms to our contribution guidelines, replace this text with a detailed and high quality description of your changes.

View File

@@ -12,7 +12,7 @@ jobs:
NODE_OPTIONS: --max-old-space-size=4096
steps:
- name: Checkout repository
uses: actions/checkout@v5
uses: actions/checkout@v4
- name: Setup Node.js
uses: actions/setup-node@v4

View File

@@ -18,7 +18,7 @@ jobs:
steps:
- name: Checkout
uses: actions/checkout@v5
uses: actions/checkout@v4
- name: Annotate locations with typos
uses: codespell-project/codespell-problem-matcher@b80729f885d32f78a716c2f107b4db1025001c42 # v1
- name: Codespell

View File

@@ -37,9 +37,9 @@ jobs:
# Codex is not going to run.
- name: Checkout repository
uses: actions/checkout@v5
uses: actions/checkout@v4
- uses: dtolnay/rust-toolchain@1.89
- uses: dtolnay/rust-toolchain@1.88
with:
targets: x86_64-unknown-linux-gnu
components: clippy

View File

@@ -1,76 +1,42 @@
name: rust-ci
on:
pull_request: {}
pull_request:
branches:
- main
paths:
- "codex-rs/**"
- ".github/**"
push:
branches:
- main
workflow_dispatch:
# CI builds in debug (dev) for faster signal.
# For CI, we build in debug (`--profile dev`) rather than release mode so we
# get signal faster.
jobs:
# --- Detect what changed (always runs) -------------------------------------
changed:
name: Detect changed areas
runs-on: ubuntu-24.04
outputs:
codex: ${{ steps.detect.outputs.codex }}
workflows: ${{ steps.detect.outputs.workflows }}
steps:
- uses: actions/checkout@v5
with:
fetch-depth: 0
- name: Detect changed paths (no external action)
id: detect
shell: bash
run: |
set -euo pipefail
if [[ "${{ github.event_name }}" == "pull_request" ]]; then
BASE_SHA='${{ github.event.pull_request.base.sha }}'
echo "Base SHA: $BASE_SHA"
# List files changed between base and current HEAD (merge-base aware)
mapfile -t files < <(git diff --name-only --no-renames "$BASE_SHA"...HEAD)
else
# On push / manual runs, default to running everything
files=("codex-rs/force" ".github/force")
fi
codex=false
workflows=false
for f in "${files[@]}"; do
[[ $f == codex-rs/* ]] && codex=true
[[ $f == .github/* ]] && workflows=true
done
echo "codex=$codex" >> "$GITHUB_OUTPUT"
echo "workflows=$workflows" >> "$GITHUB_OUTPUT"
# --- CI that doesn't need specific targets ---------------------------------
# CI that don't need specific targets
general:
name: Format / etc
runs-on: ubuntu-24.04
needs: changed
if: ${{ needs.changed.outputs.codex == 'true' || needs.changed.outputs.workflows == 'true' || github.event_name == 'push' }}
defaults:
run:
working-directory: codex-rs
steps:
- uses: actions/checkout@v5
- uses: dtolnay/rust-toolchain@1.89
- uses: actions/checkout@v4
- uses: dtolnay/rust-toolchain@1.88
with:
components: rustfmt
- name: cargo fmt
run: cargo fmt -- --config imports_granularity=Item --check
# --- CI to validate on different os/targets --------------------------------
# CI to validate on different os/targets
lint_build_test:
name: ${{ matrix.runner }} - ${{ matrix.target }}${{ matrix.profile == 'release' && ' (release)' || '' }}
name: ${{ matrix.runner }} - ${{ matrix.target }}
runs-on: ${{ matrix.runner }}
timeout-minutes: 30
needs: changed
# Keep job-level if to avoid spinning up runners when not needed
if: ${{ needs.changed.outputs.codex == 'true' || needs.changed.outputs.workflows == 'true' || github.event_name == 'push' }}
defaults:
run:
working-directory: codex-rs
@@ -78,41 +44,27 @@ jobs:
strategy:
fail-fast: false
matrix:
# Note: While Codex CLI does not support Windows today, we include
# Windows in CI to ensure the code at least builds there.
include:
- runner: macos-14
target: aarch64-apple-darwin
profile: dev
- runner: macos-14
target: x86_64-apple-darwin
profile: dev
- runner: ubuntu-24.04
target: x86_64-unknown-linux-musl
profile: dev
- runner: ubuntu-24.04
target: x86_64-unknown-linux-gnu
profile: dev
- runner: ubuntu-24.04-arm
target: aarch64-unknown-linux-musl
profile: dev
- runner: ubuntu-24.04-arm
target: aarch64-unknown-linux-gnu
profile: dev
- runner: windows-latest
target: x86_64-pc-windows-msvc
profile: dev
# Also run representative release builds on Mac and Linux because
# there could be release-only build errors we want to catch.
- runner: macos-14
target: aarch64-apple-darwin
profile: release
- runner: ubuntu-24.04
target: x86_64-unknown-linux-musl
profile: release
steps:
- uses: actions/checkout@v5
- uses: dtolnay/rust-toolchain@1.89
- uses: actions/checkout@v4
- uses: dtolnay/rust-toolchain@1.88
with:
targets: ${{ matrix.target }}
components: clippy
@@ -125,36 +77,33 @@ jobs:
~/.cargo/registry/cache/
~/.cargo/git/db/
${{ github.workspace }}/codex-rs/target/
key: cargo-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-${{ hashFiles('**/Cargo.lock') }}
key: cargo-${{ matrix.runner }}-${{ matrix.target }}-${{ hashFiles('**/Cargo.lock') }}
- if: ${{ matrix.target == 'x86_64-unknown-linux-musl' || matrix.target == 'aarch64-unknown-linux-musl'}}
name: Install musl build tools
run: |
sudo apt install -y musl-tools pkg-config && sudo rm -rf /var/lib/apt/lists/*
sudo apt install -y musl-tools pkg-config
- name: cargo clippy
id: clippy
continue-on-error: true
run: cargo clippy --target ${{ matrix.target }} --all-features --tests -- -D warnings
# Running `cargo build` from the workspace root builds the workspace using
# the union of all features from third-party crates. This can mask errors
# where individual crates have underspecified features. To avoid this, we
# run `cargo check` for each crate individually, though because this is
# run `cargo build` for each crate individually, though because this is
# slower, we only do this for the x86_64-unknown-linux-gnu target.
- name: cargo check individual crates
id: cargo_check_all_crates
if: ${{ matrix.target == 'x86_64-unknown-linux-gnu' && matrix.profile != 'release' }}
- name: cargo build individual crates
id: build
if: ${{ matrix.target == 'x86_64-unknown-linux-gnu' }}
continue-on-error: true
run: |
find . -name Cargo.toml -mindepth 2 -maxdepth 2 -print0 \
| xargs -0 -n1 -I{} bash -c 'cd "$(dirname "{}")" && cargo check --profile ${{ matrix.profile }}'
run: find . -name Cargo.toml -mindepth 2 -maxdepth 2 -print0 | xargs -0 -n1 -I{} bash -c 'cd "$(dirname "{}")" && cargo build'
- name: cargo test
id: test
# `cargo test` takes too long for release builds to run them on every PR
if: ${{ matrix.profile != 'release' }}
continue-on-error: true
run: cargo test --all-features --target ${{ matrix.target }} --profile ${{ matrix.profile }}
run: cargo test --all-features --target ${{ matrix.target }}
env:
RUST_BACKTRACE: 1
@@ -162,32 +111,8 @@ jobs:
- name: verify all steps passed
if: |
steps.clippy.outcome == 'failure' ||
steps.cargo_check_all_crates.outcome == 'failure' ||
steps.build.outcome == 'failure' ||
steps.test.outcome == 'failure'
run: |
echo "One or more checks failed (clippy, cargo_check_all_crates, or test). See logs for details."
echo "One or more checks failed (clippy, build, or test). See logs for details."
exit 1
# --- Gatherer job that you mark as the ONLY required status -----------------
results:
name: CI results (required)
needs: [changed, general, lint_build_test]
if: always()
runs-on: ubuntu-24.04
steps:
- name: Summarize
shell: bash
run: |
echo "general: ${{ needs.general.result }}"
echo "matrix : ${{ needs.lint_build_test.result }}"
# If nothing relevant changed (PR touching only root README, etc.),
# declare success regardless of other jobs.
if [[ '${{ needs.changed.outputs.codex }}' != 'true' && '${{ needs.changed.outputs.workflows }}' != 'true' && '${{ github.event_name }}' != 'push' ]]; then
echo 'No relevant changes -> CI not required.'
exit 0
fi
# Otherwise require the jobs to have succeeded
[[ '${{ needs.general.result }}' == 'success' ]] || { echo 'general failed'; exit 1; }
[[ '${{ needs.lint_build_test.result }}' == 'success' ]] || { echo 'matrix failed'; exit 1; }

View File

@@ -19,7 +19,7 @@ jobs:
tag-check:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v5
- uses: actions/checkout@v4
- name: Validate tag matches Cargo.toml version
shell: bash
@@ -74,8 +74,8 @@ jobs:
target: x86_64-pc-windows-msvc
steps:
- uses: actions/checkout@v5
- uses: dtolnay/rust-toolchain@1.89
- uses: actions/checkout@v4
- uses: dtolnay/rust-toolchain@1.88
with:
targets: ${{ matrix.target }}
@@ -87,7 +87,7 @@ jobs:
~/.cargo/registry/cache/
~/.cargo/git/db/
${{ github.workspace }}/codex-rs/target/
key: cargo-release-${{ matrix.runner }}-${{ matrix.target }}-release-${{ hashFiles('**/Cargo.lock') }}
key: cargo-release-${{ matrix.runner }}-${{ matrix.target }}-${{ hashFiles('**/Cargo.lock') }}
- if: ${{ matrix.target == 'x86_64-unknown-linux-musl' || matrix.target == 'aarch64-unknown-linux-musl'}}
name: Install musl build tools
@@ -117,11 +117,10 @@ jobs:
dest="dist/${{ matrix.target }}"
# For compatibility with environments that lack the `zstd` tool we
# additionally create a `.tar.gz` for all platforms and `.zip` for
# Windows alongside every single binary that we publish. The end result is:
# additionally create a `.tar.gz` alongside every single binary that
# we publish. The end result is:
# codex-<target>.zst (existing)
# codex-<target>.tar.gz (new)
# codex-<target>.zip (only for Windows)
# 1. Produce a .tar.gz for every file in the directory *before* we
# run `zstd --rm`, because that flag deletes the original files.
@@ -129,20 +128,13 @@ jobs:
base="$(basename "$f")"
# Skip files that are already archives (shouldn't happen, but be
# safe).
if [[ "$base" == *.tar.gz || "$base" == *.zip ]]; then
if [[ "$base" == *.tar.gz ]]; then
continue
fi
# Create per-binary tar.gz
tar -C "$dest" -czf "$dest/${base}.tar.gz" "$base"
# Create zip archive for Windows binaries
# Must run from inside the dest dir so 7z won't
# embed the directory path inside the zip.
if [[ "${{ matrix.runner }}" == windows* ]]; then
(cd "$dest" && 7z a "${base}.zip" "$base")
fi
# Also create .zst (existing behaviour) *and* remove the original
# uncompressed binary to keep the directory small.
zstd -T0 -19 --rm "$dest/$base"
@@ -163,7 +155,7 @@ jobs:
steps:
- name: Checkout repository
uses: actions/checkout@v5
uses: actions/checkout@v4
- uses: actions/download-artifact@v4
with:

.vscode/launch.json vendored (36 changed lines)
View File

@@ -1,22 +1,18 @@
{
"version": "0.2.0",
"configurations": [
{
"type": "lldb",
"request": "launch",
"name": "Cargo launch",
"cargo": {
"cwd": "${workspaceFolder}/codex-rs",
"args": ["build", "--bin=codex-tui"]
},
"args": []
},
{
"type": "lldb",
"request": "attach",
"name": "Attach to running codex CLI",
"pid": "${command:pickProcess}",
"sourceLanguages": ["rust"]
}
]
"version": "0.2.0",
"configurations": [
{
"type": "lldb",
"request": "launch",
"name": "Cargo launch",
"cargo": {
"cwd": "${workspaceFolder}/codex-rs",
"args": [
"build",
"--bin=codex-tui"
]
},
"args": []
}
]
}

View File

@@ -8,35 +8,6 @@ In the codex-rs folder where the rust code lives:
- You operate in a sandbox where `CODEX_SANDBOX_NETWORK_DISABLED=1` will be set whenever you use the `shell` tool. Any existing code that uses `CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR` was authored with this fact in mind. It is often used to early exit out of tests that the author knew you would not be able to run given your sandbox limitations.
- Similarly, when you spawn a process using Seatbelt (`/usr/bin/sandbox-exec`), `CODEX_SANDBOX=seatbelt` will be set on the child process. Integration tests that want to run Seatbelt themselves cannot be run under Seatbelt, so checks for `CODEX_SANDBOX=seatbelt` are also often used to early exit out of tests, as appropriate.
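A minimal sketch of that early-exit pattern (the test name and body are hypothetical):

```rust
// Hypothetical guard, mirroring the early-exit pattern described above.
#[test]
fn downloads_remote_fixture() {
    if std::env::var("CODEX_SANDBOX_NETWORK_DISABLED").is_ok() {
        // Network is disabled in the Codex sandbox; nothing useful to test.
        return;
    }
    // ... network-dependent assertions would go here ...
}
```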
Before finalizing a change to `codex-rs`, run `just fmt` (in `codex-rs` directory) to format the code and `just fix -p <project>` (in `codex-rs` directory) to fix any linter issues in the code. Additionally, run the tests:
1. Run the test for the specific project that was changed. For example, if changes were made in `codex-rs/tui`, run `cargo test -p codex-tui`.
2. Once those pass, if any changes were made in common, core, or protocol, run the complete test suite with `cargo test --all-features`.
Before creating a pull request with changes to `codex-rs`, run `just fmt` (in `codex-rs` directory) to format the code and `just fix` (in `codex-rs` directory) to fix any linter issues in the code, ensure the test suite passes by running `cargo test --all-features` in the `codex-rs` directory.
## TUI style conventions
See `codex-rs/tui/styles.md`.
## TUI code conventions
- Use concise styling helpers from ratatui's Stylize trait.
- Basic spans: use "text".into()
- Styled spans: use "text".red(), "text".green(), "text".magenta(), "text".dim(), etc.
- Prefer these over constructing styles with `Span::styled` and `Style` directly.
- Example: patch summary file lines
- Desired: vec![" └ ".into(), "M".red(), " ".dim(), "tui/src/app.rs".dim()]
## Snapshot tests
This repo uses snapshot tests (via `insta`), especially in `codex-rs/tui`, to validate rendered output. When UI or text output changes intentionally, update the snapshots as follows:
- Run tests to generate any updated snapshots:
- `cargo test -p codex-tui`
- Check what's pending:
- `cargo insta pending-snapshots -p codex-tui`
- Review changes by reading the generated `*.snap.new` files directly in the repo, or preview a specific file:
- `cargo insta show -p codex-tui path/to/file.snap.new`
- Only if you intend to accept all new snapshots in this crate, run:
- `cargo insta accept -p codex-tui`
If you don't have the tool:
- `cargo install cargo-insta`
When making individual changes prefer running tests on individual files or projects first.
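For reference, a minimal `insta` snapshot test looks like the sketch below (the rendered string is a stand-in for real TUI output; assumes the `insta` dev-dependency). The first run produces a `*.snap.new` file that the steps above review and accept:

```rust
#[cfg(test)]
mod tests {
    #[test]
    fn renders_status_line() {
        // Stand-in for real rendered TUI output.
        let rendered = format!("codex v{} - ready", "0.0.0");
        insta::assert_snapshot!(rendered);
    }
}
```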

View File

@@ -22,7 +22,6 @@
- [Authenticate locally and copy your credentials to the "headless" machine](#authenticate-locally-and-copy-your-credentials-to-the-headless-machine)
- [Connecting through VPS or remote](#connecting-through-vps-or-remote)
- [Usage-based billing alternative: Use an OpenAI API key](#usage-based-billing-alternative-use-an-openai-api-key)
- [Forcing a specific auth method (advanced)](#forcing-a-specific-auth-method-advanced)
- [Choosing Codex's level of autonomy](#choosing-codexs-level-of-autonomy)
- [**1. Read/write**](#1-readwrite)
- [**2. Read-only**](#2-read-only)
@@ -166,35 +165,6 @@ Notes:
- This command only sets the key for your current terminal session, which we recommend. To set it for all future sessions, you can also add the `export` line to your shell's configuration file (e.g., `~/.zshrc`).
- If you have signed in with ChatGPT, Codex will default to using your ChatGPT credits. If you wish to use your API key, use the `/logout` command to clear your ChatGPT authentication.
#### Forcing a specific auth method (advanced)
You can explicitly choose which authentication Codex should prefer when both are available.
- To always use your API key (even when ChatGPT auth exists), set:
```toml
# ~/.codex/config.toml
preferred_auth_method = "apikey"
```
Or override ad-hoc via CLI:
```bash
codex --config preferred_auth_method="apikey"
```
- To prefer ChatGPT auth (default), set:
```toml
# ~/.codex/config.toml
preferred_auth_method = "chatgpt"
```
Notes:
- When `preferred_auth_method = "apikey"` and an API key is available, the login screen is skipped.
- When `preferred_auth_method = "chatgpt"` (default), Codex prefers ChatGPT auth if present; if only an API key is present, it will use the API key. Certain account types may also require API-key mode.
### Choosing Codex's level of autonomy
We always recommend running Codex in its default sandbox that gives you strong guardrails around what the agent can do. The default sandbox prevents it from editing files outside its workspace, or from accessing the network.
@@ -596,13 +566,9 @@ We're excited to launch a **$1 million initiative** supporting open source proje
## Contributing
This project is under active development and the code will likely change pretty significantly.
This project is under active development and the code will likely change pretty significantly. We'll update this message once that's complete!
**At the moment, we only plan to prioritize reviewing external contributions for bugs or security fixes.**
If you want to add a new feature or change the behavior of an existing one, please open an issue proposing the feature and get approval from an OpenAI team member before spending time building it.
**New contributions that don't go through this process may be closed** if they aren't aligned with our current roadmap or conflict with other priorities/upcoming features.
More broadly we welcome contributions - whether you are opening your very first pull request or you're a seasoned maintainer. At the same time we care about reliability and long-term maintainability, so the bar for merging code is intentionally **high**. The guidelines below spell out what "high-quality" means in practice and should make the whole process transparent and friendly.
### Development workflow
@@ -627,9 +593,8 @@ If you want to add a new feature or change the behavior of an existing one, plea
### Review process
1. One maintainer will be assigned as a primary reviewer.
2. If your PR adds a new feature that was not previously discussed and approved, we may choose to close your PR (see [Contributing](#contributing)).
3. We may ask for changes - please do not take this personally. We value the work, but we also value consistency and long-term maintainability.
5. When there is consensus that the PR meets the bar, a maintainer will squash-and-merge.
2. We may ask for changes - please do not take this personally. We value the work, we just also value consistency and long-term maintainability.
3. When there is consensus that the PR meets the bar, a maintainer will squash-and-merge.
### Community values

View File

@@ -43,7 +43,7 @@ switch (platform) {
targetTriple = "x86_64-pc-windows-msvc.exe";
break;
case "arm64":
// We do not build this today, fall through...
// We do not build this today, fall through...
default:
break;
}
@@ -65,43 +65,9 @@ const binaryPath = path.join(__dirname, "..", "bin", `codex-${targetTriple}`);
// receives a fatal signal, both processes exit in a predictable manner.
const { spawn } = await import("child_process");
async function tryImport(moduleName) {
try {
// eslint-disable-next-line node/no-unsupported-features/es-syntax
return await import(moduleName);
} catch (err) {
return null;
}
}
async function resolveRgDir() {
const ripgrep = await tryImport("@vscode/ripgrep");
if (!ripgrep?.rgPath) {
return null;
}
return path.dirname(ripgrep.rgPath);
}
function getUpdatedPath(newDirs) {
const pathSep = process.platform === "win32" ? ";" : ":";
const existingPath = process.env.PATH || "";
const updatedPath = [
...newDirs,
...existingPath.split(pathSep).filter(Boolean),
].join(pathSep);
return updatedPath;
}
const additionalDirs = [];
const rgDir = await resolveRgDir();
if (rgDir) {
additionalDirs.push(rgDir);
}
const updatedPath = getUpdatedPath(additionalDirs);
const child = spawn(binaryPath, process.argv.slice(2), {
stdio: "inherit",
env: { ...process.env, PATH: updatedPath, CODEX_MANAGED_BY_NPM: "1" },
env: { ...process.env, CODEX_MANAGED_BY_NPM: "1" },
});
child.on("error", (err) => {
@@ -154,3 +120,4 @@ if (childResult.type === "signal") {
} else {
process.exit(childResult.exitCode);
}

View File

@@ -1,119 +0,0 @@
{
"name": "@openai/codex",
"version": "0.0.0-dev",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "@openai/codex",
"version": "0.0.0-dev",
"license": "Apache-2.0",
"dependencies": {
"@vscode/ripgrep": "^1.15.14"
},
"bin": {
"codex": "bin/codex.js"
},
"engines": {
"node": ">=20"
}
},
"node_modules/@vscode/ripgrep": {
"version": "1.15.14",
"resolved": "https://registry.npmjs.org/@vscode/ripgrep/-/ripgrep-1.15.14.tgz",
"integrity": "sha512-/G1UJPYlm+trBWQ6cMO3sv6b8D1+G16WaJH1/DSqw32JOVlzgZbLkDxRyzIpTpv30AcYGMkCf5tUqGlW6HbDWw==",
"hasInstallScript": true,
"license": "MIT",
"dependencies": {
"https-proxy-agent": "^7.0.2",
"proxy-from-env": "^1.1.0",
"yauzl": "^2.9.2"
}
},
"node_modules/agent-base": {
"version": "7.1.4",
"resolved": "https://registry.npmjs.org/agent-base/-/agent-base-7.1.4.tgz",
"integrity": "sha512-MnA+YT8fwfJPgBx3m60MNqakm30XOkyIoH1y6huTQvC0PwZG7ki8NacLBcrPbNoo8vEZy7Jpuk7+jMO+CUovTQ==",
"license": "MIT",
"engines": {
"node": ">= 14"
}
},
"node_modules/buffer-crc32": {
"version": "0.2.13",
"resolved": "https://registry.npmjs.org/buffer-crc32/-/buffer-crc32-0.2.13.tgz",
"integrity": "sha512-VO9Ht/+p3SN7SKWqcrgEzjGbRSJYTx+Q1pTQC0wrWqHx0vpJraQ6GtHx8tvcg1rlK1byhU5gccxgOgj7B0TDkQ==",
"license": "MIT",
"engines": {
"node": "*"
}
},
"node_modules/debug": {
"version": "4.4.1",
"resolved": "https://registry.npmjs.org/debug/-/debug-4.4.1.tgz",
"integrity": "sha512-KcKCqiftBJcZr++7ykoDIEwSa3XWowTfNPo92BYxjXiyYEVrUQh2aLyhxBCwww+heortUFxEJYcRzosstTEBYQ==",
"license": "MIT",
"dependencies": {
"ms": "^2.1.3"
},
"engines": {
"node": ">=6.0"
},
"peerDependenciesMeta": {
"supports-color": {
"optional": true
}
}
},
"node_modules/fd-slicer": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/fd-slicer/-/fd-slicer-1.1.0.tgz",
"integrity": "sha512-cE1qsB/VwyQozZ+q1dGxR8LBYNZeofhEdUNGSMbQD3Gw2lAzX9Zb3uIU6Ebc/Fmyjo9AWWfnn0AUCHqtevs/8g==",
"license": "MIT",
"dependencies": {
"pend": "~1.2.0"
}
},
"node_modules/https-proxy-agent": {
"version": "7.0.6",
"resolved": "https://registry.npmjs.org/https-proxy-agent/-/https-proxy-agent-7.0.6.tgz",
"integrity": "sha512-vK9P5/iUfdl95AI+JVyUuIcVtd4ofvtrOr3HNtM2yxC9bnMbEdp3x01OhQNnjb8IJYi38VlTE3mBXwcfvywuSw==",
"license": "MIT",
"dependencies": {
"agent-base": "^7.1.2",
"debug": "4"
},
"engines": {
"node": ">= 14"
}
},
"node_modules/ms": {
"version": "2.1.3",
"resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz",
"integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==",
"license": "MIT"
},
"node_modules/pend": {
"version": "1.2.0",
"resolved": "https://registry.npmjs.org/pend/-/pend-1.2.0.tgz",
"integrity": "sha512-F3asv42UuXchdzt+xXqfW1OGlVBe+mxa2mqI0pg5yAHZPvFmY3Y6drSf/GQ1A86WgWEN9Kzh/WrgKa6iGcHXLg==",
"license": "MIT"
},
"node_modules/proxy-from-env": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/proxy-from-env/-/proxy-from-env-1.1.0.tgz",
"integrity": "sha512-D+zkORCbA9f1tdWRK0RaCR3GPv50cMxcrz4X8k5LTSUD1Dkw47mKJEZQNunItRTkWwgtaUSo1RVFRIG9ZXiFYg==",
"license": "MIT"
},
"node_modules/yauzl": {
"version": "2.10.0",
"resolved": "https://registry.npmjs.org/yauzl/-/yauzl-2.10.0.tgz",
"integrity": "sha512-p4a9I6X6nu6IhoGmBqAcbJy1mlC4j27vEPZX9F4L4/vZT3Lyq1VkFHw/V/PUcB9Buo+DG3iHkT0x3Qya58zc3g==",
"license": "MIT",
"dependencies": {
"buffer-crc32": "~0.2.3",
"fd-slicer": "~1.1.0"
}
}
}
}

View File

@@ -16,11 +16,5 @@
"repository": {
"type": "git",
"url": "git+https://github.com/openai/codex.git"
},
"dependencies": {
"@vscode/ripgrep": "^1.15.14"
},
"devDependencies": {
"prettier": "^3.3.3"
}
}

codex-rs/Cargo.lock generated (329 changed lines)
View File

@@ -176,9 +176,9 @@ dependencies = [
[[package]]
name = "anyhow"
version = "1.0.99"
version = "1.0.98"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b0674a1ddeecb70197781e945de4b3b8ffb61fa939a5597bcf48503737663100"
checksum = "e16d2d3311acee920a9eb8d33b8cbc1787ce4a264e85f964c2404b969bdcd487"
[[package]]
name = "arbitrary"
@@ -203,12 +203,6 @@ version = "0.7.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7c02d123df017efcdfbd739ef81735b36c5ba83ec3c59c80a9d7ecc718f92e50"
[[package]]
name = "ascii"
version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d92bec98840b8f03a5ff5413de5293bfcd8bf96467cf5452609f939ec6f5de16"
[[package]]
name = "ascii-canvas"
version = "3.0.0"
@@ -487,12 +481,6 @@ dependencies = [
"shlex",
]
[[package]]
name = "cesu8"
version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6d43a04d8753f35258c91f8ec639f792891f748a1edbd759cf1dcea3382ad83c"
[[package]]
name = "cfg-expr"
version = "0.15.8"
@@ -530,17 +518,11 @@ dependencies = [
"windows-link",
]
[[package]]
name = "chunked_transfer"
version = "1.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6e4de3bc4ea267985becf712dc6d9eed8b04c953b3fcfb339ebc87acd9804901"
[[package]]
name = "clap"
version = "4.5.45"
version = "4.5.43"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1fc0e74a703892159f5ae7d3aac52c8e6c392f5ae5f359c70b5881d60aaac318"
checksum = "50fd97c9dc2399518aa331917ac6f274280ec5eb34e555dd291899745c48ec6f"
dependencies = [
"clap_builder",
"clap_derive",
@@ -548,9 +530,9 @@ dependencies = [
[[package]]
name = "clap_builder"
version = "4.5.44"
version = "4.5.43"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b3e7f4214277f3c7aa526a59dd3fbe306a370daee1f8b7b8c987069cd8e888a8"
checksum = "c35b5830294e1fa0462034af85cc95225a4cb07092c088c55bda3147cfcd8f65"
dependencies = [
"anstream",
"anstyle",
@@ -561,18 +543,18 @@ dependencies = [
[[package]]
name = "clap_complete"
version = "4.5.57"
version = "4.5.56"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4d9501bd3f5f09f7bbee01da9a511073ed30a80cd7a509f1214bb74eadea71ad"
checksum = "67e4efcbb5da11a92e8a609233aa1e8a7d91e38de0be865f016d14700d45a7fd"
dependencies = [
"clap",
]
[[package]]
name = "clap_derive"
version = "4.5.45"
version = "4.5.41"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "14cb31bb0a7d536caef2639baa7fad459e15c3144efefa6dbd1c84562c4739f6"
checksum = "ef4f52386a59ca4c860f7393bcf8abd8dfd91ecccc0f774635ff68e92eeef491"
dependencies = [
"heck",
"proc-macro2",
@@ -665,8 +647,6 @@ dependencies = [
"codex-exec",
"codex-login",
"codex-mcp-server",
"codex-protocol",
"codex-protocol-ts",
"codex-tui",
"serde_json",
"tokio",
@@ -680,7 +660,6 @@ version = "0.0.0"
dependencies = [
"clap",
"codex-core",
"codex-protocol",
"serde",
"toml 0.9.5",
]
@@ -698,11 +677,11 @@ dependencies = [
"codex-apply-patch",
"codex-login",
"codex-mcp-client",
"codex-protocol",
"core_test_support",
"dirs",
"env-flags",
"eventsource-stream",
"fs2",
"futures",
"landlock",
"libc",
@@ -710,11 +689,9 @@ dependencies = [
"mcp-types",
"mime_guess",
"openssl-sys",
"os_info",
"predicates",
"pretty_assertions",
"rand 0.9.2",
"regex-lite",
"reqwest",
"seccompiler",
"serde",
@@ -754,9 +731,6 @@ dependencies = [
"codex-common",
"codex-core",
"codex-ollama",
"codex-protocol",
"core_test_support",
"libc",
"owo-colors",
"predicates",
"serde_json",
@@ -765,7 +739,6 @@ dependencies = [
"tokio",
"tracing",
"tracing-subscriber",
"wiremock",
]
[[package]]
@@ -823,18 +796,12 @@ dependencies = [
"base64 0.22.1",
"chrono",
"pretty_assertions",
"rand 0.8.5",
"reqwest",
"serde",
"serde_json",
"sha2",
"tempfile",
"thiserror 2.0.12",
"tiny_http",
"tokio",
"url",
"urlencoding",
"webbrowser",
]
[[package]]
@@ -857,10 +824,7 @@ dependencies = [
"anyhow",
"assert_cmd",
"codex-arg0",
"codex-common",
"codex-core",
"codex-login",
"codex-protocol",
"mcp-types",
"mcp_test_support",
"pretty_assertions",
@@ -900,33 +864,19 @@ dependencies = [
name = "codex-protocol"
version = "0.0.0"
dependencies = [
"mcp-types",
"pretty_assertions",
"serde",
"serde_bytes",
"serde_json",
"strum 0.27.2",
"strum_macros 0.27.2",
"ts-rs",
"uuid",
]
[[package]]
name = "codex-protocol-ts"
version = "0.0.0"
dependencies = [
"anyhow",
"clap",
"codex-protocol",
"ts-rs",
]
[[package]]
name = "codex-tui"
version = "0.0.0"
dependencies = [
"anyhow",
"async-stream",
"base64 0.22.1",
"chrono",
"clap",
@@ -937,7 +887,6 @@ dependencies = [
"codex-file-search",
"codex-login",
"codex-ollama",
"codex-protocol",
"color-eyre",
"crossterm",
"diffy",
@@ -946,10 +895,9 @@ dependencies = [
"lazy_static",
"libc",
"mcp-types",
"once_cell",
"path-clean",
"pretty_assertions",
"rand 0.9.2",
"rand 0.8.5",
"ratatui",
"ratatui-image",
"regex-lite",
@@ -962,7 +910,6 @@ dependencies = [
"supports-color",
"textwrap 0.16.2",
"tokio",
"tokio-stream",
"tracing",
"tracing-appender",
"tracing-subscriber",
@@ -1013,16 +960,6 @@ version = "1.0.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b05b61dc5112cbb17e4b6cd61790d9845d13888356391624cbe7e41efeac1e75"
[[package]]
name = "combine"
version = "4.6.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ba5a308b75df32fe02788e748662718f03fde005016435c444eea572398219fd"
dependencies = [
"bytes",
"memchr",
]
[[package]]
name = "compact_str"
version = "0.8.1"
@@ -1077,16 +1014,6 @@ dependencies = [
"libc",
]
[[package]]
name = "core-foundation"
version = "0.10.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b2a6cd9ae233e7f62ba4e9353e81a88df7fc8a5987b8d445b4d90c879bd156f6"
dependencies = [
"core-foundation-sys",
"libc",
]
[[package]]
name = "core-foundation-sys"
version = "0.8.7"
@@ -1163,7 +1090,6 @@ checksum = "829d955a0bb380ef178a640b91779e3987da38c9aea133b20614cfed8cdea9c6"
dependencies = [
"bitflags 2.9.1",
"crossterm_winapi",
"futures-core",
"mio",
"parking_lot",
"rustix 0.38.44",
@@ -1747,6 +1673,16 @@ dependencies = [
"percent-encoding",
]
[[package]]
name = "fs2"
version = "0.4.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9564fc758e15025b46aa6643b1b77d047d1a56a1aea6e01002ac0c7026876213"
dependencies = [
"libc",
"winapi",
]
[[package]]
name = "futures"
version = "0.3.31"
@@ -2528,28 +2464,6 @@ dependencies = [
"syn 2.0.104",
]
[[package]]
name = "jni"
version = "0.21.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1a87aa2bb7d2af34197c04845522473242e1aa17c12f4935d5856491a7fb8c97"
dependencies = [
"cesu8",
"cfg-if",
"combine",
"jni-sys",
"log",
"thiserror 1.0.69",
"walkdir",
"windows-sys 0.45.0",
]
[[package]]
name = "jni-sys"
version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8eaf4bc02d17cbdd7ff4c7438cafcdf7fb9a4613313ad11b4f8fefe7d3fa0130"
[[package]]
name = "jobserver"
version = "0.1.33"
@@ -2632,9 +2546,9 @@ checksum = "03087c2bad5e1034e8cace5926dec053fb3790248370865f5117a7d0213354c8"
[[package]]
name = "libc"
version = "0.2.175"
version = "0.2.174"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6a82ae493e598baaea5209805c49bbf2ea7de956d50d7da0da1164f9c6d28543"
checksum = "1171693293099992e19cddea4e8b849964e9846f4acee11b3948bcc337be8776"
[[package]]
name = "libfuzzer-sys"
@@ -2781,7 +2695,6 @@ version = "0.0.0"
dependencies = [
"serde",
"serde_json",
"ts-rs",
]
[[package]]
@@ -2792,10 +2705,8 @@ dependencies = [
"assert_cmd",
"codex-core",
"codex-mcp-server",
"codex-protocol",
"mcp-types",
"pretty_assertions",
"serde",
"serde_json",
"shlex",
"tempfile",
@@ -2889,12 +2800,6 @@ dependencies = [
"tempfile",
]
[[package]]
name = "ndk-context"
version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "27b02d87554356db9e9a873add8782d4ea6e3e58ea071a9adb9a2e8ddb884a8b"
[[package]]
name = "new_debug_unreachable"
version = "1.0.6"
@@ -3048,31 +2953,6 @@ dependencies = [
"libc",
]
[[package]]
name = "objc2"
version = "0.6.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "561f357ba7f3a2a61563a186a163d0a3a5247e1089524a3981d49adb775078bc"
dependencies = [
"objc2-encode",
]
[[package]]
name = "objc2-encode"
version = "4.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ef25abbcd74fb2609453eb695bd2f860d389e457f67dc17cafc8b8cbc89d0c33"
[[package]]
name = "objc2-foundation"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "900831247d2fe1a09a683278e5384cfb8c80c79fe6b166f9d14bfdde0ea1b03c"
dependencies = [
"bitflags 2.9.1",
"objc2",
]
[[package]]
name = "object"
version = "0.36.7"
@@ -3176,18 +3056,6 @@ version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "04744f49eae99ab78e0d5c0b603ab218f515ea8cfe5a456d7629ad883a3b6e7d"
[[package]]
name = "os_info"
version = "3.12.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d0e1ac5fde8d43c34139135df8ea9ee9465394b2d8d20f032d38998f64afffc3"
dependencies = [
"log",
"plist",
"serde",
"windows-sys 0.52.0",
]
[[package]]
name = "overload"
version = "0.1.1"
@@ -3799,7 +3667,6 @@ dependencies = [
"base64 0.22.1",
"bytes",
"encoding_rs",
"futures-channel",
"futures-core",
"futures-util",
"h2",
@@ -4122,7 +3989,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "897b2245f0b511c87893af39b033e5ca9cce68824c4d7e7630b5a1d339658d02"
dependencies = [
"bitflags 2.9.1",
"core-foundation 0.9.4",
"core-foundation",
"core-foundation-sys",
"libc",
"security-framework-sys",
@@ -4281,17 +4148,6 @@ dependencies = [
"digest",
]
[[package]]
name = "sha2"
version = "0.10.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a7507d819769d01a365ab707794a4084392c824f54a7a6a7862f8c3d0892b283"
dependencies = [
"cfg-if",
"cpufeatures",
"digest",
]
[[package]]
name = "sharded-slab"
version = "0.1.7"
@@ -4656,7 +4512,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3c879d448e9d986b661742763247d3693ed13609438cf3d006f51f5368a5ba6b"
dependencies = [
"bitflags 2.9.1",
"core-foundation 0.9.4",
"core-foundation",
"system-configuration-sys",
]
@@ -4713,15 +4569,6 @@ dependencies = [
"winapi",
]
[[package]]
name = "termcolor"
version = "1.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "06794f8f6c5c898b3275aebefa6b8a1cb24cd2c6c79397ab15774837a0bc5755"
dependencies = [
"winapi-util",
]
[[package]]
name = "terminal_size"
version = "0.4.2"
@@ -4860,18 +4707,6 @@ dependencies = [
"crunchy",
]
[[package]]
name = "tiny_http"
version = "0.12.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "389915df6413a2e74fb181895f933386023c71110878cd0825588928e64cdc82"
dependencies = [
"ascii",
"chunked_transfer",
"httpdate",
"log",
]
[[package]]
name = "tinystr"
version = "0.8.1"
@@ -5221,30 +5056,6 @@ version = "0.2.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b"
[[package]]
name = "ts-rs"
version = "11.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6ef1b7a6d914a34127ed8e1fa927eb7088903787bcded4fa3eef8f85ee1568be"
dependencies = [
"serde_json",
"thiserror 2.0.12",
"ts-rs-macros",
"uuid",
]
[[package]]
name = "ts-rs-macros"
version = "11.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e9d4ed7b4c18cc150a6a0a1e9ea1ecfa688791220781af6e119f9599a8502a0a"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.104",
"termcolor",
]
[[package]]
name = "tui-input"
version = "0.14.0"
@@ -5348,12 +5159,6 @@ dependencies = [
"serde",
]
[[package]]
name = "urlencoding"
version = "2.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "daf8dba3b7eb870caf1ddeed7bc9d2a049f3cfdfae7cb521b087cc33ae4c49da"
[[package]]
name = "utf8_iter"
version = "1.0.4"
@@ -5577,22 +5382,6 @@ dependencies = [
"wasm-bindgen",
]
[[package]]
name = "webbrowser"
version = "1.0.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "aaf4f3c0ba838e82b4e5ccc4157003fb8c324ee24c058470ffb82820becbde98"
dependencies = [
"core-foundation 0.10.1",
"jni",
"log",
"ndk-context",
"objc2",
"objc2-foundation",
"url",
"web-sys",
]
[[package]]
name = "weezl"
version = "0.1.10"
@@ -5781,15 +5570,6 @@ dependencies = [
"windows-link",
]
[[package]]
name = "windows-sys"
version = "0.45.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "75283be5efb2831d37ea142365f009c02ec203cd29a3ebecbc093d52315b66d0"
dependencies = [
"windows-targets 0.42.2",
]
[[package]]
name = "windows-sys"
version = "0.52.0"
@@ -5817,21 +5597,6 @@ dependencies = [
"windows-targets 0.53.2",
]
[[package]]
name = "windows-targets"
version = "0.42.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8e5180c00cd44c9b1c88adb3693291f1cd93605ded80c250a75d472756b4d071"
dependencies = [
"windows_aarch64_gnullvm 0.42.2",
"windows_aarch64_msvc 0.42.2",
"windows_i686_gnu 0.42.2",
"windows_i686_msvc 0.42.2",
"windows_x86_64_gnu 0.42.2",
"windows_x86_64_gnullvm 0.42.2",
"windows_x86_64_msvc 0.42.2",
]
[[package]]
name = "windows-targets"
version = "0.52.6"
@@ -5864,12 +5629,6 @@ dependencies = [
"windows_x86_64_msvc 0.53.0",
]
[[package]]
name = "windows_aarch64_gnullvm"
version = "0.42.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "597a5118570b68bc08d8d59125332c54f1ba9d9adeedeef5b99b02ba2b0698f8"
[[package]]
name = "windows_aarch64_gnullvm"
version = "0.52.6"
@@ -5882,12 +5641,6 @@ version = "0.53.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "86b8d5f90ddd19cb4a147a5fa63ca848db3df085e25fee3cc10b39b6eebae764"
[[package]]
name = "windows_aarch64_msvc"
version = "0.42.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e08e8864a60f06ef0d0ff4ba04124db8b0fb3be5776a5cd47641e942e58c4d43"
[[package]]
name = "windows_aarch64_msvc"
version = "0.52.6"
@@ -5900,12 +5653,6 @@ version = "0.53.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c7651a1f62a11b8cbd5e0d42526e55f2c99886c77e007179efff86c2b137e66c"
[[package]]
name = "windows_i686_gnu"
version = "0.42.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c61d927d8da41da96a81f029489353e68739737d3beca43145c8afec9a31a84f"
[[package]]
name = "windows_i686_gnu"
version = "0.52.6"
@@ -5930,12 +5677,6 @@ version = "0.53.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9ce6ccbdedbf6d6354471319e781c0dfef054c81fbc7cf83f338a4296c0cae11"
[[package]]
name = "windows_i686_msvc"
version = "0.42.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "44d840b6ec649f480a41c8d80f9c65108b92d89345dd94027bfe06ac444d1060"
[[package]]
name = "windows_i686_msvc"
version = "0.52.6"
@@ -5948,12 +5689,6 @@ version = "0.53.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "581fee95406bb13382d2f65cd4a908ca7b1e4c2f1917f143ba16efe98a589b5d"
[[package]]
name = "windows_x86_64_gnu"
version = "0.42.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8de912b8b8feb55c064867cf047dda097f92d51efad5b491dfb98f6bbb70cb36"
[[package]]
name = "windows_x86_64_gnu"
version = "0.52.6"
@@ -5966,12 +5701,6 @@ version = "0.53.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2e55b5ac9ea33f2fc1716d1742db15574fd6fc8dadc51caab1c16a3d3b4190ba"
[[package]]
name = "windows_x86_64_gnullvm"
version = "0.42.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "26d41b46a36d453748aedef1486d5c7a85db22e56aff34643984ea85514e94a3"
[[package]]
name = "windows_x86_64_gnullvm"
version = "0.52.6"
@@ -5984,12 +5713,6 @@ version = "0.53.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0a6e035dd0599267ce1ee132e51c27dd29437f63325753051e71dd9e42406c57"
[[package]]
name = "windows_x86_64_msvc"
version = "0.42.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9aec5da331524158c6d1a4ac0ab1541149c0b9505fde06423b02f5ef0106b9f0"
[[package]]
name = "windows_x86_64_msvc"
version = "0.52.6"

View File

@@ -16,7 +16,6 @@ members = [
"mcp-types",
"ollama",
"protocol",
"protocol-ts",
"tui",
]
resolver = "2"

View File

@@ -22,8 +22,6 @@ use tree_sitter_bash::LANGUAGE as BASH;
/// Detailed instructions for gpt-4.1 on how to use the `apply_patch` tool.
pub const APPLY_PATCH_TOOL_INSTRUCTIONS: &str = include_str!("../apply_patch_tool_instructions.md");
const APPLY_PATCH_COMMANDS: [&str; 2] = ["apply_patch", "applypatch"];
#[derive(Debug, Error, PartialEq)]
pub enum ApplyPatchError {
#[error(transparent)]
@@ -84,6 +82,7 @@ pub struct ApplyPatchArgs {
}
pub fn maybe_parse_apply_patch(argv: &[String]) -> MaybeApplyPatch {
const APPLY_PATCH_COMMANDS: [&str; 2] = ["apply_patch", "applypatch"];
match argv {
[cmd, body] if APPLY_PATCH_COMMANDS.contains(&cmd.as_str()) => match parse_patch(body) {
Ok(source) => MaybeApplyPatch::Body(source),
@@ -92,9 +91,7 @@ pub fn maybe_parse_apply_patch(argv: &[String]) -> MaybeApplyPatch {
[bash, flag, script]
if bash == "bash"
&& flag == "-lc"
&& APPLY_PATCH_COMMANDS
.iter()
.any(|cmd| script.trim_start().starts_with(cmd)) =>
&& script.trim_start().starts_with("apply_patch") =>
{
match extract_heredoc_body_from_apply_patch_command(script) {
Ok(body) => match parse_patch(&body) {
@@ -169,7 +166,7 @@ impl ApplyPatchAction {
panic!("path must be absolute");
}
#[expect(clippy::expect_used)]
#[allow(clippy::expect_used)]
let filename = path
.file_name()
.expect("path should not be empty")
@@ -182,7 +179,7 @@ impl ApplyPatchAction {
*** End Patch"#,
);
let changes = HashMap::from([(path.to_path_buf(), ApplyPatchFileChange::Add { content })]);
#[expect(clippy::expect_used)]
#[allow(clippy::expect_used)]
Self {
changes,
cwd: path
@@ -265,10 +262,7 @@ pub fn maybe_parse_apply_patch_verified(argv: &[String], cwd: &Path) -> MaybeApp
fn extract_heredoc_body_from_apply_patch_command(
src: &str,
) -> std::result::Result<String, ExtractHeredocError> {
if !APPLY_PATCH_COMMANDS
.iter()
.any(|cmd| src.trim_start().starts_with(cmd))
{
if !src.trim_start().starts_with("apply_patch") {
return Err(ExtractHeredocError::CommandDidNotStartWithApplyPatch);
}
@@ -421,12 +415,12 @@ fn apply_hunks_to_files(hunks: &[Hunk]) -> anyhow::Result<AffectedPaths> {
for hunk in hunks {
match hunk {
Hunk::AddFile { path, contents } => {
if let Some(parent) = path.parent()
&& !parent.as_os_str().is_empty()
{
std::fs::create_dir_all(parent).with_context(|| {
format!("Failed to create parent directories for {}", path.display())
})?;
if let Some(parent) = path.parent() {
if !parent.as_os_str().is_empty() {
std::fs::create_dir_all(parent).with_context(|| {
format!("Failed to create parent directories for {}", path.display())
})?;
}
}
std::fs::write(path, contents)
.with_context(|| format!("Failed to write file {}", path.display()))?;
@@ -445,12 +439,15 @@ fn apply_hunks_to_files(hunks: &[Hunk]) -> anyhow::Result<AffectedPaths> {
let AppliedPatch { new_contents, .. } =
derive_new_contents_from_chunks(path, chunks)?;
if let Some(dest) = move_path {
if let Some(parent) = dest.parent()
&& !parent.as_os_str().is_empty()
{
std::fs::create_dir_all(parent).with_context(|| {
format!("Failed to create parent directories for {}", dest.display())
})?;
if let Some(parent) = dest.parent() {
if !parent.as_os_str().is_empty() {
std::fs::create_dir_all(parent).with_context(|| {
format!(
"Failed to create parent directories for {}",
dest.display()
)
})?;
}
}
std::fs::write(dest, new_contents)
.with_context(|| format!("Failed to write file {}", dest.display()))?;
@@ -532,12 +529,9 @@ fn compute_replacements(
// If a chunk has a `change_context`, we use seek_sequence to find it, then
// adjust our `line_index` to continue from there.
if let Some(ctx_line) = &chunk.change_context {
if let Some(idx) = seek_sequence::seek_sequence(
original_lines,
std::slice::from_ref(ctx_line),
line_index,
false,
) {
if let Some(idx) =
seek_sequence::seek_sequence(original_lines, &[ctx_line.clone()], line_index, false)
{
line_index = idx + 1;
} else {
return Err(ApplyPatchError::ComputeReplacements(format!(
@@ -688,6 +682,8 @@ pub fn print_summary(
#[cfg(test)]
mod tests {
#![allow(clippy::unwrap_used)]
use super::*;
use pretty_assertions::assert_eq;
use std::fs;
@@ -779,33 +775,6 @@ PATCH"#,
}
}
#[test]
fn test_heredoc_applypatch() {
let args = strs_to_strings(&[
"bash",
"-lc",
r#"applypatch <<'PATCH'
*** Begin Patch
*** Add File: foo
+hi
*** End Patch
PATCH"#,
]);
match maybe_parse_apply_patch(&args) {
MaybeApplyPatch::Body(ApplyPatchArgs { hunks, patch: _ }) => {
assert_eq!(
hunks,
vec![Hunk::AddFile {
path: PathBuf::from("foo"),
contents: "hi\n".to_string()
}]
);
}
result => panic!("expected MaybeApplyPatch::Body got {result:?}"),
}
}
#[test]
fn test_add_file_hunk_creates_file_with_contents() {
let dir = tempdir().unwrap();

View File

@@ -427,6 +427,7 @@ fn parse_update_file_chunk(
}
#[test]
#[allow(clippy::unwrap_used)]
fn test_parse_patch() {
assert_eq!(
parse_patch_text("bad", ParseMode::Strict),

View File

@@ -82,34 +82,10 @@ where
})
}
const ILLEGAL_ENV_VAR_PREFIX: &str = "CODEX_";
/// Load env vars from ~/.codex/.env and `$(pwd)/.env`.
///
/// Security: Do not allow `.env` files to create or modify any variables
/// with names starting with `CODEX_`.
fn load_dotenv() {
if let Ok(codex_home) = codex_core::config::find_codex_home()
&& let Ok(iter) = dotenvy::from_path_iter(codex_home.join(".env"))
{
set_filtered(iter);
}
if let Ok(iter) = dotenvy::dotenv_iter() {
set_filtered(iter);
}
}
/// Helper to set vars from a dotenvy iterator while filtering out `CODEX_` keys.
fn set_filtered<I>(iter: I)
where
I: IntoIterator<Item = Result<(String, String), dotenvy::Error>>,
{
for (key, value) in iter.into_iter().flatten() {
if !key.to_ascii_uppercase().starts_with(ILLEGAL_ENV_VAR_PREFIX) {
// It is safe to call set_var() because our process is
// single-threaded at this point in its execution.
unsafe { std::env::set_var(&key, &value) };
}
if let Ok(codex_home) = codex_core::config::find_codex_home() {
dotenvy::from_path(codex_home.join(".env")).ok();
}
dotenvy::dotenv().ok();
}

View File

@@ -1,5 +1,4 @@
use codex_core::config::Config;
use codex_core::user_agent::get_codex_user_agent;
use crate::chatgpt_token::get_chatgpt_token_data;
use crate::chatgpt_token::init_chatgpt_token_from_auth;
@@ -31,7 +30,7 @@ pub(crate) async fn chatgpt_get_request<T: DeserializeOwned>(
.bearer_auth(&token.access_token)
.header("chatgpt-account-id", account_id?)
.header("Content-Type", "application/json")
.header("User-Agent", get_codex_user_agent(None))
.header("User-Agent", "codex-cli")
.send()
.await
.context("Failed to send request")?;

View File

@@ -1,4 +1,3 @@
use codex_login::AuthMode;
use codex_login::CodexAuth;
use std::path::Path;
use std::sync::LazyLock;
@@ -20,7 +19,7 @@ pub fn set_chatgpt_token_data(value: TokenData) {
/// Initialize the ChatGPT token from auth.json file
pub async fn init_chatgpt_token_from_auth(codex_home: &Path) -> std::io::Result<()> {
let auth = CodexAuth::from_codex_home(codex_home, AuthMode::ChatGPT)?;
let auth = CodexAuth::from_codex_home(codex_home)?;
if let Some(auth) = auth {
let token_data = auth.get_token_data().await?;
set_chatgpt_token_data(token_data);

View File

@@ -1,3 +1,5 @@
#![expect(clippy::expect_used)]
use codex_chatgpt::apply_command::apply_diff_from_task;
use codex_chatgpt::get_task::GetTaskResponse;
use std::path::Path;

View File

@@ -25,7 +25,6 @@ codex-core = { path = "../core" }
codex-exec = { path = "../exec" }
codex-login = { path = "../login" }
codex-mcp-server = { path = "../mcp-server" }
codex-protocol = { path = "../protocol" }
codex-tui = { path = "../tui" }
serde_json = "1"
tokio = { version = "1", features = [
@@ -37,4 +36,3 @@ tokio = { version = "1", features = [
] }
tracing = "0.1.41"
tracing-subscriber = "0.3.19"
codex-protocol-ts = { path = "../protocol-ts" }

View File

@@ -3,11 +3,11 @@ use std::path::PathBuf;
use codex_common::CliConfigOverrides;
use codex_core::config::Config;
use codex_core::config::ConfigOverrides;
use codex_core::config_types::SandboxMode;
use codex_core::exec::spawn_command_under_linux_sandbox;
use codex_core::exec_env::create_env;
use codex_core::landlock::spawn_command_under_linux_sandbox;
use codex_core::seatbelt::spawn_command_under_seatbelt;
use codex_core::spawn::StdioPolicy;
use codex_protocol::config_types::SandboxMode;
use crate::LandlockCommand;
use crate::SeatbeltCommand;

View File

@@ -1,33 +1,20 @@
use std::env;
use codex_common::CliConfigOverrides;
use codex_core::config::Config;
use codex_core::config::ConfigOverrides;
use codex_login::AuthMode;
use codex_login::CLIENT_ID;
use codex_login::CodexAuth;
use codex_login::OPENAI_API_KEY_ENV_VAR;
use codex_login::ServerOptions;
use codex_login::login_with_api_key;
use codex_login::login_with_chatgpt;
use codex_login::logout;
use codex_login::run_login_server;
use std::env;
use std::path::PathBuf;
pub async fn login_with_chatgpt(codex_home: PathBuf) -> std::io::Result<()> {
let opts = ServerOptions::new(codex_home, CLIENT_ID.to_string());
let server = run_login_server(opts)?;
eprintln!(
"Starting local login server on http://localhost:{}.\nIf your browser did not open, navigate to this URL to authenticate:\n\n{}",
server.actual_port, server.auth_url,
);
server.block_until_done().await
}
pub async fn run_login_with_chatgpt(cli_config_overrides: CliConfigOverrides) -> ! {
let config = load_config_or_exit(cli_config_overrides);
match login_with_chatgpt(config.codex_home).await {
let capture_output = false;
match login_with_chatgpt(&config.codex_home, capture_output).await {
Ok(_) => {
eprintln!("Successfully logged in");
std::process::exit(0);
@@ -60,18 +47,18 @@ pub async fn run_login_with_api_key(
pub async fn run_login_status(cli_config_overrides: CliConfigOverrides) -> ! {
let config = load_config_or_exit(cli_config_overrides);
match CodexAuth::from_codex_home(&config.codex_home, config.preferred_auth_method) {
match CodexAuth::from_codex_home(&config.codex_home) {
Ok(Some(auth)) => match auth.mode {
AuthMode::ApiKey => match auth.get_token().await {
Ok(api_key) => {
eprintln!("Logged in using an API key - {}", safe_format_key(&api_key));
if let Ok(env_api_key) = env::var(OPENAI_API_KEY_ENV_VAR)
&& env_api_key == api_key
{
eprintln!(
" API loaded from OPENAI_API_KEY environment variable or .env file"
);
if let Ok(env_api_key) = env::var(OPENAI_API_KEY_ENV_VAR) {
if env_api_key == api_key {
eprintln!(
" API loaded from OPENAI_API_KEY environment variable or .env file"
);
}
}
std::process::exit(0);
}

View File

@@ -27,11 +27,7 @@ use crate::proto::ProtoCli;
author,
version,
// If a subcommand is given, ignore requirements of the default args.
subcommand_negates_reqs = true,
// The executable is sometimes invoked via a platform-specific name like
// `codex-x86_64-unknown-linux-musl`, but the help output should always use
// the generic `codex` command name that users run.
bin_name = "codex"
subcommand_negates_reqs = true
)]
struct MultitoolCli {
#[clap(flatten)]
@@ -72,10 +68,6 @@ enum Subcommand {
/// Apply the latest diff produced by Codex agent as a `git apply` to your local working tree.
#[clap(visible_alias = "a")]
Apply(ApplyCommand),
/// Internal: generate TypeScript protocol bindings.
#[clap(hide = true)]
GenerateTs(GenerateTsCommand),
}
#[derive(Debug, Parser)]
@@ -124,17 +116,6 @@ struct LogoutCommand {
config_overrides: CliConfigOverrides,
}
#[derive(Debug, Parser)]
struct GenerateTsCommand {
/// Output directory where .ts files will be written
#[arg(short = 'o', long = "out", value_name = "DIR")]
out_dir: PathBuf,
/// Optional path to the Prettier executable to format generated files
#[arg(short = 'p', long = "prettier", value_name = "PRETTIER_BIN")]
prettier: Option<PathBuf>,
}
fn main() -> anyhow::Result<()> {
arg0_dispatch_or_else(|codex_linux_sandbox_exe| async move {
cli_main(codex_linux_sandbox_exe).await?;
@@ -209,9 +190,6 @@ async fn cli_main(codex_linux_sandbox_exe: Option<PathBuf>) -> anyhow::Result<()
prepend_config_flags(&mut apply_cli.config_overrides, cli.config_overrides);
run_apply_command(apply_cli, None).await?;
}
Some(Subcommand::GenerateTs(gen_cli)) => {
codex_protocol_ts::generate_ts(&gen_cli.out_dir, gen_cli.prettier.as_deref())?;
}
}
Ok(())

View File

@@ -1,14 +1,15 @@
use std::io::IsTerminal;
use std::sync::Arc;
use clap::Parser;
use codex_common::CliConfigOverrides;
use codex_core::ConversationManager;
use codex_core::NewConversation;
use codex_core::Codex;
use codex_core::CodexSpawnOk;
use codex_core::config::Config;
use codex_core::config::ConfigOverrides;
use codex_core::protocol::Event;
use codex_core::protocol::EventMsg;
use codex_core::protocol::Submission;
use codex_core::util::notify_on_sigint;
use codex_login::CodexAuth;
use tokio::io::AsyncBufReadExt;
use tokio::io::BufReader;
use tracing::error;
@@ -35,38 +36,22 @@ pub async fn run_main(opts: ProtoCli) -> anyhow::Result<()> {
.map_err(anyhow::Error::msg)?;
let config = Config::load_with_cli_overrides(overrides_vec, ConfigOverrides::default())?;
// Use conversation_manager API to start a conversation
let conversation_manager = ConversationManager::default();
let NewConversation {
conversation_id: _,
conversation,
session_configured,
} = conversation_manager.new_conversation(config).await?;
// Simulate streaming the session_configured event.
let synthetic_event = Event {
// Fake id value.
id: "".to_string(),
msg: EventMsg::SessionConfigured(session_configured),
};
let session_configured_event = match serde_json::to_string(&synthetic_event) {
Ok(s) => s,
Err(e) => {
error!("Failed to serialize session_configured: {e}");
return Err(anyhow::Error::from(e));
}
};
println!("{session_configured_event}");
let auth = CodexAuth::from_codex_home(&config.codex_home)?;
let ctrl_c = notify_on_sigint();
let CodexSpawnOk { codex, .. } = Codex::spawn(config, auth, ctrl_c.clone()).await?;
let codex = Arc::new(codex);
// Task that reads JSON lines from stdin and forwards to Submission Queue
let sq_fut = {
let conversation = conversation.clone();
let codex = codex.clone();
let ctrl_c = ctrl_c.clone();
async move {
let stdin = BufReader::new(tokio::io::stdin());
let mut lines = stdin.lines();
loop {
let result = tokio::select! {
_ = tokio::signal::ctrl_c() => {
_ = ctrl_c.notified() => {
info!("Interrupted, exiting");
break
},
res = lines.next_line() => res,
@@ -80,7 +65,7 @@ pub async fn run_main(opts: ProtoCli) -> anyhow::Result<()> {
}
match serde_json::from_str::<Submission>(line) {
Ok(sub) => {
if let Err(e) = conversation.submit_with_id(sub).await {
if let Err(e) = codex.submit_with_id(sub).await {
error!("{e:#}");
break;
}
@@ -103,8 +88,8 @@ pub async fn run_main(opts: ProtoCli) -> anyhow::Result<()> {
let eq_fut = async move {
loop {
let event = tokio::select! {
_ = tokio::signal::ctrl_c() => break,
event = conversation.next_event() => event,
_ = ctrl_c.notified() => break,
event = codex.next_event() => event,
};
match event {
Ok(event) => {

View File

@@ -1,9 +0,0 @@
allow-expect-in-tests = true
allow-unwrap-in-tests = true
disallowed-methods = [
{ path = "ratatui::style::Color::Rgb", reason = "Use ANSI colors, which work better in various terminal themes." },
{ path = "ratatui::style::Color::Indexed", reason = "Use ANSI colors, which work better in various terminal themes." },
{ path = "ratatui::style::Stylize::white", reason = "Avoid hardcoding white; prefer default fg or dim/bold. Exception: Disable this rule if rendering over a hardcoded ANSI background." },
{ path = "ratatui::style::Stylize::black", reason = "Avoid hardcoding black; prefer default fg or dim/bold. Exception: Disable this rule if rendering over a hardcoded ANSI background." },
{ path = "ratatui::style::Stylize::yellow", reason = "Avoid yellow; prefer other colors in `tui/styles.md`." },
]

View File

@@ -9,7 +9,6 @@ workspace = true
[dependencies]
clap = { version = "4", features = ["derive", "wrap_help"], optional = true }
codex-core = { path = "../core" }
codex-protocol = { path = "../protocol" }
serde = { version = "1", optional = true }
toml = { version = "0.9", optional = true }

View File

@@ -1,46 +0,0 @@
use codex_core::protocol::AskForApproval;
use codex_core::protocol::SandboxPolicy;
/// A simple preset pairing an approval policy with a sandbox policy.
#[derive(Debug, Clone)]
pub struct ApprovalPreset {
/// Stable identifier for the preset.
pub id: &'static str,
/// Display label shown in UIs.
pub label: &'static str,
/// Short human description shown next to the label in UIs.
pub description: &'static str,
/// Approval policy to apply.
pub approval: AskForApproval,
/// Sandbox policy to apply.
pub sandbox: SandboxPolicy,
}
/// Built-in list of approval presets that pair approval and sandbox policy.
///
/// Keep this UI-agnostic so it can be reused by both TUI and MCP server.
pub fn builtin_approval_presets() -> Vec<ApprovalPreset> {
vec![
ApprovalPreset {
id: "read-only",
label: "Read Only",
description: "Codex can read files and answer questions. Codex requires approval to make edits, run commands, or access network",
approval: AskForApproval::OnRequest,
sandbox: SandboxPolicy::ReadOnly,
},
ApprovalPreset {
id: "auto",
label: "Auto",
description: "Codex can read files, make edits, and run commands in the workspace. Codex requires approval to work outside the workspace or access network",
approval: AskForApproval::OnRequest,
sandbox: SandboxPolicy::new_workspace_write_policy(),
},
ApprovalPreset {
id: "full-access",
label: "Full Access",
description: "Codex can read files, make edits, and run commands with network access, without approval. Exercise caution",
approval: AskForApproval::Never,
sandbox: SandboxPolicy::DangerFullAccess,
},
]
}
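For reference, a minimal sketch of how a selection UI might have consumed this now-removed module; the `main` wrapper and formatting are illustrative, not part of the original crate:

```rust
fn main() {
    // Render each preset the way a picker might: id, label, description.
    for preset in builtin_approval_presets() {
        println!("{:<12} {}: {}", preset.id, preset.label, preset.description);
    }
}
```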

View File

@@ -142,6 +142,7 @@ fn parse_toml_value(raw: &str) -> Result<Value, toml::de::Error> {
}
#[cfg(all(test, feature = "cli"))]
#[allow(clippy::expect_used, clippy::unwrap_used)]
mod tests {
use super::*;

View File

@@ -29,8 +29,3 @@ mod config_summary;
pub use config_summary::create_config_summary_entries;
// Shared fuzzy matcher (used by TUI selection popups and other UI filtering)
pub mod fuzzy_match;
// Shared model presets used by TUI and MCP server
pub mod model_presets;
// Shared approval presets (AskForApproval + Sandbox) used by TUI and MCP server
// Not to be confused with AskForApproval, which we should probably rename to EscalationPolicy.
pub mod approval_presets;

View File

@@ -1,54 +0,0 @@
use codex_core::protocol_config_types::ReasoningEffort;
/// A simple preset pairing a model slug with a reasoning effort.
#[derive(Debug, Clone, Copy)]
pub struct ModelPreset {
/// Stable identifier for the preset.
pub id: &'static str,
/// Display label shown in UIs.
pub label: &'static str,
/// Short human description shown next to the label in UIs.
pub description: &'static str,
/// Model slug (e.g., "gpt-5").
pub model: &'static str,
/// Reasoning effort to apply for this preset.
pub effort: ReasoningEffort,
}
/// Built-in list of model presets that pair a model with a reasoning effort.
///
/// Keep this UI-agnostic so it can be reused by both TUI and MCP server.
pub fn builtin_model_presets() -> &'static [ModelPreset] {
// Order reflects effort from minimal to high.
const PRESETS: &[ModelPreset] = &[
ModelPreset {
id: "gpt-5-minimal",
label: "gpt-5 minimal",
description: "— fastest responses with limited reasoning; ideal for coding, instructions, or lightweight tasks",
model: "gpt-5",
effort: ReasoningEffort::Minimal,
},
ModelPreset {
id: "gpt-5-low",
label: "gpt-5 low",
description: "— balances speed with some reasoning; useful for straightforward queries and short explanations",
model: "gpt-5",
effort: ReasoningEffort::Low,
},
ModelPreset {
id: "gpt-5-medium",
label: "gpt-5 medium",
description: "— default setting; provides a solid balance of reasoning depth and latency for general-purpose tasks",
model: "gpt-5",
effort: ReasoningEffort::Medium,
},
ModelPreset {
id: "gpt-5-high",
label: "gpt-5 high",
description: "— maximizes reasoning depth for complex or ambiguous problems",
model: "gpt-5",
effort: ReasoningEffort::High,
},
];
PRESETS
}
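Similarly, a hedged sketch of resolving one of these now-removed presets by its stable id; the helper name is invented for illustration:

```rust
/// Hypothetical helper, not part of the original file.
fn find_model_preset(id: &str) -> Option<&'static ModelPreset> {
    builtin_model_presets().iter().find(|p| p.id == id)
}
```

For example, `find_model_preset("gpt-5-high")` would return the high-effort preset.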

View File

@@ -7,7 +7,7 @@
//! `config.toml`.
use clap::ValueEnum;
use codex_protocol::config_types::SandboxMode;
use codex_core::config_types::SandboxMode;
#[derive(Clone, Copy, Debug, ValueEnum)]
#[value(rename_all = "kebab-case")]

View File

@@ -149,7 +149,6 @@ approval_policy = "untrusted"
```
If you want to be notified whenever a command fails, use "on-failure":
```toml
# If the command fails when run in the sandbox, Codex asks for permission to
# retry the command outside the sandbox.
@@ -157,14 +156,12 @@ approval_policy = "on-failure"
```
If you want the model to run until it decides that it needs to ask you for escalated permissions, use "on-request":
```toml
# The model decides when to escalate
approval_policy = "on-request"
```
Alternatively, you can have the model run until it is done, and never ask to run a command with escalated permissions:
```toml
# User is never prompted: if the command fails, Codex will automatically try
# something out. Note the `exec` subcommand always uses this mode.
@@ -220,14 +217,17 @@ Users can specify config values at multiple levels. Order of precedence is as fo
## model_reasoning_effort
If the selected model is known to support reasoning (for example: `o3`, `o4-mini`, `codex-*`, `gpt-5`), reasoning is enabled by default when using the Responses API. As explained in the [OpenAI Platform documentation](https://platform.openai.com/docs/guides/reasoning?api-mode=responses#get-started-with-reasoning), this can be set to:
If the model name starts with `"o"` (as in `"o3"` or `"o4-mini"`) or `"codex"`, reasoning is enabled by default when using the Responses API. As explained in the [OpenAI Platform documentation](https://platform.openai.com/docs/guides/reasoning?api-mode=responses#get-started-with-reasoning), this can be set to:
- `"minimal"`
- `"low"`
- `"medium"` (default)
- `"high"`
Note: to minimize reasoning, choose `"minimal"`.
To disable reasoning, set `model_reasoning_effort` to `"none"` in your config:
```toml
model_reasoning_effort = "none" # disable reasoning
```
## model_reasoning_summary
@@ -281,9 +281,6 @@ sandbox_mode = "workspace-write"
exclude_tmpdir_env_var = false
exclude_slash_tmp = false
# Optional list of _additional_ writable roots beyond $TMPDIR and /tmp.
writable_roots = ["/Users/YOU/.pyenv/shims"]
# Allow the command being run inside the sandbox to make outbound network
# requests. Disabled by default.
network_access = false
@@ -300,16 +297,6 @@ This is reasonable to use if Codex is running in an environment that provides it
Though using this option may also be necessary if you try to use Codex in environments where its native sandboxing mechanisms are unsupported, such as older Linux kernels or on Windows.
## Approval presets
Codex provides three main Approval Presets:
- Read Only: Codex can read files and answer questions; edits, running commands, and network access require approval.
- Auto: Codex can read files, make edits, and run commands in the workspace without approval; asks for approval outside the workspace or for network access.
- Full Access: Full disk and network access without prompts; extremely risky.
You can further customize how Codex runs at the command line using the `--ask-for-approval` and `--sandbox` options.
## mcp_servers
Defines the list of MCP servers that Codex can consult for tool use. Currently, only servers that are launched by executing a program and that communicate over stdio are supported. For servers that use the SSE transport, consider an adapter like [mcp-proxy](https://github.com/sparfenyuk/mcp-proxy).
@@ -511,12 +498,10 @@ hide_agent_reasoning = true # defaults to false
Surfaces the model's raw chain-of-thought ("raw reasoning content") when available.
Notes:
- Only takes effect if the selected model/provider actually emits raw reasoning content. Many models do not. When unsupported, this option has no visible effect.
- Raw reasoning may include intermediate thoughts or sensitive context. Enable only if acceptable for your workflow.
Example:
```toml
show_raw_agent_reasoning = true # defaults to false
```

View File

@@ -19,21 +19,19 @@ chrono = { version = "0.4", features = ["serde"] }
codex-apply-patch = { path = "../apply-patch" }
codex-login = { path = "../login" }
codex-mcp-client = { path = "../mcp-client" }
codex-protocol = { path = "../protocol" }
dirs = "6"
env-flags = "0.1.1"
eventsource-stream = "0.2.3"
fs2 = "0.4.3"
futures = "0.3"
libc = "0.2.175"
libc = "0.2.174"
mcp-types = { path = "../mcp-types" }
mime_guess = "2.0"
os_info = "3.12.0"
rand = "0.9"
regex-lite = "0.1.6"
reqwest = { version = "0.12", features = ["json", "stream"] }
serde = { version = "1", features = ["derive"] }
serde_bytes = "0.11"
serde_json = "1"
serde_bytes = "0.11"
sha1 = "0.10.6"
shlex = "1.3.0"
similar = "2.7.0"

View File

@@ -1,7 +1,6 @@
You are a coding agent running in the Codex CLI, a terminal-based coding assistant. Codex CLI is an open source project led by OpenAI. You are expected to be precise, safe, and helpful.
Your capabilities:
- Receive user prompts and other context provided by the harness, such as files in the workspace.
- Communicate with the user by streaming thinking & responses, and by making & updating plans.
- Emit function calls to run terminal commands and apply patches. Depending on how this specific run is configured, you can request that these function calls be escalated to the user for approval before running. More on this in the "Sandbox and approvals" section.
@@ -21,13 +20,11 @@ Your default personality and tone is concise, direct, and friendly. You communic
Before making tool calls, send a brief preamble to the user explaining what you're about to do. When sending preamble messages, follow these principles and examples:
- **Logically group related actions**: if you're about to run several related commands, describe them together in one preamble rather than sending a separate note for each.
- **Keep it concise**: be no more than 1-2 sentences, focused on immediate, tangible next steps. (8–12 words for quick updates).
- **Keep it concise**: be no more than 1-2 sentences (8–12 words for quick updates).
- **Build on prior context**: if this is not your first tool call, use the preamble message to connect the dots with what's been done so far and create a sense of momentum and clarity for the user to understand your next actions.
- **Keep your tone light, friendly and curious**: add small touches of personality in preambles so they feel collaborative and engaging.
- **Exception**: Avoid adding a preamble for every trivial read (e.g., `cat` a single file) unless it's part of a larger grouped action.
**Examples:**
- “I've explored the repo; now checking the API route definitions.”
- “Next, I'll patch the config and update the related tests.”
- “I'm about to scaffold the CLI commands and helper functions.”
@@ -37,18 +34,15 @@ Before making tool calls, send a brief preamble to the user explaining what you
- “Alright, build pipeline order is interesting. Checking how it reports failures.”
- “Spotted a clever caching util; now hunting where it gets used.”
**Avoid:**
- Adding a preamble for every trivial read (e.g., `cat` a single file) unless it's part of a larger grouped action.
- Jumping straight into tool calls without explaining whats about to happen.
- Writing overly long or speculative preambles — focus on immediate, tangible next steps.
## Planning
You have access to an `update_plan` tool which tracks steps and progress and renders them to the user. Using the tool helps demonstrate that you've understood the task and convey how you're approaching it. Plans can help to make complex, ambiguous, or multi-phase work clearer and more collaborative for the user. A good plan should break the task into meaningful, logically ordered steps that are easy to verify as you go.
Note that plans are not for padding out simple work with filler steps or stating the obvious. The content of your plan should not involve doing anything that you aren't capable of doing (i.e. don't try to test things that you can't test). Do not use plans for simple or single-step queries that you can just do or answer immediately.
Do not repeat the full contents of the plan after an `update_plan` call — the harness already displays it. Instead, summarize the change made and highlight any important context or next step.
Before running a command, consider whether or not you have completed the previous step, and make sure to mark it as completed before moving on to the next step. It may be the case that you complete all steps in your plan after a single pass of implementation. If this is the case, you can simply mark all the planned steps as completed. Sometimes, you may need to change plans in the middle of a task: call `update_plan` with the updated plan and make sure to provide an `explanation` of the rationale when doing so.
You have access to an `update_plan` tool which tracks steps and progress and renders them to the user. Using the tool helps demonstrate that you've understood the task and convey how you're approaching it. Plans can help to make complex, ambiguous, or multi-phase work clearer and more collaborative for the user. A good plan should break the task into meaningful, logically ordered steps that are easy to verify as you go. Note that plans are not for padding out simple work with filler steps or stating the obvious. Do not repeat the full contents of the plan after an `update_plan` call — the harness already displays it. Instead, summarize the change made and highlight any important context or next step.
Use a plan when:
- The task is non-trivial and will require multiple actions over a long time horizon.
- There are logical phases or dependencies where sequencing matters.
- The work has ambiguity that benefits from outlining high-level goals.
@@ -57,6 +51,14 @@ Use a plan when:
- The user has asked you to use the plan tool (aka "TODOs")
- You generate additional steps while working, and plan to do them before yielding to the user
Skip a plan when:
- The task is simple and direct.
- Breaking it down would only produce literal or trivial steps.
Planning steps are called "steps" in the tool, but really they're more like tasks or TODOs. As such they should be very concise descriptions of non-obvious work that an engineer might do like "Write the API spec", then "Update the backend", then "Implement the frontend". On the other hand, it's obvious that you'll usually have to "Explore the codebase" or "Implement the changes", so those are not worth tracking in your plan.
It may be the case that you complete all steps in your plan after a single pass of implementation. If this is the case, you can simply mark all the planned steps as completed. The content of your plan should not involve doing anything that you aren't capable of doing (i.e. don't try to test things that you can't test). Do not use plans for simple or single-step queries that you can just do or answer immediately.
### Examples
**High-quality plans**
@@ -113,11 +115,10 @@ If you need to write a plan, only write high quality plans, not low quality ones
You are a coding agent. Please keep going until the query is completely resolved, before ending your turn and yielding back to the user. Only terminate your turn when you are sure that the problem is solved. Autonomously resolve the query to the best of your ability, using the tools available to you, before coming back to the user. Do NOT guess or make up an answer.
You MUST adhere to the following criteria when solving queries:
- Working on the repo(s) in the current environment is allowed, even if they are proprietary.
- Analyzing code for vulnerabilities is allowed.
- Showing user code and tool call details is allowed.
- Use the `apply_patch` tool to edit files (NEVER try `applypatch` or `apply-patch`, only `apply_patch`): {"command":["apply_patch","*** Begin Patch\\n*** Update File: path/to/file.py\\n@@ def example():\\n- pass\\n+ return 123\\n*** End Patch"]}
- Use the `apply_patch` tool to edit files (NEVER try `applypatch` or `apply-patch`, only `apply_patch`): {"command":["apply_patch","*** Begin Patch\\n*** Update File: path/to/file.py\\n@@ def example():\\n- pass\\n+ return 123\\n*** End Patch"]}
If completing the user's task requires writing or modifying files, your code and final answer should follow these coding guidelines, though user instructions (i.e. AGENTS.md) may override these guidelines:
@@ -147,25 +148,21 @@ For all of testing, running, building, and formatting, do not attempt to fix unr
The Codex CLI harness supports several different sandboxing and approval configurations that the user can choose from.
Filesystem sandboxing prevents you from editing files without user approval. The options are:
- **read-only**: You can only read files.
- **workspace-write**: You can read files. You can write to files in your workspace folder, but not outside it.
- **danger-full-access**: No filesystem sandboxing.
- *read-only*: You can only read files.
- *workspace-write*: You can read files. You can write to files in your workspace folder, but not outside it.
- *danger-full-access*: No filesystem sandboxing.
Network sandboxing prevents you from accessing the network without approval. Options are
- **restricted**
- **enabled**
- *ON*
- *OFF*
Approvals are your mechanism to get user consent to perform more privileged actions. Although they introduce friction to the user because your work is paused until the user responds, you should leverage them to accomplish your important work. Do not let these settings or the sandbox deter you from attempting to accomplish the user's task. Approval options are
- **untrusted**: The harness will escalate most commands for user approval, apart from a limited allowlist of safe "read" commands.
- **on-failure**: The harness will allow all commands to run in the sandbox (if enabled), and failures will be escalated to the user for approval to run again without the sandbox.
- **on-request**: Commands will be run in the sandbox by default, and you can specify in your tool call if you want to escalate a command to run without sandboxing. (Note that this mode is not always available. If it is, you'll see parameters for it in the `shell` command description.)
- **never**: This is a non-interactive mode where you may NEVER ask the user for approval to run commands. Instead, you must always persist and work around constraints to solve the task for the user. You MUST do your utmost best to finish the task and validate your work before yielding. If this mode is paired with `danger-full-access`, take advantage of it to deliver the best outcome for the user. Further, in this mode, your default testing philosophy is overridden: Even if you don't see local patterns for testing, you may add tests and scripts to validate your work. Just remove them before yielding.
- *untrusted*: The harness will escalate most commands for user approval, apart from a limited allowlist of safe "read" commands.
- *on-failure*: The harness will allow all commands to run in the sandbox (if enabled), and failures will be escalated to the user for approval to run again without the sandbox.
- *on-request*: Commands will be run in the sandbox by default, and you can specify in your tool call if you want to escalate a command to run without sandboxing. (Note that this mode is not always available. If it is, you'll see parameters for it in the `shell` command description.)
- *never*: This is a non-interactive mode where you may NEVER ask the user for approval to run commands. Instead, you must always persist and work around constraints to solve the task for the user. You MUST do your utmost best to finish the task and validate your work before yielding. If this mode is paired with `danger-full-access`, take advantage of it to deliver the best outcome for the user. Further, in this mode, your default testing philosophy is overridden: Even if you don't see local patterns for testing, you may add tests and scripts to validate your work. Just remove them before yielding.
When you are running with approvals `on-request`, and sandboxing enabled, here are scenarios where you'll need to request approval:
- You need to run a command that writes to a directory that requires it (e.g. running tests that write to /tmp)
- You need to run a GUI app (e.g., open/xdg-open/osascript) to open browsers or files.
- You are running sandboxed and need to run a command that requires network access (e.g. installing packages)
@@ -210,7 +207,6 @@ Brevity is very important as a default. You should be very concise (i.e. no more
You are producing plain text that will later be styled by the CLI. Follow these rules exactly. Formatting should make results easy to scan, but not feel mechanical. Use judgment to decide how much structure adds value.
**Section Headers**
- Use only when they improve clarity — they are not mandatory for every answer.
- Choose descriptive names that fit the content
- Keep headers short (1–3 words) and in `**Title Case**`. Always start headers with `**` and end with `**`
@@ -218,7 +214,6 @@ You are producing plain text that will later be styled by the CLI. Follow these
- Section headers should only be used where they genuinely improve scanability; avoid fragmenting the answer.
**Bullets**
- Use `-` followed by a space for every bullet.
- Bold the keyword, then colon + concise description.
- Merge related points when possible; avoid a bullet for every trivial detail.
@@ -227,13 +222,11 @@ You are producing plain text that will later be styled by the CLI. Follow these
- Use consistent keyword phrasing and formatting across sections.
**Monospace**
- Wrap all commands, file paths, env vars, and code identifiers in backticks (`` `...` ``).
- Apply to inline examples and to bullet keywords if the keyword itself is a literal file/command.
- Never mix monospace and bold markers; choose one based on whether it's a keyword (`**`) or inline code/path (`` ` ``).
**Structure**
- Place related bullets together; don't mix unrelated concepts in the same section.
- Order sections from general → specific → supporting info.
- For subsections (e.g., “Binaries” under “Rust Workspace”), introduce with a bolded keyword bullet, then list items under it.
@@ -242,7 +235,6 @@ You are producing plain text that will later be styled by the CLI. Follow these
- Simple results → minimal headers, possibly just a short list or paragraph.
**Tone**
- Keep the voice collaborative and natural, like a coding partner handing off work.
- Be concise and factual — no filler or conversational commentary and avoid unnecessary repetition
- Use present tense and active voice (e.g., “Runs tests” not “This will run tests”).
@@ -250,7 +242,6 @@ You are producing plain text that will later be styled by the CLI. Follow these
- Use parallel structure in lists for consistency.
**Dont**
- Don't use literal words “bold” or “monospace” in the content.
- Don't nest bullets or create deep hierarchies.
- Don't output ANSI escape codes directly — the CLI renderer applies them.
@@ -261,14 +252,7 @@ Generally, ensure your final answers adapt their shape and depth to the request.
For casual greetings, acknowledgements, or other one-off conversational messages that are not delivering substantive information or structured results, respond naturally without section headers or bullet formatting.
# Tool Guidelines
## Shell commands
When using the shell, you must adhere to the following guidelines:
- When searching for text or files, prefer using `rg` or `rg --files` respectively because `rg` is much faster than alternatives like `grep`. (If the `rg` command is not found, then use alternatives.)
- Read files in chunks with a max chunk size of 250 lines. Do not use python scripts to attempt to output larger chunks of a file. Command line output will be truncated after 10 kilobytes or 256 lines of output, regardless of the command used.
# Tools
## `apply_patch`

View File

@@ -1,5 +1,4 @@
use crate::codex::Session;
use crate::codex::TurnContext;
use crate::models::FunctionCallOutputPayload;
use crate::models::ResponseInputItem;
use crate::protocol::FileChange;
@@ -9,6 +8,7 @@ use crate::safety::assess_patch_safety;
use codex_apply_patch::ApplyPatchAction;
use codex_apply_patch::ApplyPatchFileChange;
use std::collections::HashMap;
use std::path::Path;
use std::path::PathBuf;
pub const CODEX_APPLY_PATCH_ARG1: &str = "--codex-run-as-apply-patch";
@@ -41,16 +41,21 @@ impl From<ResponseInputItem> for InternalApplyPatchInvocation {
pub(crate) async fn apply_patch(
sess: &Session,
turn_context: &TurnContext,
sub_id: &str,
call_id: &str,
action: ApplyPatchAction,
) -> InternalApplyPatchInvocation {
let writable_roots_snapshot = {
#[allow(clippy::unwrap_used)]
let guard = sess.writable_roots.lock().unwrap();
guard.clone()
};
match assess_patch_safety(
&action,
turn_context.approval_policy,
&turn_context.sandbox_policy,
&turn_context.cwd,
sess.approval_policy,
&writable_roots_snapshot,
&sess.cwd,
) {
SafetyCheck::AutoApprove { .. } => {
InternalApplyPatchInvocation::DelegateToExec(ApplyPatchExec {
@@ -123,3 +128,30 @@ pub(crate) fn convert_apply_patch_to_protocol(
}
result
}
pub(crate) fn get_writable_roots(cwd: &Path) -> Vec<PathBuf> {
let mut writable_roots = Vec::new();
if cfg!(target_os = "macos") {
// On macOS, $TMPDIR is private to the user.
writable_roots.push(std::env::temp_dir());
// Allow pyenv to update its shims directory. Without this, any tool
// that happens to be managed by `pyenv` will fail with an error like:
//
// pyenv: cannot rehash: $HOME/.pyenv/shims isn't writable
//
// which is emitted every time `pyenv` tries to run `rehash` (for
// example, after installing a new Python package that drops an entry
// point). Although the sandbox is intentionally read-only by default,
// writing to the user's local `pyenv` directory is safe because it
// is already user-writable and scoped to the current user account.
if let Ok(home_dir) = std::env::var("HOME") {
let pyenv_dir = PathBuf::from(home_dir).join(".pyenv");
writable_roots.push(pyenv_dir);
}
}
writable_roots.push(cwd.to_path_buf());
writable_roots
}
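A small test sketch of the invariant this helper guarantees on every platform; the module name and path are illustrative:

```rust
#[cfg(test)]
mod writable_roots_tests {
    use super::*;

    #[test]
    fn cwd_is_always_a_writable_root() {
        let cwd = Path::new("/some/repo");
        // `cwd` is pushed unconditionally, after any platform-specific roots.
        assert!(get_writable_roots(cwd).contains(&cwd.to_path_buf()));
    }
}
```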

View File

@@ -132,6 +132,7 @@ fn parse_plain_command_from_node(cmd: tree_sitter::Node, src: &str) -> Option<Ve
#[cfg(test)]
mod tests {
#![allow(clippy::unwrap_used)]
use super::*;
fn parse_seq(src: &str) -> Option<Vec<Vec<String>>> {

View File

@@ -213,9 +213,7 @@ async fn process_chat_sse<S>(
let sse = match timeout(idle_timeout, stream.next()).await {
Ok(Some(Ok(ev))) => ev,
Ok(Some(Err(e))) => {
let _ = tx_event
.send(Err(CodexErr::Stream(e.to_string(), None)))
.await;
let _ = tx_event.send(Err(CodexErr::Stream(e.to_string()))).await;
return;
}
Ok(None) => {
@@ -230,10 +228,7 @@ async fn process_chat_sse<S>(
}
Err(_) => {
let _ = tx_event
.send(Err(CodexErr::Stream(
"idle timeout waiting for SSE".into(),
None,
)))
.send(Err(CodexErr::Stream("idle timeout waiting for SSE".into())))
.await;
return;
}
@@ -290,12 +285,13 @@ async fn process_chat_sse<S>(
.get("delta")
.and_then(|d| d.get("content"))
.and_then(|c| c.as_str())
&& !content.is_empty()
{
assistant_text.push_str(content);
let _ = tx_event
.send(Ok(ResponseEvent::OutputTextDelta(content.to_string())))
.await;
if !content.is_empty() {
assistant_text.push_str(content);
let _ = tx_event
.send(Ok(ResponseEvent::OutputTextDelta(content.to_string())))
.await;
}
}
// Forward any reasoning/thinking deltas if present.
@@ -332,25 +328,27 @@ async fn process_chat_sse<S>(
.get("delta")
.and_then(|d| d.get("tool_calls"))
.and_then(|tc| tc.as_array())
&& let Some(tool_call) = tool_calls.first()
{
// Mark that we have an active function call in progress.
fn_call_state.active = true;
if let Some(tool_call) = tool_calls.first() {
// Mark that we have an active function call in progress.
fn_call_state.active = true;
// Extract call_id if present.
if let Some(id) = tool_call.get("id").and_then(|v| v.as_str()) {
fn_call_state.call_id.get_or_insert_with(|| id.to_string());
}
// Extract function details if present.
if let Some(function) = tool_call.get("function") {
if let Some(name) = function.get("name").and_then(|n| n.as_str()) {
fn_call_state.name.get_or_insert_with(|| name.to_string());
// Extract call_id if present.
if let Some(id) = tool_call.get("id").and_then(|v| v.as_str()) {
fn_call_state.call_id.get_or_insert_with(|| id.to_string());
}
if let Some(args_fragment) = function.get("arguments").and_then(|a| a.as_str())
{
fn_call_state.arguments.push_str(args_fragment);
// Extract function details if present.
if let Some(function) = tool_call.get("function") {
if let Some(name) = function.get("name").and_then(|n| n.as_str()) {
fn_call_state.name.get_or_insert_with(|| name.to_string());
}
if let Some(args_fragment) =
function.get("arguments").and_then(|a| a.as_str())
{
fn_call_state.arguments.push_str(args_fragment);
}
}
}
}
@@ -488,14 +486,15 @@ where
// Only use the final assistant message if we have not
// seen any deltas; otherwise, deltas already built the
// cumulative text and this would duplicate it.
if this.cumulative.is_empty()
&& let crate::models::ResponseItem::Message { content, .. } = &item
&& let Some(text) = content.iter().find_map(|c| match c {
crate::models::ContentItem::OutputText { text } => Some(text),
_ => None,
})
{
this.cumulative.push_str(text);
if this.cumulative.is_empty() {
if let crate::models::ResponseItem::Message { content, .. } = &item {
if let Some(text) = content.iter().find_map(|c| match c {
crate::models::ContentItem::OutputText { text } => Some(text),
_ => None,
}) {
this.cumulative.push_str(text);
}
}
}
// Swallow assistant message here; emit on Completed.
@@ -589,9 +588,6 @@ where
Poll::Ready(Some(Ok(ResponseEvent::ReasoningSummaryDelta(_)))) => {
continue;
}
Poll::Ready(Some(Ok(ResponseEvent::ReasoningSummaryPartAdded))) => {
continue;
}
}
}
}

View File

@@ -1,6 +1,5 @@
use std::io::BufRead;
use std::path::Path;
use std::sync::OnceLock;
use std::time::Duration;
use bytes::Bytes;
@@ -8,7 +7,6 @@ use codex_login::AuthMode;
use codex_login::CodexAuth;
use eventsource_stream::Eventsource;
use futures::prelude::*;
use regex_lite::Regex;
use reqwest::StatusCode;
use serde::Deserialize;
use serde::Serialize;
@@ -29,20 +27,18 @@ use crate::client_common::ResponseStream;
use crate::client_common::ResponsesApiRequest;
use crate::client_common::create_reasoning_param_for_request;
use crate::config::Config;
use crate::config_types::ReasoningEffort as ReasoningEffortConfig;
use crate::config_types::ReasoningSummary as ReasoningSummaryConfig;
use crate::error::CodexErr;
use crate::error::Result;
use crate::error::UsageLimitReachedError;
use crate::flags::CODEX_RS_SSE_FIXTURE;
use crate::model_family::ModelFamily;
use crate::model_provider_info::ModelProviderInfo;
use crate::model_provider_info::WireApi;
use crate::models::ResponseItem;
use crate::openai_tools::create_tools_json_for_responses_api;
use crate::protocol::TokenUsage;
use crate::user_agent::get_codex_user_agent;
use crate::util::backoff;
use codex_protocol::config_types::ReasoningEffort as ReasoningEffortConfig;
use codex_protocol::config_types::ReasoningSummary as ReasoningSummaryConfig;
use std::sync::Arc;
#[derive(Debug, Deserialize)]
@@ -52,12 +48,10 @@ struct ErrorResponse {
#[derive(Debug, Deserialize)]
struct Error {
r#type: Option<String>,
code: Option<String>,
message: Option<String>,
r#type: String,
}
#[derive(Debug, Clone)]
#[derive(Clone)]
pub struct ModelClient {
config: Arc<Config>,
auth: Option<CodexAuth>,
@@ -208,9 +202,12 @@ impl ModelClient {
req_builder = req_builder.header("chatgpt-account-id", account_id);
}
let originator = &self.config.responses_originator_header;
let originator = self
.config
.internal_originator
.as_deref()
.unwrap_or("codex_cli_rs");
req_builder = req_builder.header("originator", originator);
req_builder = req_builder.header("User-Agent", get_codex_user_agent(Some(originator)));
let res = req_builder.send().await;
if let Ok(resp) = &res {
@@ -248,12 +245,6 @@ impl ModelClient {
.and_then(|v| v.to_str().ok())
.and_then(|s| s.parse::<u64>().ok());
if status == StatusCode::UNAUTHORIZED
&& let Some(a) = auth.as_ref()
{
let _ = a.refresh_token().await;
}
// The OpenAI Responses endpoint returns structured JSON bodies even for 4xx/5xx
// errors. When we bubble early with only the HTTP status the caller sees an opaque
// "unexpected status 400 Bad Request" which makes debugging nearly impossible.
@@ -261,10 +252,7 @@ impl ModelClient {
// exact error message (e.g. "Unknown parameter: 'input[0].metadata'"). The body is
// small and this branch only runs on error paths so the extra allocation is
// negligible.
if !(status == StatusCode::TOO_MANY_REQUESTS
|| status == StatusCode::UNAUTHORIZED
|| status.is_server_error())
{
if !(status == StatusCode::TOO_MANY_REQUESTS || status.is_server_error()) {
// Surface the error body to callers. Use `unwrap_or_default` per Clippy.
let body = res.text().await.unwrap_or_default();
return Err(CodexErr::UnexpectedStatus(status, body));
@@ -273,18 +261,14 @@ impl ModelClient {
if status == StatusCode::TOO_MANY_REQUESTS {
let body = res.json::<ErrorResponse>().await.ok();
if let Some(ErrorResponse {
error:
Error {
r#type: Some(error_type),
..
},
error: Error { r#type, .. },
}) = body
{
if error_type == "usage_limit_reached" {
if r#type == "usage_limit_reached" {
return Err(CodexErr::UsageLimitReached(UsageLimitReachedError {
plan_type: auth.and_then(|a| a.get_plan_type()),
}));
} else if error_type == "usage_not_included" {
} else if r#type == "usage_not_included" {
return Err(CodexErr::UsageNotIncluded);
}
}
@@ -317,30 +301,6 @@ impl ModelClient {
pub fn get_provider(&self) -> ModelProviderInfo {
self.provider.clone()
}
/// Returns the currently configured model slug.
pub fn get_model(&self) -> String {
self.config.model.clone()
}
/// Returns the currently configured model family.
pub fn get_model_family(&self) -> ModelFamily {
self.config.model_family.clone()
}
/// Returns the current reasoning effort setting.
pub fn get_reasoning_effort(&self) -> ReasoningEffortConfig {
self.effort
}
/// Returns the current reasoning summary setting.
pub fn get_reasoning_summary(&self) -> ReasoningSummaryConfig {
self.summary
}
pub fn get_auth(&self) -> Option<CodexAuth> {
self.auth.clone()
}
}
#[derive(Debug, Deserialize, Serialize)]
@@ -404,14 +364,13 @@ async fn process_sse<S>(
// If the stream stays completely silent for an extended period treat it as disconnected.
// The response id returned from the "complete" message.
let mut response_completed: Option<ResponseCompleted> = None;
let mut response_error: Option<CodexErr> = None;
loop {
let sse = match timeout(idle_timeout, stream.next()).await {
Ok(Some(Ok(sse))) => sse,
Ok(Some(Err(e))) => {
debug!("SSE Error: {e:#}");
let event = CodexErr::Stream(e.to_string(), None);
let event = CodexErr::Stream(e.to_string());
let _ = tx_event.send(Err(event)).await;
return;
}
@@ -429,10 +388,9 @@ async fn process_sse<S>(
}
None => {
let _ = tx_event
.send(Err(response_error.unwrap_or(CodexErr::Stream(
.send(Err(CodexErr::Stream(
"stream closed before response.completed".into(),
None,
))))
)))
.await;
}
}
@@ -440,10 +398,7 @@ async fn process_sse<S>(
}
Err(_) => {
let _ = tx_event
.send(Err(CodexErr::Stream(
"idle timeout waiting for SSE".into(),
None,
)))
.send(Err(CodexErr::Stream("idle timeout waiting for SSE".into())))
.await;
return;
}
@@ -521,25 +476,15 @@ async fn process_sse<S>(
}
"response.failed" => {
if let Some(resp_val) = event.response {
response_error = Some(CodexErr::Stream(
"response.failed event received".to_string(),
None,
));
let error = resp_val
.get("error")
.and_then(|v| v.get("message"))
.and_then(|v| v.as_str())
.unwrap_or("response.failed event received");
let error = resp_val.get("error");
if let Some(error) = error {
match serde_json::from_value::<Error>(error.clone()) {
Ok(error) => {
let delay = try_parse_retry_after(&error);
let message = error.message.unwrap_or_default();
response_error = Some(CodexErr::Stream(message, delay));
}
Err(e) => {
debug!("failed to parse ErrorResponse: {e}");
}
}
}
let _ = tx_event
.send(Err(CodexErr::Stream(error.to_string())))
.await;
}
}
// Final response completed includes array of output items & id
@@ -560,18 +505,12 @@ async fn process_sse<S>(
| "response.function_call_arguments.delta"
| "response.in_progress"
| "response.output_item.added"
| "response.output_text.done" => {
// Currently, we ignore this event, but we handle it
| "response.output_text.done"
| "response.reasoning_summary_part.added"
| "response.reasoning_summary_text.done" => {
// Currently, we ignore these events, but we handle them
// separately to skip the logging message in the `other` case.
}
"response.reasoning_summary_part.added" => {
// Boundary between reasoning summary sections (e.g., titles).
let event = ResponseEvent::ReasoningSummaryPartAdded;
if tx_event.send(Ok(event)).await.is_err() {
return;
}
}
"response.reasoning_summary_text.done" => {}
other => debug!(other, "sse event"),
}
}
@@ -603,42 +542,10 @@ async fn stream_from_fixture(
Ok(ResponseStream { rx_event })
}
fn rate_limit_regex() -> &'static Regex {
static RE: OnceLock<Regex> = OnceLock::new();
#[expect(clippy::unwrap_used)]
RE.get_or_init(|| Regex::new(r"Please try again in (\d+(?:\.\d+)?)(s|ms)").unwrap())
}
fn try_parse_retry_after(err: &Error) -> Option<Duration> {
if err.code != Some("rate_limit_exceeded".to_string()) {
return None;
}
// parse the Please try again in 1.898s format using regex
let re = rate_limit_regex();
if let Some(message) = &err.message
&& let Some(captures) = re.captures(message)
{
let seconds = captures.get(1);
let unit = captures.get(2);
if let (Some(value), Some(unit)) = (seconds, unit) {
let value = value.as_str().parse::<f64>().ok()?;
let unit = unit.as_str();
if unit == "s" {
return Some(Duration::from_secs_f64(value));
} else if unit == "ms" {
return Some(Duration::from_millis(value as u64));
}
}
}
None
}
#[cfg(test)]
mod tests {
#![allow(clippy::expect_used, clippy::unwrap_used)]
use super::*;
use serde_json::json;
use tokio::sync::mpsc;
@@ -820,49 +727,13 @@ mod tests {
matches!(events[0], Ok(ResponseEvent::OutputItemDone(_)));
match &events[1] {
Err(CodexErr::Stream(msg, _)) => {
Err(CodexErr::Stream(msg)) => {
assert_eq!(msg, "stream closed before response.completed")
}
other => panic!("unexpected second event: {other:?}"),
}
}
#[tokio::test]
async fn error_when_error_event() {
let raw_error = r#"{"type":"response.failed","sequence_number":3,"response":{"id":"resp_689bcf18d7f08194bf3440ba62fe05d803fee0cdac429894","object":"response","created_at":1755041560,"status":"failed","background":false,"error":{"code":"rate_limit_exceeded","message":"Rate limit reached for gpt-5 in organization org-AAA on tokens per min (TPM): Limit 30000, Used 22999, Requested 12528. Please try again in 11.054s. Visit https://platform.openai.com/account/rate-limits to learn more."}, "usage":null,"user":null,"metadata":{}}}"#;
let sse1 = format!("event: response.failed\ndata: {raw_error}\n\n");
let provider = ModelProviderInfo {
name: "test".to_string(),
base_url: Some("https://test.com".to_string()),
env_key: Some("TEST_API_KEY".to_string()),
env_key_instructions: None,
wire_api: WireApi::Responses,
query_params: None,
http_headers: None,
env_http_headers: None,
request_max_retries: Some(0),
stream_max_retries: Some(0),
stream_idle_timeout_ms: Some(1000),
requires_openai_auth: false,
};
let events = collect_events(&[sse1.as_bytes()], provider).await;
assert_eq!(events.len(), 1);
match &events[0] {
Err(CodexErr::Stream(msg, delay)) => {
assert_eq!(
msg,
"Rate limit reached for gpt-5 in organization org-AAA on tokens per min (TPM): Limit 30000, Used 22999, Requested 12528. Please try again in 11.054s. Visit https://platform.openai.com/account/rate-limits to learn more."
);
assert_eq!(*delay, Some(Duration::from_secs_f64(11.054)));
}
other => panic!("unexpected second event: {other:?}"),
}
}
// ────────────────────────────
// Table-driven test from `main`
// ────────────────────────────
@@ -961,27 +832,4 @@ mod tests {
);
}
}
#[test]
fn test_try_parse_retry_after() {
let err = Error {
r#type: None,
message: Some("Rate limit reached for gpt-5 in organization org- on tokens per min (TPM): Limit 1, Used 1, Requested 19304. Please try again in 28ms. Visit https://platform.openai.com/account/rate-limits to learn more.".to_string()),
code: Some("rate_limit_exceeded".to_string()),
};
let delay = try_parse_retry_after(&err);
assert_eq!(delay, Some(Duration::from_millis(28)));
}
#[test]
fn test_try_parse_retry_after_no_delay() {
let err = Error {
r#type: None,
message: Some("Rate limit reached for gpt-5 in organization <ORG> on tokens per min (TPM): Limit 30000, Used 6899, Requested 24050. Please try again in 1.898s. Visit https://platform.openai.com/account/rate-limits to learn more.".to_string()),
code: Some("rate_limit_exceeded".to_string()),
};
let delay = try_parse_retry_after(&err);
assert_eq!(delay, Some(Duration::from_secs_f64(1.898)));
}
}

View File

@@ -1,15 +1,19 @@
use crate::config_types::ReasoningEffort as ReasoningEffortConfig;
use crate::config_types::ReasoningSummary as ReasoningSummaryConfig;
use crate::error::Result;
use crate::model_family::ModelFamily;
use crate::models::ContentItem;
use crate::models::ResponseItem;
use crate::openai_tools::OpenAiTool;
use crate::protocol::AskForApproval;
use crate::protocol::SandboxPolicy;
use crate::protocol::TokenUsage;
use codex_apply_patch::APPLY_PATCH_TOOL_INSTRUCTIONS;
use codex_protocol::config_types::ReasoningEffort as ReasoningEffortConfig;
use codex_protocol::config_types::ReasoningSummary as ReasoningSummaryConfig;
use futures::Stream;
use serde::Serialize;
use std::borrow::Cow;
use std::fmt::Display;
use std::path::PathBuf;
use std::pin::Pin;
use std::task::Context;
use std::task::Poll;
@@ -19,19 +23,62 @@ use tokio::sync::mpsc;
/// with this content.
const BASE_INSTRUCTIONS: &str = include_str!("../prompt.md");
/// Wraps the environment context message in a tag for the model to parse more easily.
const ENVIRONMENT_CONTEXT_START: &str = "<environment_context>\n\n";
const ENVIRONMENT_CONTEXT_END: &str = "\n\n</environment_context>";
/// Wraps the user instructions message in a tag for the model to parse more easily.
const USER_INSTRUCTIONS_START: &str = "<user_instructions>\n\n";
const USER_INSTRUCTIONS_END: &str = "\n\n</user_instructions>";
/// API request payload for a single model turn
#[derive(Debug, Clone)]
pub(crate) struct EnvironmentContext {
pub cwd: PathBuf,
pub approval_policy: AskForApproval,
pub sandbox_policy: SandboxPolicy,
}
impl Display for EnvironmentContext {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
writeln!(
f,
"Current working directory: {}",
self.cwd.to_string_lossy()
)?;
writeln!(f, "Approval policy: {}", self.approval_policy)?;
writeln!(f, "Sandbox policy: {}", self.sandbox_policy)?;
let network_access = match self.sandbox_policy.clone() {
SandboxPolicy::DangerFullAccess => "enabled",
SandboxPolicy::ReadOnly => "restricted",
SandboxPolicy::WorkspaceWrite { network_access, .. } => {
if network_access {
"enabled"
} else {
"restricted"
}
}
};
writeln!(f, "Network access: {network_access}")?;
Ok(())
}
}
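As a sketch of what this `Display` impl renders, assuming illustrative field values and a read-only sandbox:

```rust
// Illustrative only; relies on the Display impls of the policy types.
let ctx = EnvironmentContext {
    cwd: PathBuf::from("/repo"),
    approval_policy: AskForApproval::OnRequest,
    sandbox_policy: SandboxPolicy::ReadOnly,
};
let rendered = ctx.to_string();
assert!(rendered.contains("Current working directory: /repo"));
assert!(rendered.contains("Network access: restricted"));
```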
/// API request payload for a single model turn.
#[derive(Default, Debug, Clone)]
pub struct Prompt {
/// Conversation context input items.
pub input: Vec<ResponseItem>,
/// Optional instructions from the user to amend to the built-in agent
/// instructions.
pub user_instructions: Option<String>,
/// Whether to store response on server side (disable_response_storage = !store).
pub store: bool,
/// A list of key-value pairs that will be added as a developer message
/// for the model to use
pub environment_context: Option<EnvironmentContext>,
/// Tools available to the model, including additional tools sourced from
/// external MCP servers.
pub tools: Vec<OpenAiTool>,
@@ -53,19 +100,36 @@ impl Prompt {
Cow::Owned(sections.join("\n"))
}
pub(crate) fn get_formatted_input(&self) -> Vec<ResponseItem> {
self.input.clone()
fn get_formatted_user_instructions(&self) -> Option<String> {
self.user_instructions
.as_ref()
.map(|ui| format!("{USER_INSTRUCTIONS_START}{ui}{USER_INSTRUCTIONS_END}"))
}
/// Creates a formatted user instructions message from a string
pub(crate) fn format_user_instructions_message(ui: &str) -> ResponseItem {
ResponseItem::Message {
id: None,
role: "user".to_string(),
content: vec![ContentItem::InputText {
text: format!("{USER_INSTRUCTIONS_START}{ui}{USER_INSTRUCTIONS_END}"),
}],
fn get_formatted_environment_context(&self) -> Option<String> {
self.environment_context
.as_ref()
.map(|ec| format!("{ENVIRONMENT_CONTEXT_START}{ec}{ENVIRONMENT_CONTEXT_END}"))
}
pub(crate) fn get_formatted_input(&self) -> Vec<ResponseItem> {
let mut input_with_instructions = Vec::with_capacity(self.input.len() + 2);
if let Some(ec) = self.get_formatted_environment_context() {
input_with_instructions.push(ResponseItem::Message {
id: None,
role: "user".to_string(),
content: vec![ContentItem::InputText { text: ec }],
});
}
if let Some(ui) = self.get_formatted_user_instructions() {
input_with_instructions.push(ResponseItem::Message {
id: None,
role: "user".to_string(),
content: vec![ContentItem::InputText { text: ui }],
});
}
input_with_instructions.extend(self.input.clone());
input_with_instructions
}
}
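So with both optional fields set, the model sees the environment context first, then the user instructions, then the conversation items. A rough in-crate sketch (`get_formatted_input` is `pub(crate)`; values are illustrative):

```rust
let prompt = Prompt {
    user_instructions: Some("be terse".to_string()),
    environment_context: Some(EnvironmentContext {
        cwd: PathBuf::from("/repo"),
        approval_policy: AskForApproval::OnRequest,
        sandbox_policy: SandboxPolicy::ReadOnly,
    }),
    ..Default::default()
};
// Two synthetic user messages are prepended ahead of the (empty) input.
assert_eq!(prompt.get_formatted_input().len(), 2);
```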
@@ -80,13 +144,57 @@ pub enum ResponseEvent {
OutputTextDelta(String),
ReasoningSummaryDelta(String),
ReasoningContentDelta(String),
ReasoningSummaryPartAdded,
}
#[derive(Debug, Serialize)]
pub(crate) struct Reasoning {
pub(crate) effort: ReasoningEffortConfig,
pub(crate) summary: ReasoningSummaryConfig,
pub(crate) effort: OpenAiReasoningEffort,
#[serde(skip_serializing_if = "Option::is_none")]
pub(crate) summary: Option<OpenAiReasoningSummary>,
}
/// See https://platform.openai.com/docs/guides/reasoning?api-mode=responses#get-started-with-reasoning
#[derive(Debug, Serialize, Default, Clone, Copy)]
#[serde(rename_all = "lowercase")]
pub(crate) enum OpenAiReasoningEffort {
Low,
#[default]
Medium,
High,
}
impl From<ReasoningEffortConfig> for Option<OpenAiReasoningEffort> {
fn from(effort: ReasoningEffortConfig) -> Self {
match effort {
ReasoningEffortConfig::Low => Some(OpenAiReasoningEffort::Low),
ReasoningEffortConfig::Medium => Some(OpenAiReasoningEffort::Medium),
ReasoningEffortConfig::High => Some(OpenAiReasoningEffort::High),
ReasoningEffortConfig::None => None,
}
}
}
/// A summary of the reasoning performed by the model. This can be useful for
/// debugging and understanding the model's reasoning process.
/// See https://platform.openai.com/docs/guides/reasoning?api-mode=responses#reasoning-summaries
#[derive(Debug, Serialize, Default, Clone, Copy)]
#[serde(rename_all = "lowercase")]
pub(crate) enum OpenAiReasoningSummary {
#[default]
Auto,
Concise,
Detailed,
}
impl From<ReasoningSummaryConfig> for Option<OpenAiReasoningSummary> {
fn from(summary: ReasoningSummaryConfig) -> Self {
match summary {
ReasoningSummaryConfig::Auto => Some(OpenAiReasoningSummary::Auto),
ReasoningSummaryConfig::Concise => Some(OpenAiReasoningSummary::Concise),
ReasoningSummaryConfig::Detailed => Some(OpenAiReasoningSummary::Detailed),
ReasoningSummaryConfig::None => None,
}
}
}
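The net effect is that a `"none"` setting suppresses the field rather than serializing an invalid value; a minimal sketch:

```rust
// `None` effort short-circuits create_reasoning_param_for_request via `?`,
// so no `reasoning` object is sent at all.
let effort: Option<OpenAiReasoningEffort> = ReasoningEffortConfig::None.into();
assert!(effort.is_none());

let summary: Option<OpenAiReasoningSummary> = ReasoningSummaryConfig::None.into();
assert!(summary.is_none());
```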
/// Request object that is serialized as JSON and POST'ed when using the
@@ -117,7 +225,12 @@ pub(crate) fn create_reasoning_param_for_request(
summary: ReasoningSummaryConfig,
) -> Option<Reasoning> {
if model_family.supports_reasoning_summaries {
Some(Reasoning { effort, summary })
let effort: Option<OpenAiReasoningEffort> = effort.into();
let effort = effort?;
Some(Reasoning {
effort,
summary: summary.into(),
})
} else {
None
}
@@ -137,6 +250,7 @@ impl Stream for ResponseStream {
#[cfg(test)]
mod tests {
#![allow(clippy::expect_used)]
use crate::model_family::find_family_for_model;
use super::*;
@@ -144,6 +258,7 @@ mod tests {
#[test]
fn get_full_instructions_no_user_content() {
let prompt = Prompt {
user_instructions: Some("custom instruction".to_string()),
..Default::default()
};
let expected = format!("{BASE_INSTRUCTIONS}\n{APPLY_PATCH_TOOL_INSTRUCTIONS}");

File diff suppressed because it is too large

View File

@@ -1,30 +0,0 @@
use crate::codex::Codex;
use crate::error::Result as CodexResult;
use crate::protocol::Event;
use crate::protocol::Op;
use crate::protocol::Submission;
pub struct CodexConversation {
codex: Codex,
}
/// Conduit for the bidirectional stream of messages that compose a conversation
/// in Codex.
impl CodexConversation {
pub(crate) fn new(codex: Codex) -> Self {
Self { codex }
}
pub async fn submit(&self, op: Op) -> CodexResult<String> {
self.codex.submit(op).await
}
/// Use sparingly: this is intended to be removed soon.
pub async fn submit_with_id(&self, sub: Submission) -> CodexResult<()> {
self.codex.submit_with_id(sub).await
}
pub async fn next_event(&self) -> CodexResult<Event> {
self.codex.next_event().await
}
}

View File

@@ -0,0 +1,59 @@
use std::sync::Arc;
use crate::Codex;
use crate::CodexSpawnOk;
use crate::config::Config;
use crate::protocol::Event;
use crate::protocol::EventMsg;
use crate::util::notify_on_sigint;
use codex_login::CodexAuth;
use tokio::sync::Notify;
use uuid::Uuid;
/// Represents an active Codex conversation, including the first event
/// (which is [`EventMsg::SessionConfigured`]).
pub struct CodexConversation {
pub codex: Codex,
pub session_id: Uuid,
pub session_configured: Event,
pub ctrl_c: Arc<Notify>,
}
/// Spawn a new [`Codex`] and initialize the session.
///
/// Returns the wrapped [`Codex`] **and** the `SessionInitialized` event that
/// is received as a response to the initial `ConfigureSession` submission so
/// that callers can surface the information to the UI.
pub async fn init_codex(config: Config) -> anyhow::Result<CodexConversation> {
let ctrl_c = notify_on_sigint();
let auth = CodexAuth::from_codex_home(&config.codex_home)?;
let CodexSpawnOk {
codex,
init_id,
session_id,
} = Codex::spawn(config, auth, ctrl_c.clone()).await?;
// The first event must be `SessionInitialized`. Validate and forward it to
// the caller so that they can display it in the conversation history.
let event = codex.next_event().await?;
if event.id != init_id
|| !matches!(
&event,
Event {
id: _id,
msg: EventMsg::SessionConfigured(_),
}
)
{
return Err(anyhow::anyhow!(
"expected SessionInitialized but got {event:?}"
));
}
Ok(CodexConversation {
codex,
session_id,
session_configured: event,
ctrl_c,
})
}
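A hedged caller sketch (config construction elided; `Event` is assumed to derive `Debug`):

```rust
async fn start(config: Config) -> anyhow::Result<()> {
    let CodexConversation {
        session_id,
        session_configured,
        ..
    } = init_codex(config).await?;
    println!("session {session_id} configured: {session_configured:?}");
    Ok(())
}
```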

View File

@@ -1,6 +1,9 @@
use crate::config_profile::ConfigProfile;
use crate::config_types::History;
use crate::config_types::McpServerConfig;
use crate::config_types::ReasoningEffort;
use crate::config_types::ReasoningSummary;
use crate::config_types::SandboxMode;
use crate::config_types::SandboxWorkspaceWrite;
use crate::config_types::ShellEnvironmentPolicy;
use crate::config_types::ShellEnvironmentPolicyToml;
@@ -13,10 +16,6 @@ use crate::model_provider_info::built_in_model_providers;
use crate::openai_model_info::get_model_info;
use crate::protocol::AskForApproval;
use crate::protocol::SandboxPolicy;
use codex_login::AuthMode;
use codex_protocol::config_types::ReasoningEffort;
use codex_protocol::config_types::ReasoningSummary;
use codex_protocol::config_types::SandboxMode;
use dirs::home_dir;
use serde::Deserialize;
use std::collections::HashMap;
@@ -35,8 +34,6 @@ pub(crate) const PROJECT_DOC_MAX_BYTES: usize = 32 * 1024; // 32 KiB
const CONFIG_TOML_FILE: &str = "config.toml";
const DEFAULT_RESPONSES_ORIGINATOR_HEADER: &str = "codex_cli_rs";
/// Application configuration loaded from disk and merged with overrides.
#[derive(Debug, Clone, PartialEq)]
pub struct Config {
@@ -142,8 +139,8 @@ pub struct Config {
/// When this program is invoked, arg0 will be set to `codex-linux-sandbox`.
pub codex_linux_sandbox_exe: Option<PathBuf>,
/// Value to use for `reasoning.effort` when making a request using the
/// Responses API.
/// If not "none", the value to use for `reasoning.effort` when making a
/// request using the Responses API.
pub model_reasoning_effort: ReasoningEffort,
/// If not "none", the value to use for `reasoning.summary` when making a
@@ -159,16 +156,8 @@ pub struct Config {
/// Include an experimental plan tool that the model can use to update its current plan and status of each step.
pub include_plan_tool: bool,
/// Include the `apply_patch` tool for models that benefit from invoking
/// file edits as a structured tool call. When unset, this falls back to the
/// model family's default preference.
pub include_apply_patch_tool: bool,
/// The value for the `originator` header included with Responses API requests.
pub responses_originator_header: String,
/// If set to `true`, the API key will be signed with the `originator` header.
pub preferred_auth_method: AuthMode,
pub internal_originator: Option<String>,
}
impl Config {
@@ -412,12 +401,9 @@ pub struct ConfigToml {
pub experimental_instructions_file: Option<PathBuf>,
/// The value for the `originator` header included with Responses API requests.
pub responses_originator_header_internal_override: Option<String>,
pub internal_originator: Option<String>,
pub projects: Option<HashMap<String, ProjectConfig>>,
/// If set to `true`, the API key will be signed with the `originator` header.
pub preferred_auth_method: Option<AuthMode>,
}
#[derive(Deserialize, Debug, Clone, PartialEq, Eq)]
@@ -494,7 +480,6 @@ pub struct ConfigOverrides {
pub codex_linux_sandbox_exe: Option<PathBuf>,
pub base_instructions: Option<String>,
pub include_plan_tool: Option<bool>,
pub include_apply_patch_tool: Option<bool>,
pub disable_response_storage: Option<bool>,
pub show_raw_agent_reasoning: Option<bool>,
}
@@ -520,7 +505,6 @@ impl Config {
codex_linux_sandbox_exe,
base_instructions,
include_plan_tool,
include_apply_patch_tool,
disable_response_storage,
show_raw_agent_reasoning,
} = overrides;
@@ -597,7 +581,6 @@ impl Config {
needs_special_apply_patch_instructions: false,
supports_reasoning_summaries,
uses_local_shell_tool: false,
uses_apply_patch_tool: false,
}
});
@@ -624,13 +607,6 @@ impl Config {
Self::get_base_instructions(experimental_instructions_path, &resolved_cwd)?;
let base_instructions = base_instructions.or(file_base_instructions);
let include_apply_patch_tool_val =
include_apply_patch_tool.unwrap_or(model_family.uses_apply_patch_tool);
let responses_originator_header: String = cfg
.responses_originator_header_internal_override
.unwrap_or(DEFAULT_RESPONSES_ORIGINATOR_HEADER.to_owned());
let config = Self {
model,
model_family,
@@ -683,9 +659,7 @@ impl Config {
experimental_resume,
include_plan_tool: include_plan_tool.unwrap_or(false),
include_apply_patch_tool: include_apply_patch_tool_val,
responses_originator_header,
preferred_auth_method: cfg.preferred_auth_method.unwrap_or(AuthMode::ChatGPT),
internal_originator: cfg.internal_originator,
};
Ok(config)
}
@@ -765,10 +739,10 @@ fn default_model() -> String {
pub fn find_codex_home() -> std::io::Result<PathBuf> {
// Honor the `CODEX_HOME` environment variable when it is set to allow users
// (and tests) to override the default location.
if let Ok(val) = std::env::var("CODEX_HOME")
&& !val.is_empty()
{
return PathBuf::from(val).canonicalize();
if let Ok(val) = std::env::var("CODEX_HOME") {
if !val.is_empty() {
return PathBuf::from(val).canonicalize();
}
}
let mut p = home_dir().ok_or_else(|| {
@@ -791,6 +765,7 @@ pub fn log_dir(cfg: &Config) -> std::io::Result<PathBuf> {
#[cfg(test)]
mod tests {
#![allow(clippy::expect_used, clippy::unwrap_used)]
use crate::config_types::HistoryPersistence;
use super::*;
@@ -1048,9 +1023,7 @@ disable_response_storage = true
experimental_resume: None,
base_instructions: None,
include_plan_tool: false,
include_apply_patch_tool: false,
responses_originator_header: "codex_cli_rs".to_string(),
preferred_auth_method: AuthMode::ChatGPT,
internal_originator: None,
},
o3_profile_config
);
@@ -1101,9 +1074,7 @@ disable_response_storage = true
experimental_resume: None,
base_instructions: None,
include_plan_tool: false,
include_apply_patch_tool: false,
responses_originator_header: "codex_cli_rs".to_string(),
preferred_auth_method: AuthMode::ChatGPT,
internal_originator: None,
};
assert_eq!(expected_gpt3_profile_config, gpt3_profile_config);
@@ -1169,9 +1140,7 @@ disable_response_storage = true
experimental_resume: None,
base_instructions: None,
include_plan_tool: false,
include_apply_patch_tool: false,
responses_originator_header: "codex_cli_rs".to_string(),
preferred_auth_method: AuthMode::ChatGPT,
internal_originator: None,
};
assert_eq!(expected_zdr_profile_config, zdr_profile_config);

View File

@@ -1,9 +1,9 @@
use serde::Deserialize;
use std::path::PathBuf;
use crate::config_types::ReasoningEffort;
use crate::config_types::ReasoningSummary;
use crate::protocol::AskForApproval;
use codex_protocol::config_types::ReasoningEffort;
use codex_protocol::config_types::ReasoningSummary;
/// Collection of common configuration options that a user can define as a unit
/// in `config.toml`.

View File

@@ -5,9 +5,11 @@
use std::collections::HashMap;
use std::path::PathBuf;
use strum_macros::Display;
use wildmatch::WildMatchPattern;
use serde::Deserialize;
use serde::Serialize;
#[derive(Deserialize, Debug, Clone, PartialEq)]
pub struct McpServerConfig {
@@ -76,6 +78,20 @@ pub enum HistoryPersistence {
#[derive(Deserialize, Debug, Clone, PartialEq, Default)]
pub struct Tui {}
#[derive(Deserialize, Debug, Clone, Copy, PartialEq, Default, Serialize)]
#[serde(rename_all = "kebab-case")]
pub enum SandboxMode {
#[serde(rename = "read-only")]
#[default]
ReadOnly,
#[serde(rename = "workspace-write")]
WorkspaceWrite,
#[serde(rename = "danger-full-access")]
DangerFullAccess,
}
#[derive(Deserialize, Debug, Clone, PartialEq, Default)]
pub struct SandboxWorkspaceWrite {
#[serde(default)]
@@ -183,3 +199,31 @@ impl From<ShellEnvironmentPolicyToml> for ShellEnvironmentPolicy {
}
}
}
/// See https://platform.openai.com/docs/guides/reasoning?api-mode=responses#get-started-with-reasoning
#[derive(Debug, Serialize, Deserialize, Default, Clone, Copy, PartialEq, Eq, Display)]
#[serde(rename_all = "lowercase")]
#[strum(serialize_all = "lowercase")]
pub enum ReasoningEffort {
Low,
#[default]
Medium,
High,
/// Option to disable reasoning.
None,
}
/// A summary of the reasoning performed by the model. This can be useful for
/// debugging and understanding the model's reasoning process.
/// See https://platform.openai.com/docs/guides/reasoning?api-mode=responses#reasoning-summaries
#[derive(Debug, Serialize, Deserialize, Default, Clone, Copy, PartialEq, Eq, Display)]
#[serde(rename_all = "lowercase")]
#[strum(serialize_all = "lowercase")]
pub enum ReasoningSummary {
#[default]
Auto,
Concise,
Detailed,
/// Option to disable reasoning summaries.
None,
}
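// Both enums serialize as plain lowercase strings via `rename_all`, and the
// strum `Display` derives match. A minimal round-trip sketch (assumes
// `serde_json` is available; illustrative only, not part of the crate):
fn reasoning_enums_round_trip() {
    assert_eq!(ReasoningEffort::Medium.to_string(), "medium");
    assert_eq!(
        serde_json::to_string(&ReasoningEffort::High).unwrap(),
        "\"high\""
    );
    // Deserialization accepts the same lowercase spelling.
    let summary: ReasoningSummary = serde_json::from_str("\"detailed\"").unwrap();
    assert_eq!(summary, ReasoningSummary::Detailed);
}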

View File

@@ -1,96 +0,0 @@
use std::collections::HashMap;
use std::sync::Arc;
use codex_login::CodexAuth;
use tokio::sync::RwLock;
use uuid::Uuid;
use crate::codex::Codex;
use crate::codex::CodexSpawnOk;
use crate::codex::INITIAL_SUBMIT_ID;
use crate::codex_conversation::CodexConversation;
use crate::config::Config;
use crate::error::CodexErr;
use crate::error::Result as CodexResult;
use crate::protocol::Event;
use crate::protocol::EventMsg;
use crate::protocol::SessionConfiguredEvent;
/// Represents a newly created Codex conversation, including the first event
/// (which is [`EventMsg::SessionConfigured`]).
pub struct NewConversation {
pub conversation_id: Uuid,
pub conversation: Arc<CodexConversation>,
pub session_configured: SessionConfiguredEvent,
}
/// [`ConversationManager`] is responsible for creating conversations and
/// maintaining them in memory.
pub struct ConversationManager {
conversations: Arc<RwLock<HashMap<Uuid, Arc<CodexConversation>>>>,
}
impl Default for ConversationManager {
fn default() -> Self {
Self {
conversations: Arc::new(RwLock::new(HashMap::new())),
}
}
}
impl ConversationManager {
pub async fn new_conversation(&self, config: Config) -> CodexResult<NewConversation> {
let auth = CodexAuth::from_codex_home(&config.codex_home, config.preferred_auth_method)?;
self.new_conversation_with_auth(config, auth).await
}
/// Used for integration tests: should not be used by ordinary business
/// logic.
pub async fn new_conversation_with_auth(
&self,
config: Config,
auth: Option<CodexAuth>,
) -> CodexResult<NewConversation> {
let CodexSpawnOk {
codex,
session_id: conversation_id,
} = Codex::spawn(config, auth).await?;
// The first event must be `SessionConfigured`. Validate and forward it
// to the caller so that they can display it in the conversation
// history.
let event = codex.next_event().await?;
let session_configured = match event {
Event {
id,
msg: EventMsg::SessionConfigured(session_configured),
} if id == INITIAL_SUBMIT_ID => session_configured,
_ => {
return Err(CodexErr::SessionConfiguredNotFirstEvent);
}
};
let conversation = Arc::new(CodexConversation::new(codex));
self.conversations
.write()
.await
.insert(conversation_id, conversation.clone());
Ok(NewConversation {
conversation_id,
conversation,
session_configured,
})
}
pub async fn get_conversation(
&self,
conversation_id: Uuid,
) -> CodexResult<Arc<CodexConversation>> {
let conversations = self.conversations.read().await;
conversations
.get(&conversation_id)
.cloned()
.ok_or_else(|| CodexErr::ConversationNotFound(conversation_id))
}
}
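// A hedged usage sketch of the manager's lifecycle (assumes a loaded
// `Config`; error handling elided; illustrative, not part of the crate):
async fn demo_conversation_lifecycle(config: Config) -> CodexResult<()> {
    let manager = ConversationManager::default();
    // Spawns the agent and validates that SessionConfigured arrives first.
    let NewConversation {
        conversation_id,
        conversation,
        session_configured: _,
    } = manager.new_conversation(config).await?;
    // Later callers can look the conversation up by its id.
    let same = manager.get_conversation(conversation_id).await?;
    assert!(Arc::ptr_eq(&conversation, &same));
    Ok(())
}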

View File

@@ -1,86 +0,0 @@
use serde::Deserialize;
use serde::Serialize;
use strum_macros::Display as DeriveDisplay;
use crate::models::ContentItem;
use crate::models::ResponseItem;
use crate::protocol::AskForApproval;
use crate::protocol::SandboxPolicy;
use codex_protocol::config_types::SandboxMode;
use std::fmt::Display;
use std::path::PathBuf;
/// Wraps the environment context message in a tag so the model can parse it more easily.
pub(crate) const ENVIRONMENT_CONTEXT_START: &str = "<environment_context>\n";
pub(crate) const ENVIRONMENT_CONTEXT_END: &str = "</environment_context>";
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, DeriveDisplay)]
#[serde(rename_all = "kebab-case")]
#[strum(serialize_all = "kebab-case")]
pub enum NetworkAccess {
Restricted,
Enabled,
}
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
#[serde(rename = "environment_context", rename_all = "snake_case")]
pub(crate) struct EnvironmentContext {
pub cwd: PathBuf,
pub approval_policy: AskForApproval,
pub sandbox_mode: SandboxMode,
pub network_access: NetworkAccess,
}
impl EnvironmentContext {
pub fn new(
cwd: PathBuf,
approval_policy: AskForApproval,
sandbox_policy: SandboxPolicy,
) -> Self {
Self {
cwd,
approval_policy,
sandbox_mode: match sandbox_policy {
SandboxPolicy::DangerFullAccess => SandboxMode::DangerFullAccess,
SandboxPolicy::ReadOnly => SandboxMode::ReadOnly,
SandboxPolicy::WorkspaceWrite { .. } => SandboxMode::WorkspaceWrite,
},
network_access: match sandbox_policy {
SandboxPolicy::DangerFullAccess => NetworkAccess::Enabled,
SandboxPolicy::ReadOnly => NetworkAccess::Restricted,
SandboxPolicy::WorkspaceWrite { network_access, .. } => {
if network_access {
NetworkAccess::Enabled
} else {
NetworkAccess::Restricted
}
}
},
}
}
}
impl Display for EnvironmentContext {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
writeln!(
f,
"Current working directory: {}",
self.cwd.to_string_lossy()
)?;
writeln!(f, "Approval policy: {}", self.approval_policy)?;
writeln!(f, "Sandbox mode: {}", self.sandbox_mode)?;
writeln!(f, "Network access: {}", self.network_access)?;
Ok(())
}
}
impl From<EnvironmentContext> for ResponseItem {
fn from(ec: EnvironmentContext) -> Self {
ResponseItem::Message {
id: None,
role: "user".to_string(),
content: vec![ContentItem::InputText {
text: format!("{ENVIRONMENT_CONTEXT_START}{ec}{ENVIRONMENT_CONTEXT_END}"),
}],
}
}
}
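// Putting the pieces together: a sketch of the rendered context message
// (values illustrative; assumes the kebab-case Display impls used above).
fn demo_environment_context() {
    let ec = EnvironmentContext::new(
        PathBuf::from("/work/repo"),
        AskForApproval::default(),
        SandboxPolicy::ReadOnly,
    );
    // Produces roughly:
    //   <environment_context>
    //   Current working directory: /work/repo
    //   Approval policy: <default policy>
    //   Sandbox mode: read-only
    //   Network access: restricted
    //   </environment_context>
    let text = format!("{ENVIRONMENT_CONTEXT_START}{ec}{ENVIRONMENT_CONTEXT_END}");
    println!("{text}");
}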

View File

@@ -1,10 +1,8 @@
use reqwest::StatusCode;
use serde_json;
use std::io;
use std::time::Duration;
use thiserror::Error;
use tokio::task::JoinError;
use uuid::Uuid;
pub type Result<T> = std::result::Result<T, CodexErr>;
@@ -43,16 +41,8 @@ pub enum CodexErr {
/// handshake has succeeded but **before** it finished emitting `response.completed`.
///
/// The Session loop treats this as a transient error and will automatically retry the turn.
///
/// Optionally includes the requested delay before retrying the turn.
#[error("stream disconnected before completion: {0}")]
Stream(String, Option<Duration>),
#[error("no conversation with id: {0}")]
ConversationNotFound(Uuid),
#[error("session configured event was not the first event in the stream")]
SessionConfiguredNotFirstEvent,
Stream(String),
/// Returned by run_command_stream when the spawned child process timed out (10s).
#[error("timeout waiting for child process to exit")]

View File

@@ -3,8 +3,10 @@ use std::os::unix::process::ExitStatusExt;
use std::collections::HashMap;
use std::io;
use std::path::Path;
use std::path::PathBuf;
use std::process::ExitStatus;
use std::sync::Arc;
use std::time::Duration;
use std::time::Instant;
@@ -13,11 +15,11 @@ use tokio::io::AsyncRead;
use tokio::io::AsyncReadExt;
use tokio::io::BufReader;
use tokio::process::Child;
use tokio::sync::Notify;
use crate::error::CodexErr;
use crate::error::Result;
use crate::error::SandboxErr;
use crate::landlock::spawn_command_under_linux_sandbox;
use crate::protocol::Event;
use crate::protocol::EventMsg;
use crate::protocol::ExecCommandOutputDeltaEvent;
@@ -78,6 +80,7 @@ pub struct StdoutStream {
pub async fn process_exec_tool_call(
params: ExecParams,
sandbox_type: SandboxType,
ctrl_c: Arc<Notify>,
sandbox_policy: &SandboxPolicy,
codex_linux_sandbox_exe: &Option<PathBuf>,
stdout_stream: Option<StdoutStream>,
@@ -86,7 +89,7 @@ pub async fn process_exec_tool_call(
let raw_output_result: std::result::Result<RawExecToolCallOutput, CodexErr> = match sandbox_type
{
SandboxType::None => exec(params, sandbox_policy, stdout_stream.clone()).await,
SandboxType::None => exec(params, sandbox_policy, ctrl_c, stdout_stream.clone()).await,
SandboxType::MacosSeatbelt => {
let timeout = params.timeout_duration();
let ExecParams {
@@ -100,7 +103,7 @@ pub async fn process_exec_tool_call(
env,
)
.await?;
consume_truncated_output(child, timeout, stdout_stream.clone()).await
consume_truncated_output(child, ctrl_c, timeout, stdout_stream.clone()).await
}
SandboxType::LinuxSeccomp => {
let timeout = params.timeout_duration();
@@ -121,7 +124,7 @@ pub async fn process_exec_tool_call(
)
.await?;
consume_truncated_output(child, timeout, stdout_stream).await
consume_truncated_output(child, ctrl_c, timeout, stdout_stream).await
}
};
let duration = start.elapsed();
@@ -163,6 +166,65 @@ pub async fn process_exec_tool_call(
}
}
/// Spawn a shell tool command under the Linux Landlock+seccomp sandbox helper
/// (codex-linux-sandbox).
///
/// Unlike macOS Seatbelt, where we embed the policy text directly, the Linux
/// helper receives the working directory and the JSON-serialized
/// [`SandboxPolicy`], followed by a `--` separator and then the original
/// command (see `create_linux_sandbox_command_args` below).
pub async fn spawn_command_under_linux_sandbox<P>(
codex_linux_sandbox_exe: P,
command: Vec<String>,
sandbox_policy: &SandboxPolicy,
cwd: PathBuf,
stdio_policy: StdioPolicy,
env: HashMap<String, String>,
) -> std::io::Result<Child>
where
P: AsRef<Path>,
{
let args = create_linux_sandbox_command_args(command, sandbox_policy, &cwd);
let arg0 = Some("codex-linux-sandbox");
spawn_child_async(
codex_linux_sandbox_exe.as_ref().to_path_buf(),
args,
arg0,
cwd,
sandbox_policy,
stdio_policy,
env,
)
.await
}
/// Converts the sandbox policy into the CLI invocation for `codex-linux-sandbox`.
fn create_linux_sandbox_command_args(
command: Vec<String>,
sandbox_policy: &SandboxPolicy,
cwd: &Path,
) -> Vec<String> {
#[expect(clippy::expect_used)]
let sandbox_policy_cwd = cwd.to_str().expect("cwd must be valid UTF-8").to_string();
#[expect(clippy::expect_used)]
let sandbox_policy_json =
serde_json::to_string(sandbox_policy).expect("Failed to serialize SandboxPolicy to JSON");
let mut linux_cmd: Vec<String> = vec![
sandbox_policy_cwd,
sandbox_policy_json,
// Separator so that command arguments starting with `-` are not parsed as
// options of the helper itself.
"--".to_string(),
];
// Append the original tool command.
linux_cmd.extend(command);
linux_cmd
}
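// To make the resulting invocation concrete, a sketch (paths illustrative;
// the policy JSON shape follows `SandboxPolicy`'s serde tagging):
fn demo_linux_sandbox_args() {
    let args = create_linux_sandbox_command_args(
        vec!["ls".to_string(), "-la".to_string()],
        &SandboxPolicy::ReadOnly,
        Path::new("/work/repo"),
    );
    // args[0] = cwd, args[1] = policy JSON, args[2] = "--", then the command,
    // i.e. roughly: /work/repo {"mode":"read-only"} -- ls -la
    assert_eq!(args[2], "--");
    assert_eq!(&args[3..], ["ls", "-la"]);
}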
/// We don't have a fully deterministic way to tell whether a command failed
/// because of the sandbox: a command in the user's zshrc might hit an error
/// even though the command itself is fine, and the command can fail or
/// succeed for reasons unrelated to sandboxing.
@@ -224,6 +286,7 @@ pub struct ExecToolCallOutput {
async fn exec(
params: ExecParams,
sandbox_policy: &SandboxPolicy,
ctrl_c: Arc<Notify>,
stdout_stream: Option<StdoutStream>,
) -> Result<RawExecToolCallOutput> {
let timeout = params.timeout_duration();
@@ -248,13 +311,14 @@ async fn exec(
env,
)
.await?;
consume_truncated_output(child, timeout, stdout_stream).await
consume_truncated_output(child, ctrl_c, timeout, stdout_stream).await
}
/// Consumes the output of a child process, truncating it so it is suitable for
/// use as the output of a `shell` tool call. Also enforces the specified timeout.
pub(crate) async fn consume_truncated_output(
mut child: Child,
ctrl_c: Arc<Notify>,
timeout: Duration,
stdout_stream: Option<StdoutStream>,
) -> Result<RawExecToolCallOutput> {
@@ -288,6 +352,7 @@ pub(crate) async fn consume_truncated_output(
true,
));
let interrupted = ctrl_c.notified();
let exit_status = tokio::select! {
result = tokio::time::timeout(timeout, child.wait()) => {
match result {
@@ -301,7 +366,7 @@ pub(crate) async fn consume_truncated_output(
}
}
}
_ = tokio::signal::ctrl_c() => {
_ = interrupted => {
child.start_kill()?;
synthetic_exit_status(128 + SIGKILL_CODE)
}
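// The per-session `Notify` replaces the process-global ctrl_c signal, so only
// the owning session cancels its own child. A self-contained sketch of the
// same select pattern (standalone example; assumes the tokio runtime):
use std::sync::Arc;
use tokio::sync::Notify;

#[tokio::main]
async fn main() {
    let ctrl_c = Arc::new(Notify::new());
    let waiter = ctrl_c.clone();
    let task = tokio::spawn(async move {
        tokio::select! {
            _ = tokio::time::sleep(std::time::Duration::from_secs(60)) => "finished",
            _ = waiter.notified() => "interrupted",
        }
    });
    // notify_one() stores a permit, so the interrupt is observed even if the
    // task has not started awaiting yet.
    ctrl_c.notify_one();
    assert_eq!(task.await.unwrap(), "interrupted");
}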

View File

@@ -70,6 +70,8 @@ where
#[cfg(test)]
mod tests {
#![allow(clippy::unwrap_used, clippy::expect_used)]
use super::*;
use crate::config_types::ShellEnvironmentPolicyInherit;
use maplit::hashmap;

View File

@@ -1,16 +1,11 @@
use std::collections::HashSet;
use std::path::Path;
use codex_protocol::mcp_protocol::GitSha;
use futures::future::join_all;
use serde::Deserialize;
use serde::Serialize;
use tokio::process::Command;
use tokio::time::Duration as TokioDuration;
use tokio::time::timeout;
use crate::util::is_inside_git_repo;
/// Timeout for git commands to prevent freezing on large repositories.
const GIT_COMMAND_TIMEOUT: TokioDuration = TokioDuration::from_secs(5);
@@ -27,12 +22,6 @@ pub struct GitInfo {
pub repository_url: Option<String>,
}
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct GitDiffToRemote {
pub sha: GitSha,
pub diff: String,
}
/// Collect git repository information from the given working directory using command-line git.
/// Returns None if no git repository is found or if git operations fail.
/// Uses timeouts to prevent freezing on large repositories.
@@ -62,52 +51,38 @@ pub async fn collect_git_info(cwd: &Path) -> Option<GitInfo> {
};
// Process commit hash
if let Some(output) = commit_result
&& output.status.success()
&& let Ok(hash) = String::from_utf8(output.stdout)
{
git_info.commit_hash = Some(hash.trim().to_string());
if let Some(output) = commit_result {
if output.status.success() {
if let Ok(hash) = String::from_utf8(output.stdout) {
git_info.commit_hash = Some(hash.trim().to_string());
}
}
}
// Process branch name
if let Some(output) = branch_result
&& output.status.success()
&& let Ok(branch) = String::from_utf8(output.stdout)
{
let branch = branch.trim();
if branch != "HEAD" {
git_info.branch = Some(branch.to_string());
if let Some(output) = branch_result {
if output.status.success() {
if let Ok(branch) = String::from_utf8(output.stdout) {
let branch = branch.trim();
if branch != "HEAD" {
git_info.branch = Some(branch.to_string());
}
}
}
}
// Process repository URL
if let Some(output) = url_result
&& output.status.success()
&& let Ok(url) = String::from_utf8(output.stdout)
{
git_info.repository_url = Some(url.trim().to_string());
if let Some(output) = url_result {
if output.status.success() {
if let Ok(url) = String::from_utf8(output.stdout) {
git_info.repository_url = Some(url.trim().to_string());
}
}
}
Some(git_info)
}
/// Returns the closest git sha to HEAD that also exists on a remote, along with the diff to that sha.
pub async fn git_diff_to_remote(cwd: &Path) -> Option<GitDiffToRemote> {
if !is_inside_git_repo(cwd) {
return None;
}
let remotes = get_git_remotes(cwd).await?;
let branches = branch_ancestry(cwd).await?;
let base_sha = find_closest_sha(cwd, &branches, &remotes).await?;
let diff = diff_against_sha(cwd, &base_sha).await?;
Some(GitDiffToRemote {
sha: base_sha,
diff,
})
}
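// A hedged sketch of calling the pipeline above (illustrative only):
async fn demo_git_diff(cwd: &Path) {
    // None when outside a git repo, when there are no remotes, or when the
    // underlying git commands fail or time out.
    if let Some(GitDiffToRemote { sha, diff }) = git_diff_to_remote(cwd).await {
        println!("closest remote sha: {}", sha.0);
        println!("diff size: {} bytes", diff.len());
    }
}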
/// Run a git command with a timeout to prevent blocking on large repositories
async fn run_git_command_with_timeout(args: &[&str], cwd: &Path) -> Option<std::process::Output> {
let result = timeout(
@@ -122,311 +97,11 @@ async fn run_git_command_with_timeout(args: &[&str], cwd: &Path) -> Option<std::
}
}
async fn get_git_remotes(cwd: &Path) -> Option<Vec<String>> {
let output = run_git_command_with_timeout(&["remote"], cwd).await?;
if !output.status.success() {
return None;
}
let mut remotes: Vec<String> = String::from_utf8(output.stdout)
.ok()?
.lines()
.map(|s| s.to_string())
.collect();
if let Some(pos) = remotes.iter().position(|r| r == "origin") {
let origin = remotes.remove(pos);
remotes.insert(0, origin);
}
Some(remotes)
}
/// Attempt to determine the repository's default branch name.
///
/// Preference order:
/// 1) The symbolic ref at `refs/remotes/<remote>/HEAD` for the first remote (origin prioritized)
/// 2) `git remote show <remote>` parsed for "HEAD branch: <name>"
/// 3) Local fallback to existing `main` or `master` if present
async fn get_default_branch(cwd: &Path) -> Option<String> {
// Prefer the first remote (with origin prioritized)
let remotes = get_git_remotes(cwd).await.unwrap_or_default();
for remote in remotes {
// Try symbolic-ref, which returns something like: refs/remotes/origin/main
if let Some(symref_output) = run_git_command_with_timeout(
&[
"symbolic-ref",
"--quiet",
&format!("refs/remotes/{remote}/HEAD"),
],
cwd,
)
.await
&& symref_output.status.success()
&& let Ok(sym) = String::from_utf8(symref_output.stdout)
{
let trimmed = sym.trim();
if let Some((_, name)) = trimmed.rsplit_once('/') {
return Some(name.to_string());
}
}
// Fall back to parsing `git remote show <remote>` output
if let Some(show_output) =
run_git_command_with_timeout(&["remote", "show", &remote], cwd).await
&& show_output.status.success()
&& let Ok(text) = String::from_utf8(show_output.stdout)
{
for line in text.lines() {
let line = line.trim();
if let Some(rest) = line.strip_prefix("HEAD branch:") {
let name = rest.trim();
if !name.is_empty() {
return Some(name.to_string());
}
}
}
}
}
// No remote-derived default; try common local defaults if they exist
for candidate in ["main", "master"] {
if let Some(verify) = run_git_command_with_timeout(
&[
"rev-parse",
"--verify",
"--quiet",
&format!("refs/heads/{candidate}"),
],
cwd,
)
.await
&& verify.status.success()
{
return Some(candidate.to_string());
}
}
None
}
/// Build an ancestry of branches starting at the current branch and ending at the
/// repository's default branch (if determinable).
async fn branch_ancestry(cwd: &Path) -> Option<Vec<String>> {
// Discover current branch (ignore detached HEAD by treating it as None)
let current_branch = run_git_command_with_timeout(&["rev-parse", "--abbrev-ref", "HEAD"], cwd)
.await
.and_then(|o| {
if o.status.success() {
String::from_utf8(o.stdout).ok()
} else {
None
}
})
.map(|s| s.trim().to_string())
.filter(|s| s != "HEAD");
// Discover default branch
let default_branch = get_default_branch(cwd).await;
let mut ancestry: Vec<String> = Vec::new();
let mut seen: HashSet<String> = HashSet::new();
if let Some(cb) = current_branch.clone() {
seen.insert(cb.clone());
ancestry.push(cb);
}
if let Some(db) = default_branch
&& !seen.contains(&db)
{
seen.insert(db.clone());
ancestry.push(db);
}
// Expand candidates: include any remote branches that already contain HEAD.
// This addresses cases where we're on a new local-only branch forked from a
// remote branch that isn't the repository default. We prioritize remotes in
// the order returned by get_git_remotes (origin first).
let remotes = get_git_remotes(cwd).await.unwrap_or_default();
for remote in remotes {
if let Some(output) = run_git_command_with_timeout(
&[
"for-each-ref",
"--format=%(refname:short)",
"--contains=HEAD",
&format!("refs/remotes/{remote}"),
],
cwd,
)
.await
&& output.status.success()
&& let Ok(text) = String::from_utf8(output.stdout)
{
for line in text.lines() {
let short = line.trim();
// Expect format like: "origin/feature"; extract the branch path after "remote/"
if let Some(stripped) = short.strip_prefix(&format!("{remote}/"))
&& !stripped.is_empty()
&& !seen.contains(stripped)
{
seen.insert(stripped.to_string());
ancestry.push(stripped.to_string());
}
}
}
}
// Ensure we return a Some vector, even if empty, so caller logic can proceed
Some(ancestry)
}
// Helper for a single branch: return the remote SHA if the branch is present
// on any remote, plus the distance (how many commits HEAD is ahead of the
// branch). The first item is None if the branch is not present on any remote.
// Returns None if the distance could not be computed due to git errors/timeouts.
async fn branch_remote_and_distance(
cwd: &Path,
branch: &str,
remotes: &[String],
) -> Option<(Option<GitSha>, usize)> {
// Try to find the first remote ref that exists for this branch (origin prioritized by caller).
let mut found_remote_sha: Option<GitSha> = None;
let mut found_remote_ref: Option<String> = None;
for remote in remotes {
let remote_ref = format!("refs/remotes/{remote}/{branch}");
let Some(verify_output) =
run_git_command_with_timeout(&["rev-parse", "--verify", "--quiet", &remote_ref], cwd)
.await
else {
// Mirror previous behavior: if the verify call times out/fails at the process level,
// treat the entire branch as unusable.
return None;
};
if !verify_output.status.success() {
continue;
}
let Ok(sha) = String::from_utf8(verify_output.stdout) else {
// Mirror previous behavior and skip the entire branch on parse failure.
return None;
};
found_remote_sha = Some(GitSha::new(sha.trim()));
found_remote_ref = Some(remote_ref);
break;
}
// Compute distance as the number of commits HEAD is ahead of the branch.
// Prefer local branch name if it exists; otherwise fall back to the remote ref (if any).
let count_output = if let Some(local_count) =
run_git_command_with_timeout(&["rev-list", "--count", &format!("{branch}..HEAD")], cwd)
.await
{
if local_count.status.success() {
local_count
} else if let Some(remote_ref) = &found_remote_ref {
match run_git_command_with_timeout(
&["rev-list", "--count", &format!("{remote_ref}..HEAD")],
cwd,
)
.await
{
Some(remote_count) => remote_count,
None => return None,
}
} else {
return None;
}
} else if let Some(remote_ref) = &found_remote_ref {
match run_git_command_with_timeout(
&["rev-list", "--count", &format!("{remote_ref}..HEAD")],
cwd,
)
.await
{
Some(remote_count) => remote_count,
None => return None,
}
} else {
return None;
};
if !count_output.status.success() {
return None;
}
let Ok(distance_str) = String::from_utf8(count_output.stdout) else {
return None;
};
let Ok(distance) = distance_str.trim().parse::<usize>() else {
return None;
};
Some((found_remote_sha, distance))
}
// Finds the closest sha that exists on any of the branches and also exists on any of the remotes.
async fn find_closest_sha(cwd: &Path, branches: &[String], remotes: &[String]) -> Option<GitSha> {
// A sha and how many commits away from HEAD it is.
let mut closest_sha: Option<(GitSha, usize)> = None;
for branch in branches {
let Some((maybe_remote_sha, distance)) =
branch_remote_and_distance(cwd, branch, remotes).await
else {
continue;
};
let Some(remote_sha) = maybe_remote_sha else {
// Preserve existing behavior: skip branches that are not present on a remote.
continue;
};
match &closest_sha {
None => closest_sha = Some((remote_sha, distance)),
Some((_, best_distance)) if distance < *best_distance => {
closest_sha = Some((remote_sha, distance));
}
_ => {}
}
}
closest_sha.map(|(sha, _)| sha)
}
async fn diff_against_sha(cwd: &Path, sha: &GitSha) -> Option<String> {
let output = run_git_command_with_timeout(&["diff", &sha.0], cwd).await?;
// 0 is success and no diff.
// 1 is success but there is a diff.
let exit_ok = output.status.code().is_some_and(|c| c == 0 || c == 1);
if !exit_ok {
return None;
}
let mut diff = String::from_utf8(output.stdout).ok()?;
if let Some(untracked_output) =
run_git_command_with_timeout(&["ls-files", "--others", "--exclude-standard"], cwd).await
&& untracked_output.status.success()
{
let untracked: Vec<String> = String::from_utf8(untracked_output.stdout)
.ok()?
.lines()
.map(|s| s.to_string())
.filter(|s| !s.is_empty())
.collect();
if !untracked.is_empty() {
let futures_iter = untracked.into_iter().map(|file| async move {
let file_owned = file;
let args_vec: Vec<&str> =
vec!["diff", "--binary", "--no-index", "/dev/null", &file_owned];
run_git_command_with_timeout(&args_vec, cwd).await
});
let results = join_all(futures_iter).await;
for extra in results.into_iter().flatten() {
if extra.status.code().is_some_and(|c| c == 0 || c == 1)
&& let Ok(s) = String::from_utf8(extra.stdout)
{
diff.push_str(&s);
}
}
}
}
Some(diff)
}
#[cfg(test)]
mod tests {
#![allow(clippy::expect_used)]
#![allow(clippy::unwrap_used)]
use super::*;
use std::fs;
@@ -435,8 +110,7 @@ mod tests {
// Helper function to create a test git repository
async fn create_test_git_repo(temp_dir: &TempDir) -> PathBuf {
let repo_path = temp_dir.path().join("repo");
fs::create_dir(&repo_path).expect("Failed to create repo dir");
let repo_path = temp_dir.path().to_path_buf();
let envs = vec![
("GIT_CONFIG_GLOBAL", "/dev/null"),
("GIT_CONFIG_NOSYSTEM", "1"),
@@ -491,41 +165,6 @@ mod tests {
repo_path
}
async fn create_test_git_repo_with_remote(temp_dir: &TempDir) -> (PathBuf, String) {
let repo_path = create_test_git_repo(temp_dir).await;
let remote_path = temp_dir.path().join("remote.git");
Command::new("git")
.args(["init", "--bare", remote_path.to_str().unwrap()])
.output()
.await
.expect("Failed to init bare remote");
Command::new("git")
.args(["remote", "add", "origin", remote_path.to_str().unwrap()])
.current_dir(&repo_path)
.output()
.await
.expect("Failed to add remote");
let output = Command::new("git")
.args(["rev-parse", "--abbrev-ref", "HEAD"])
.current_dir(&repo_path)
.output()
.await
.expect("Failed to get branch");
let branch = String::from_utf8(output.stdout).unwrap().trim().to_string();
Command::new("git")
.args(["push", "-u", "origin", &branch])
.current_dir(&repo_path)
.output()
.await
.expect("Failed to push initial commit");
(repo_path, branch)
}
#[tokio::test]
async fn test_collect_git_info_non_git_directory() {
let temp_dir = TempDir::new().expect("Failed to create temp dir");
@@ -639,136 +278,6 @@ mod tests {
assert_eq!(git_info.branch, Some("feature-branch".to_string()));
}
#[tokio::test]
async fn test_get_git_working_tree_state_clean_repo() {
let temp_dir = TempDir::new().expect("Failed to create temp dir");
let (repo_path, branch) = create_test_git_repo_with_remote(&temp_dir).await;
let remote_sha = Command::new("git")
.args(["rev-parse", &format!("origin/{branch}")])
.current_dir(&repo_path)
.output()
.await
.expect("Failed to rev-parse remote");
let remote_sha = String::from_utf8(remote_sha.stdout)
.unwrap()
.trim()
.to_string();
let state = git_diff_to_remote(&repo_path)
.await
.expect("Should collect working tree state");
assert_eq!(state.sha, GitSha::new(&remote_sha));
assert!(state.diff.is_empty());
}
#[tokio::test]
async fn test_get_git_working_tree_state_with_changes() {
let temp_dir = TempDir::new().expect("Failed to create temp dir");
let (repo_path, branch) = create_test_git_repo_with_remote(&temp_dir).await;
let tracked = repo_path.join("test.txt");
fs::write(&tracked, "modified").unwrap();
fs::write(repo_path.join("untracked.txt"), "new").unwrap();
let remote_sha = Command::new("git")
.args(["rev-parse", &format!("origin/{branch}")])
.current_dir(&repo_path)
.output()
.await
.expect("Failed to rev-parse remote");
let remote_sha = String::from_utf8(remote_sha.stdout)
.unwrap()
.trim()
.to_string();
let state = git_diff_to_remote(&repo_path)
.await
.expect("Should collect working tree state");
assert_eq!(state.sha, GitSha::new(&remote_sha));
assert!(state.diff.contains("test.txt"));
assert!(state.diff.contains("untracked.txt"));
}
#[tokio::test]
async fn test_get_git_working_tree_state_branch_fallback() {
let temp_dir = TempDir::new().expect("Failed to create temp dir");
let (repo_path, _branch) = create_test_git_repo_with_remote(&temp_dir).await;
Command::new("git")
.args(["checkout", "-b", "feature"])
.current_dir(&repo_path)
.output()
.await
.expect("Failed to create feature branch");
Command::new("git")
.args(["push", "-u", "origin", "feature"])
.current_dir(&repo_path)
.output()
.await
.expect("Failed to push feature branch");
Command::new("git")
.args(["checkout", "-b", "local-branch"])
.current_dir(&repo_path)
.output()
.await
.expect("Failed to create local branch");
let remote_sha = Command::new("git")
.args(["rev-parse", "origin/feature"])
.current_dir(&repo_path)
.output()
.await
.expect("Failed to rev-parse remote");
let remote_sha = String::from_utf8(remote_sha.stdout)
.unwrap()
.trim()
.to_string();
let state = git_diff_to_remote(&repo_path)
.await
.expect("Should collect working tree state");
assert_eq!(state.sha, GitSha::new(&remote_sha));
}
#[tokio::test]
async fn test_get_git_working_tree_state_unpushed_commit() {
let temp_dir = TempDir::new().expect("Failed to create temp dir");
let (repo_path, branch) = create_test_git_repo_with_remote(&temp_dir).await;
let remote_sha = Command::new("git")
.args(["rev-parse", &format!("origin/{branch}")])
.current_dir(&repo_path)
.output()
.await
.expect("Failed to rev-parse remote");
let remote_sha = String::from_utf8(remote_sha.stdout)
.unwrap()
.trim()
.to_string();
fs::write(repo_path.join("test.txt"), "updated").unwrap();
Command::new("git")
.args(["add", "test.txt"])
.current_dir(&repo_path)
.output()
.await
.expect("Failed to add file");
Command::new("git")
.args(["commit", "-m", "local change"])
.current_dir(&repo_path)
.output()
.await
.expect("Failed to commit");
let state = git_diff_to_remote(&repo_path)
.await
.expect("Should collect working tree state");
assert_eq!(state.sha, GitSha::new(&remote_sha));
assert!(state.diff.contains("updated"));
}
#[test]
fn test_git_info_serialization() {
let git_info = GitInfo {

View File

@@ -12,17 +12,20 @@ pub fn is_known_safe_command(command: &[String]) -> bool {
// introduce side effects ( "&&", "||", ";", and "|" ). If every
// individual command in the script is itself a known-safe command, then
// the composite expression is considered safe.
if let [bash, flag, script] = command
&& bash == "bash"
&& flag == "-lc"
&& let Some(tree) = try_parse_bash(script)
&& let Some(all_commands) = try_parse_word_only_commands_sequence(&tree, script)
&& !all_commands.is_empty()
&& all_commands
.iter()
.all(|cmd| is_safe_to_call_with_exec(cmd))
{
return true;
if let [bash, flag, script] = command {
if bash == "bash" && flag == "-lc" {
if let Some(tree) = try_parse_bash(script) {
if let Some(all_commands) = try_parse_word_only_commands_sequence(&tree, script) {
if !all_commands.is_empty()
&& all_commands
.iter()
.all(|cmd| is_safe_to_call_with_exec(cmd))
{
return true;
}
}
}
}
}
false
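// A sketch of what this check accepts and rejects (assumes simple commands
// like `ls` and `pwd` are on the exec safe-list; illustrative):
fn demo_safe_command_check() {
    let composite = vec![
        "bash".to_string(),
        "-lc".to_string(),
        "ls && pwd".to_string(),
    ];
    // Both sub-commands parse as word-only and are individually safe.
    assert!(is_known_safe_command(&composite));
    let risky = vec![
        "bash".to_string(),
        "-lc".to_string(),
        "rm -rf /tmp/scratch".to_string(),
    ];
    // `rm` is not on the safe-list, so the whole script is rejected.
    assert!(!is_known_safe_command(&risky));
}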
@@ -159,6 +162,7 @@ fn is_valid_sed_n_arg(arg: Option<&str>) -> bool {
#[cfg(test)]
mod tests {
#![allow(clippy::unwrap_used)]
use super::*;
fn vec_str(args: &[&str]) -> Vec<String> {

View File

@@ -1,66 +0,0 @@
use crate::protocol::SandboxPolicy;
use crate::spawn::StdioPolicy;
use crate::spawn::spawn_child_async;
use std::collections::HashMap;
use std::path::Path;
use std::path::PathBuf;
use tokio::process::Child;
/// Spawn a shell tool command under the Linux Landlock+seccomp sandbox helper
/// (codex-linux-sandbox).
///
/// Unlike macOS Seatbelt, where we embed the policy text directly, the Linux
/// helper receives the working directory and the JSON-serialized
/// [`SandboxPolicy`], followed by a `--` separator and then the original
/// command (see `create_linux_sandbox_command_args` below).
pub async fn spawn_command_under_linux_sandbox<P>(
codex_linux_sandbox_exe: P,
command: Vec<String>,
sandbox_policy: &SandboxPolicy,
cwd: PathBuf,
stdio_policy: StdioPolicy,
env: HashMap<String, String>,
) -> std::io::Result<Child>
where
P: AsRef<Path>,
{
let args = create_linux_sandbox_command_args(command, sandbox_policy, &cwd);
let arg0 = Some("codex-linux-sandbox");
spawn_child_async(
codex_linux_sandbox_exe.as_ref().to_path_buf(),
args,
arg0,
cwd,
sandbox_policy,
stdio_policy,
env,
)
.await
}
/// Converts the sandbox policy into the CLI invocation for `codex-linux-sandbox`.
fn create_linux_sandbox_command_args(
command: Vec<String>,
sandbox_policy: &SandboxPolicy,
cwd: &Path,
) -> Vec<String> {
#[expect(clippy::expect_used)]
let sandbox_policy_cwd = cwd.to_str().expect("cwd must be valid UTF-8").to_string();
#[expect(clippy::expect_used)]
let sandbox_policy_json =
serde_json::to_string(sandbox_policy).expect("Failed to serialize SandboxPolicy to JSON");
let mut linux_cmd: Vec<String> = vec![
sandbox_policy_cwd,
sandbox_policy_json,
// Separator so that command arguments starting with `-` are not parsed as
// options of the helper itself.
"--".to_string(),
];
// Append the original tool command.
linux_cmd.extend(command);
linux_cmd
}

View File

@@ -11,20 +11,19 @@ mod chat_completions;
mod client;
mod client_common;
pub mod codex;
mod codex_conversation;
pub use codex_conversation::CodexConversation;
pub use codex::Codex;
pub use codex::CodexSpawnOk;
pub mod codex_wrapper;
pub mod config;
pub mod config_profile;
pub mod config_types;
mod conversation_history;
mod environment_context;
pub mod error;
pub mod exec;
pub mod exec_env;
mod flags;
pub mod git_info;
mod is_safe_command;
pub mod landlock;
mod mcp_connection_manager;
mod mcp_tool_call;
mod message_history;
@@ -35,30 +34,20 @@ pub use model_provider_info::ModelProviderInfo;
pub use model_provider_info::WireApi;
pub use model_provider_info::built_in_model_providers;
pub use model_provider_info::create_oss_provider_with_base_url;
mod conversation_manager;
pub use conversation_manager::ConversationManager;
pub use conversation_manager::NewConversation;
pub mod model_family;
mod models;
mod openai_model_info;
mod openai_tools;
pub mod plan_tool;
mod project_doc;
pub mod protocol;
mod rollout;
pub(crate) mod safety;
pub mod seatbelt;
pub mod shell;
pub mod spawn;
pub mod terminal;
pub mod turn_diff_tracker;
pub mod user_agent;
mod user_notification;
pub mod util;
pub use apply_patch::CODEX_APPLY_PATCH_ARG1;
pub use safety::get_platform_sandbox;
// Re-export the protocol types from the standalone `codex-protocol` crate so existing
// `codex_core::protocol::...` references continue to work across the workspace.
pub use codex_protocol::protocol;
// Re-export protocol config enums to ensure call sites can use the same types
// as those in the protocol crate when constructing protocol messages.
pub use codex_protocol::config_types as protocol_config_types;

View File

@@ -281,6 +281,7 @@ fn is_valid_mcp_server_name(server_name: &str) -> bool {
}
#[cfg(test)]
#[allow(clippy::unwrap_used)]
mod tests {
use super::*;
use mcp_types::ToolInputSchema;

View File

@@ -34,8 +34,6 @@ use crate::config_types::HistoryPersistence;
use std::os::unix::fs::OpenOptionsExt;
#[cfg(unix)]
use std::os::unix::fs::PermissionsExt;
#[cfg(unix)]
use std::os::unix::io::AsRawFd;
/// Filename that stores the message history inside `~/.codex`.
const HISTORY_FILENAME: &str = "history.jsonl";
@@ -127,12 +125,11 @@ pub(crate) async fn append_entry(text: &str, session_id: &Uuid, config: &Config)
/// times if the lock is currently held by another process. This prevents a
/// potential indefinite wait while still giving other writers some time to
/// finish their operation.
#[cfg(unix)]
async fn acquire_exclusive_lock_with_retry(file: &File) -> Result<()> {
async fn acquire_exclusive_lock_with_retry(file: &std::fs::File) -> Result<()> {
use tokio::time::sleep;
for _ in 0..MAX_RETRIES {
match try_flock_exclusive(file) {
match fs2::FileExt::try_lock_exclusive(file) {
Ok(()) => return Ok(()),
Err(e) if e.kind() == std::io::ErrorKind::WouldBlock => {
sleep(RETRY_SLEEP).await;
@@ -147,12 +144,6 @@ async fn acquire_exclusive_lock_with_retry(file: &File) -> Result<()> {
))
}
#[cfg(not(unix))]
async fn acquire_exclusive_lock_with_retry(_file: &File) -> Result<()> {
// On non-Unix, skip locking; appends are still atomic with O_APPEND.
Ok(())
}
/// Asynchronously fetch the history file's *identifier* (inode on Unix) and
/// the current number of entries by counting newline characters.
pub(crate) async fn history_metadata(config: &Config) -> (u64, usize) {
@@ -268,7 +259,7 @@ pub(crate) fn lookup(log_id: u64, offset: usize, config: &Config) -> Option<Hist
#[cfg(unix)]
fn acquire_shared_lock_with_retry(file: &File) -> Result<()> {
for _ in 0..MAX_RETRIES {
match try_flock_shared(file) {
match fs2::FileExt::try_lock_shared(file) {
Ok(()) => return Ok(()),
Err(e) if e.kind() == std::io::ErrorKind::WouldBlock => {
std::thread::sleep(RETRY_SLEEP);
@@ -283,45 +274,6 @@ fn acquire_shared_lock_with_retry(file: &File) -> Result<()> {
))
}
#[cfg(not(unix))]
fn acquire_shared_lock_with_retry(_file: &File) -> Result<()> {
Ok(())
}
#[cfg(unix)]
fn try_flock_exclusive(file: &File) -> Result<()> {
let fd = file.as_raw_fd();
let rc = unsafe { libc::flock(fd, libc::LOCK_EX | libc::LOCK_NB) };
if rc == 0 {
Ok(())
} else {
let err = std::io::Error::last_os_error();
match err.raw_os_error() {
Some(code) if code == libc::EWOULDBLOCK || code == libc::EAGAIN => Err(
std::io::Error::new(std::io::ErrorKind::WouldBlock, "lock would block"),
),
_ => Err(err),
}
}
}
#[cfg(unix)]
fn try_flock_shared(file: &File) -> Result<()> {
let fd = file.as_raw_fd();
let rc = unsafe { libc::flock(fd, libc::LOCK_SH | libc::LOCK_NB) };
if rc == 0 {
Ok(())
} else {
let err = std::io::Error::last_os_error();
match err.raw_os_error() {
Some(code) if code == libc::EWOULDBLOCK || code == libc::EAGAIN => Err(
std::io::Error::new(std::io::ErrorKind::WouldBlock, "lock would block"),
),
_ => Err(err),
}
}
}
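// The hand-rolled flock(2) wrappers above are replaced by the `fs2` crate's
// extension trait, which exposes the same non-blocking semantics portably.
// A minimal sketch of the acquire-or-skip idea (assumes `fs2` is a
// dependency; not the crate's exact code):
use fs2::FileExt;
use std::fs::OpenOptions;

fn try_append_locked(path: &str) -> std::io::Result<()> {
    let file = OpenOptions::new().create(true).append(true).open(path)?;
    match file.try_lock_exclusive() {
        // Lock acquired: safe to append the history entry, then unlock.
        Ok(()) => file.unlock(),
        // Held by another process: the caller sleeps and retries, as above.
        Err(e) if e.kind() == std::io::ErrorKind::WouldBlock => Ok(()),
        Err(e) => Err(e),
    }
}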
/// On Unix systems ensure the file permissions are `0o600` (rw-------). If the
/// permissions cannot be changed the error is propagated to the caller.
#[cfg(unix)]

View File

@@ -23,10 +23,6 @@ pub struct ModelFamily {
// the model such that its description can be omitted.
// See https://platform.openai.com/docs/guides/tools-local-shell
pub uses_local_shell_tool: bool,
/// True if the model performs better when `apply_patch` is provided as
/// a tool call instead of just a bash command.
pub uses_apply_patch_tool: bool,
}
macro_rules! model_family {
@@ -40,7 +36,6 @@ macro_rules! model_family {
needs_special_apply_patch_instructions: false,
supports_reasoning_summaries: false,
uses_local_shell_tool: false,
uses_apply_patch_tool: false,
};
// apply overrides
$(
@@ -60,7 +55,6 @@ macro_rules! simple_model_family {
needs_special_apply_patch_instructions: false,
supports_reasoning_summaries: false,
uses_local_shell_tool: false,
uses_apply_patch_tool: false,
})
}};
}
@@ -84,20 +78,15 @@ pub fn find_family_for_model(slug: &str) -> Option<ModelFamily> {
supports_reasoning_summaries: true,
uses_local_shell_tool: true,
)
} else if slug.starts_with("codex-") {
model_family!(
slug, slug,
supports_reasoning_summaries: true,
)
} else if slug.starts_with("gpt-4.1") {
model_family!(
slug, "gpt-4.1",
needs_special_apply_patch_instructions: true,
)
} else if slug.starts_with("gpt-oss") {
model_family!(slug, "gpt-oss", uses_apply_patch_tool: true)
} else if slug.starts_with("gpt-4o") {
simple_model_family!(slug, "gpt-4o")
} else if slug.starts_with("gpt-oss") {
simple_model_family!(slug, "gpt-oss")
} else if slug.starts_with("gpt-3.5") {
simple_model_family!(slug, "gpt-3.5")
} else if slug.starts_with("gpt-5") {

View File

@@ -167,10 +167,10 @@ impl ModelProviderInfo {
if let Some(env_headers) = &self.env_http_headers {
for (header, env_var) in env_headers {
if let Ok(val) = std::env::var(env_var)
&& !val.trim().is_empty()
{
builder = builder.header(header, val);
if let Ok(val) = std::env::var(env_var) {
if !val.trim().is_empty() {
builder = builder.header(header, val);
}
}
}
}
@@ -322,6 +322,7 @@ pub fn create_oss_provider_with_base_url(base_url: &str) -> ModelProviderInfo {
#[cfg(test)]
mod tests {
#![allow(clippy::unwrap_used)]
use super::*;
use pretty_assertions::assert_eq;

View File

@@ -45,7 +45,7 @@ pub enum ResponseItem {
Reasoning {
id: String,
summary: Vec<ReasoningItemReasoningSummary>,
#[serde(default, skip_serializing_if = "should_serialize_reasoning_content")]
#[serde(default, skip_serializing_if = "Option::is_none")]
content: Option<Vec<ReasoningItemContent>>,
encrypted_content: Option<String>,
},
@@ -81,15 +81,6 @@ pub enum ResponseItem {
Other,
}
fn should_serialize_reasoning_content(content: &Option<Vec<ReasoningItemContent>>) -> bool {
match content {
Some(content) => !content
.iter()
.any(|c| matches!(c, ReasoningItemContent::ReasoningText { .. })),
None => false,
}
}
impl From<ResponseInputItem> for ResponseItem {
fn from(item: ResponseInputItem) -> Self {
match item {
@@ -151,7 +142,6 @@ pub enum ReasoningItemReasoningSummary {
#[serde(tag = "type", rename_all = "snake_case")]
pub enum ReasoningItemContent {
ReasoningText { text: String },
Text { text: String },
}
impl From<Vec<InputItem>> for ResponseInputItem {
@@ -183,7 +173,6 @@ impl From<Vec<InputItem>> for ResponseInputItem {
None
}
},
_ => None,
})
.collect::<Vec<ContentItem>>(),
}
@@ -267,6 +256,7 @@ impl std::ops::Deref for FunctionCallOutputPayload {
#[cfg(test)]
mod tests {
#![allow(clippy::unwrap_used)]
use super::*;
#[test]

View File

@@ -15,8 +15,7 @@ pub(crate) struct ModelInfo {
}
pub(crate) fn get_model_info(model_family: &ModelFamily) -> Option<ModelInfo> {
let slug = model_family.slug.as_str();
match slug {
match model_family.slug.as_str() {
// OSS models have a 128k shared token pool.
// Arbitrarily splitting it: 3/4 input context, 1/4 output.
// https://openai.com/index/gpt-oss-model-card/
@@ -83,11 +82,6 @@ pub(crate) fn get_model_info(model_family: &ModelFamily) -> Option<ModelInfo> {
max_output_tokens: 100_000,
}),
_ if slug.starts_with("codex-") => Some(ModelInfo {
context_window: 200_000,
max_output_tokens: 100_000,
}),
_ => None,
}
}

View File

@@ -43,7 +43,6 @@ pub enum ConfigShellToolType {
pub struct ToolsConfig {
pub shell_type: ConfigShellToolType,
pub plan_tool: bool,
pub apply_patch_tool: bool,
}
impl ToolsConfig {
@@ -52,7 +51,6 @@ impl ToolsConfig {
approval_policy: AskForApproval,
sandbox_policy: SandboxPolicy,
include_plan_tool: bool,
include_apply_patch_tool: bool,
) -> Self {
let mut shell_type = if model_family.uses_local_shell_tool {
ConfigShellToolType::LocalShell
@@ -68,7 +66,6 @@ impl ToolsConfig {
Self {
shell_type,
plan_tool: include_plan_tool,
apply_patch_tool: include_apply_patch_tool || model_family.uses_apply_patch_tool,
}
}
}
@@ -238,87 +235,6 @@ The shell tool is used to execute shell commands.
})
}
#[derive(Serialize, Deserialize)]
pub(crate) struct ApplyPatchToolArgs {
pub(crate) input: String,
}
fn create_apply_patch_tool() -> OpenAiTool {
// Minimal schema: one required string argument containing the patch body
let mut properties = BTreeMap::new();
properties.insert(
"input".to_string(),
JsonSchema::String {
description: Some(r#"The entire contents of the apply_patch command"#.to_string()),
},
);
OpenAiTool::Function(ResponsesApiTool {
name: "apply_patch".to_string(),
description: r#"Use this tool to edit files.
Your patch language is a stripped-down, file-oriented diff format designed to be easy to parse and safe to apply. You can think of it as a high-level envelope:
*** Begin Patch
[ one or more file sections ]
*** End Patch
Within that envelope, you get a sequence of file operations.
You MUST include a header to specify the action you are taking.
Each operation starts with one of three headers:
*** Add File: <path> - create a new file. Every following line is a + line (the initial contents).
*** Delete File: <path> - remove an existing file. Nothing follows.
*** Update File: <path> - patch an existing file in place (optionally with a rename).
May be immediately followed by *** Move to: <new path> if you want to rename the file.
Then one or more “hunks”, each introduced by @@ (optionally followed by a hunk header).
Within a hunk each line starts with:
+ for inserted text,
- for removed text, or
space ( ) for context.
At the end of a truncated hunk you can emit *** End of File.
Patch := Begin { FileOp } End
Begin := "*** Begin Patch" NEWLINE
End := "*** End Patch" NEWLINE
FileOp := AddFile | DeleteFile | UpdateFile
AddFile := "*** Add File: " path NEWLINE { "+" line NEWLINE }
DeleteFile := "*** Delete File: " path NEWLINE
UpdateFile := "*** Update File: " path NEWLINE [ MoveTo ] { Hunk }
MoveTo := "*** Move to: " newPath NEWLINE
Hunk := "@@" [ header ] NEWLINE { HunkLine } [ "*** End of File" NEWLINE ]
HunkLine := (" " | "-" | "+") text NEWLINE
A full patch can combine several operations:
*** Begin Patch
*** Add File: hello.txt
+Hello world
*** Update File: src/app.py
*** Move to: src/main.py
@@ def greet():
-print("Hi")
+print("Hello, world!")
*** Delete File: obsolete.txt
*** End Patch
It is important to remember:
- You must include a header with your intended action (Add/Delete/Update)
- You must prefix new lines with `+` even when creating a new file
"#
.to_string(),
strict: false,
parameters: JsonSchema::Object {
properties,
required: Some(vec!["input".to_string()]),
additional_properties: Some(false),
},
})
}
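// From the model's side, a call to this tool carries a single `input` string
// holding the entire patch. A hedged sketch of the argument payload
// (assumes `serde_json`):
fn demo_apply_patch_args() {
    let args = ApplyPatchToolArgs {
        input: "*** Begin Patch\n*** Add File: hello.txt\n+Hello world\n*** End Patch\n"
            .to_string(),
    };
    // The JSON object the model emits as the function-call arguments.
    println!("{}", serde_json::to_string(&args).unwrap());
}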
/// Returns JSON values that are compatible with Function Calling in the
/// Responses API:
/// https://platform.openai.com/docs/guides/function-calling?api-mode=responses
@@ -420,11 +336,11 @@ fn sanitize_json_schema(value: &mut JsonValue) {
}
JsonValue::Object(map) => {
// First, recursively sanitize known nested schema holders
if let Some(props) = map.get_mut("properties")
&& let Some(props_map) = props.as_object_mut()
{
for (_k, v) in props_map.iter_mut() {
sanitize_json_schema(v);
if let Some(props) = map.get_mut("properties") {
if let Some(props_map) = props.as_object_mut() {
for (_k, v) in props_map.iter_mut() {
sanitize_json_schema(v);
}
}
}
if let Some(items) = map.get_mut("items") {
@@ -444,18 +360,18 @@ fn sanitize_json_schema(value: &mut JsonValue) {
.map(|s| s.to_string());
// If type is an array (union), pick first supported; else leave to inference
if ty.is_none()
&& let Some(JsonValue::Array(types)) = map.get("type")
{
for t in types {
if let Some(tt) = t.as_str()
&& matches!(
tt,
"object" | "array" | "string" | "number" | "integer" | "boolean"
)
{
ty = Some(tt.to_string());
break;
if ty.is_none() {
if let Some(JsonValue::Array(types)) = map.get("type") {
for t in types {
if let Some(tt) = t.as_str() {
if matches!(
tt,
"object" | "array" | "string" | "number" | "integer" | "boolean"
) {
ty = Some(tt.to_string());
break;
}
}
}
}
}
@@ -539,10 +455,6 @@ pub(crate) fn get_openai_tools(
tools.push(PLAN_TOOL.clone());
}
if config.apply_patch_tool {
tools.push(create_apply_patch_tool());
}
if let Some(mcp_tools) = mcp_tools {
for (name, tool) in mcp_tools {
match mcp_tool_to_openai_tool(name.clone(), tool.clone()) {
@@ -558,6 +470,7 @@ pub(crate) fn get_openai_tools(
}
#[cfg(test)]
#[allow(clippy::expect_used)]
mod tests {
use crate::model_family::find_family_for_model;
use mcp_types::ToolInputSchema;
@@ -596,7 +509,6 @@ mod tests {
AskForApproval::Never,
SandboxPolicy::ReadOnly,
true,
model_family.uses_apply_patch_tool,
);
let tools = get_openai_tools(&config, Some(HashMap::new()));
@@ -611,7 +523,6 @@ mod tests {
AskForApproval::Never,
SandboxPolicy::ReadOnly,
true,
model_family.uses_apply_patch_tool,
);
let tools = get_openai_tools(&config, Some(HashMap::new()));
@@ -626,7 +537,6 @@ mod tests {
AskForApproval::Never,
SandboxPolicy::ReadOnly,
false,
model_family.uses_apply_patch_tool,
);
let tools = get_openai_tools(
&config,
@@ -720,7 +630,6 @@ mod tests {
AskForApproval::Never,
SandboxPolicy::ReadOnly,
false,
model_family.uses_apply_patch_tool,
);
let tools = get_openai_tools(
@@ -776,7 +685,6 @@ mod tests {
AskForApproval::Never,
SandboxPolicy::ReadOnly,
false,
model_family.uses_apply_patch_tool,
);
let tools = get_openai_tools(
@@ -827,7 +735,6 @@ mod tests {
AskForApproval::Never,
SandboxPolicy::ReadOnly,
false,
model_family.uses_apply_patch_tool,
);
let tools = get_openai_tools(
@@ -881,7 +788,6 @@ mod tests {
AskForApproval::Never,
SandboxPolicy::ReadOnly,
false,
model_family.uses_apply_patch_tool,
);
let tools = get_openai_tools(

File diff suppressed because it is too large

View File

@@ -1,6 +1,9 @@
use std::collections::BTreeMap;
use std::sync::LazyLock;
use serde::Deserialize;
use serde::Serialize;
use crate::codex::Session;
use crate::models::FunctionCallOutputPayload;
use crate::models::ResponseInputItem;
@@ -10,22 +13,36 @@ use crate::openai_tools::ResponsesApiTool;
use crate::protocol::Event;
use crate::protocol::EventMsg;
// Use the canonical plan tool types from the protocol crate to ensure
// type-identity matches events transported via `codex_protocol`.
pub use codex_protocol::plan_tool::PlanItemArg;
pub use codex_protocol::plan_tool::StepStatus;
pub use codex_protocol::plan_tool::UpdatePlanArgs;
// Types for the TODO tool arguments matching codex-vscode/todo-mcp/src/main.rs
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum StepStatus {
Pending,
InProgress,
Completed,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(deny_unknown_fields)]
pub struct PlanItemArg {
pub step: String,
pub status: StepStatus,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(deny_unknown_fields)]
pub struct UpdatePlanArgs {
#[serde(default)]
pub explanation: Option<String>,
pub plan: Vec<PlanItemArg>,
}
pub(crate) static PLAN_TOOL: LazyLock<OpenAiTool> = LazyLock::new(|| {
let mut plan_item_props = BTreeMap::new();
plan_item_props.insert("step".to_string(), JsonSchema::String { description: None });
plan_item_props.insert(
"status".to_string(),
JsonSchema::String {
description: Some("One of: pending, in_progress, completed".to_string()),
},
JsonSchema::String { description: None },
);
let plan_items_schema = JsonSchema::Array {
@@ -46,11 +63,17 @@ pub(crate) static PLAN_TOOL: LazyLock<OpenAiTool> = LazyLock::new(|| {
OpenAiTool::Function(ResponsesApiTool {
name: "update_plan".to_string(),
description: r#"Updates the task plan.
Provide an optional explanation and a list of plan items, each with a step and status.
At most one step can be in_progress at a time.
"#
.to_string(),
description: r#"Use the update_plan tool to keep the user updated on the current plan for the task.
After understanding the user's task, call the update_plan tool with an initial plan. An example of a plan:
1. Explore the codebase to find relevant files (status: in_progress)
2. Implement the feature in the XYZ component (status: pending)
3. Commit changes and make a pull request (status: pending)
Each step should be a short, 1-sentence description.
Until all the steps are finished, there should always be exactly one in_progress step in the plan.
Call the update_plan tool whenever you finish a step, marking the completed step as `completed` and marking the next step as `in_progress`.
Before running a command, consider whether or not you have completed the previous step, and make sure to mark it as completed before moving on to the next step.
Sometimes, you may need to change plans in the middle of a task: call `update_plan` with the updated plan and make sure to provide an `explanation` of the rationale when doing so.
When all steps are completed, call update_plan one last time with all steps marked as `completed`."#.to_string(),
strict: false,
parameters: JsonSchema::Object {
properties,

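// A sketch of arguments satisfying the description above, with exactly one
// in_progress step (assumes `serde_json`; uses the protocol-crate types
// re-exported at the top of this file):
fn demo_update_plan_args() {
    let args = UpdatePlanArgs {
        explanation: Some("Initial plan after exploring the codebase".to_string()),
        plan: vec![
            PlanItemArg {
                step: "Explore the codebase for relevant files".to_string(),
                status: StepStatus::InProgress,
            },
            PlanItemArg {
                step: "Implement the feature".to_string(),
                status: StepStatus::Pending,
            },
        ],
    };
    println!("{}", serde_json::to_string_pretty(&args).unwrap());
}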
View File

@@ -134,6 +134,8 @@ async fn load_first_candidate(
#[cfg(test)]
mod tests {
#![allow(clippy::expect_used, clippy::unwrap_used)]
use super::*;
use crate::config::ConfigOverrides;
use crate::config::ConfigToml;

View File

@@ -11,17 +11,16 @@ use std::str::FromStr;
use std::time::Duration;
use mcp_types::CallToolResult;
use mcp_types::Tool as McpTool;
use serde::Deserialize;
use serde::Serialize;
use serde_bytes::ByteBuf;
use strum_macros::Display;
use ts_rs::TS;
use uuid::Uuid;
use crate::config_types::ReasoningEffort as ReasoningEffortConfig;
use crate::config_types::ReasoningSummary as ReasoningSummaryConfig;
use crate::message_history::HistoryEntry;
use crate::model_provider_info::ModelProviderInfo;
use crate::parse_command::ParsedCommand;
use crate::plan_tool::UpdatePlanArgs;
@@ -40,8 +39,54 @@ pub struct Submission {
#[allow(clippy::large_enum_variant)]
#[non_exhaustive]
pub enum Op {
/// Configure the model session.
ConfigureSession {
/// Provider identifier ("openai", "openrouter", ...).
provider: ModelProviderInfo,
/// If not specified, server will use its default model.
model: String,
model_reasoning_effort: ReasoningEffortConfig,
model_reasoning_summary: ReasoningSummaryConfig,
/// Model instructions that are appended to the base instructions.
user_instructions: Option<String>,
/// Base instructions override.
base_instructions: Option<String>,
/// When to escalate for approval for execution
approval_policy: AskForApproval,
/// How to sandbox commands executed in the system
sandbox_policy: SandboxPolicy,
/// Disable server-side response storage (send full context each request)
#[serde(default)]
disable_response_storage: bool,
/// Optional external notifier command tokens. Present only when the
/// client wants the agent to spawn a program after each completed
/// turn.
#[serde(skip_serializing_if = "Option::is_none")]
#[serde(default)]
notify: Option<Vec<String>>,
/// Working directory that should be treated as the *root* of the
/// session. All relative paths supplied by the model as well as the
/// execution sandbox are resolved against this directory **instead**
/// of the process-wide current working directory. CLI front-ends are
/// expected to expand this to an absolute path before sending the
/// `ConfigureSession` operation so that the business-logic layer can
/// operate deterministically.
cwd: std::path::PathBuf,
/// Path to a rollout file to resume from.
#[serde(skip_serializing_if = "Option::is_none")]
resume_path: Option<std::path::PathBuf>,
},
/// Abort current task.
/// This server sends [`EventMsg::TurnAborted`] in response.
/// This server sends no corresponding Event
Interrupt,
/// Input from the user
@@ -50,65 +95,6 @@ pub enum Op {
items: Vec<InputItem>,
},
/// Similar to [`Op::UserInput`], but contains additional context required
/// for a turn of a [`crate::codex_conversation::CodexConversation`].
UserTurn {
/// User input items, see `InputItem`
items: Vec<InputItem>,
/// `cwd` to use with the [`SandboxPolicy`] and potentially tool calls
/// such as `local_shell`.
cwd: PathBuf,
/// Policy to use for command approval.
approval_policy: AskForApproval,
/// Policy to use for tool calls such as `local_shell`.
sandbox_policy: SandboxPolicy,
/// Must be a valid model slug for the [`crate::client::ModelClient`]
/// associated with this conversation.
model: String,
/// Will only be honored if the model is configured to use reasoning.
effort: ReasoningEffortConfig,
/// Will only be honored if the model is configured to use reasoning.
summary: ReasoningSummaryConfig,
},
/// Override parts of the persistent turn context for subsequent turns.
///
/// All fields are optional; when omitted, the existing value is preserved.
/// This does not enqueue any input; it only updates defaults used for
/// future `UserInput` turns.
OverrideTurnContext {
/// Updated `cwd` for sandbox/tool calls.
#[serde(skip_serializing_if = "Option::is_none")]
cwd: Option<PathBuf>,
/// Updated command approval policy.
#[serde(skip_serializing_if = "Option::is_none")]
approval_policy: Option<AskForApproval>,
/// Updated sandbox policy for tool calls.
#[serde(skip_serializing_if = "Option::is_none")]
sandbox_policy: Option<SandboxPolicy>,
/// Updated model slug. When set, the model family is derived
/// automatically.
#[serde(skip_serializing_if = "Option::is_none")]
model: Option<String>,
/// Updated reasoning effort (honored only for reasoning-capable models).
#[serde(skip_serializing_if = "Option::is_none")]
effort: Option<ReasoningEffortConfig>,
/// Updated reasoning summary preference (honored only for reasoning-capable models).
#[serde(skip_serializing_if = "Option::is_none")]
summary: Option<ReasoningSummaryConfig>,
},
/// Approve a command execution
ExecApproval {
/// The id of the submission we are approving
@@ -137,10 +123,6 @@ pub enum Op {
/// Request a single history entry identified by `log_id` + `offset`.
GetHistoryEntryRequest { offset: usize, log_id: u64 },
/// Request the list of MCP tools available across all configured servers.
/// Reply is delivered via `EventMsg::McpListToolsResponse`.
ListMcpTools,
/// Request the agent to summarize the current conversation context.
/// The agent will use its existing context (either conversation history or previous response id)
/// to generate a summary which will be returned as an AgentMessage event.
@@ -151,7 +133,7 @@ pub enum Op {
/// Determines the conditions under which the user is consulted to approve
/// running the command proposed by Codex.
#[derive(Debug, Clone, Copy, Default, PartialEq, Eq, Hash, Serialize, Deserialize, Display, TS)]
#[derive(Debug, Clone, Copy, Default, PartialEq, Eq, Hash, Serialize, Deserialize, Display)]
#[serde(rename_all = "kebab-case")]
#[strum(serialize_all = "kebab-case")]
pub enum AskForApproval {
@@ -178,7 +160,7 @@ pub enum AskForApproval {
}
/// Determines execution restrictions for model shell commands.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Display, TS)]
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Display)]
#[strum(serialize_all = "kebab-case")]
#[serde(tag = "mode", rename_all = "kebab-case")]
pub enum SandboxPolicy {
@@ -223,31 +205,10 @@ pub enum SandboxPolicy {
/// not modified by the agent.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct WritableRoot {
/// Absolute path, by construction.
pub root: PathBuf,
/// Also absolute paths, by construction.
pub read_only_subpaths: Vec<PathBuf>,
}
impl WritableRoot {
pub fn is_path_writable(&self, path: &Path) -> bool {
// Check if the path is under the root.
if !path.starts_with(&self.root) {
return false;
}
// Check if the path is under any of the read-only subpaths.
for subpath in &self.read_only_subpaths {
if path.starts_with(subpath) {
return false;
}
}
true
}
}
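
To make the `is_path_writable` semantics above concrete, here is a self-contained sketch that mirrors the struct and checks both rules, under the root and not under a read-only subpath (paths are illustrative):

```rust
use std::path::{Path, PathBuf};

struct WritableRoot {
    root: PathBuf,
    read_only_subpaths: Vec<PathBuf>,
}

impl WritableRoot {
    fn is_path_writable(&self, path: &Path) -> bool {
        // Writable iff under the root and not under any read-only subpath.
        path.starts_with(&self.root)
            && !self.read_only_subpaths.iter().any(|sub| path.starts_with(sub))
    }
}

fn main() {
    let root = WritableRoot {
        root: PathBuf::from("/workspace"),
        read_only_subpaths: vec![PathBuf::from("/workspace/.git")],
    };
    assert!(root.is_path_writable(Path::new("/workspace/src/main.rs")));
    assert!(!root.is_path_writable(Path::new("/workspace/.git/config")));
    assert!(!root.is_path_writable(Path::new("/tmp/elsewhere")));
}
```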
impl FromStr for SandboxPolicy {
type Err = serde_json::Error;
@@ -274,7 +235,8 @@ impl SandboxPolicy {
}
}
/// Always returns `true`; restricting read access is not supported.
/// Always returns `true` for now, as we do not yet support restricting read
/// access.
pub fn has_full_disk_read_access(&self) -> bool {
true
}
@@ -422,8 +384,6 @@ pub enum EventMsg {
/// Agent reasoning content delta event from agent.
AgentReasoningRawContentDelta(AgentReasoningRawContentDeltaEvent),
/// Signaled when the model begins a new reasoning summary section (e.g., a new titled block).
AgentReasoningSectionBreak(AgentReasoningSectionBreakEvent),
/// Ack the client's configure message.
SessionConfigured(SessionConfiguredEvent),
@@ -458,13 +418,8 @@ pub enum EventMsg {
/// Response to GetHistoryEntryRequest.
GetHistoryEntryResponse(GetHistoryEntryResponseEvent),
/// List of MCP tools available to the agent.
McpListToolsResponse(McpListToolsResponseEvent),
PlanUpdate(UpdatePlanArgs),
TurnAborted(TurnAbortedEvent),
/// Notification that the agent is shutting down.
ShutdownComplete,
}
@@ -516,33 +471,6 @@ impl TokenUsage {
self.total_tokens
.saturating_sub(self.reasoning_output_tokens.unwrap_or(0))
}
/// Estimate the remaining user-controllable percentage of the model's context window.
///
/// `context_window` is the total size of the model's context window.
/// `baseline_used_tokens` should capture tokens that are always present in
/// the context (e.g., system prompt and fixed tool instructions) so that
/// the percentage reflects the portion the user can influence.
///
/// This normalizes both the numerator and denominator by subtracting the
/// baseline, so immediately after the first prompt the UI shows 100% left
/// and trends toward 0% as the user fills the effective window.
pub fn percent_of_context_window_remaining(
&self,
context_window: u64,
baseline_used_tokens: u64,
) -> u8 {
if context_window <= baseline_used_tokens {
return 0;
}
let effective_window = context_window - baseline_used_tokens;
let used = self
.tokens_in_context_window()
.saturating_sub(baseline_used_tokens);
let remaining = effective_window.saturating_sub(used);
((remaining as f32 / effective_window as f32) * 100.0).clamp(0.0, 100.0) as u8
}
}
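
A worked example of the normalization described above, reimplemented standalone with illustrative numbers: a 100k window with a 12k baseline leaves an 88k effective window; at 56k tokens in the window, 44k of user-attributable usage means 50% remains.

```rust
fn percent_remaining(context_window: u64, baseline: u64, tokens_in_window: u64) -> u8 {
    if context_window <= baseline {
        return 0;
    }
    let effective = context_window - baseline;
    let used = tokens_in_window.saturating_sub(baseline);
    let remaining = effective.saturating_sub(used);
    ((remaining as f32 / effective as f32) * 100.0).clamp(0.0, 100.0) as u8
}

fn main() {
    // 100k window, 12k always-present baseline, 56k currently in the window.
    assert_eq!(percent_remaining(100_000, 12_000, 56_000), 50);
    // Right after the first prompt, usage equals the baseline: 100% left.
    assert_eq!(percent_remaining(100_000, 12_000, 12_000), 100);
}
```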
#[derive(Debug, Clone, Deserialize, Serialize)]
@@ -603,9 +531,6 @@ pub struct AgentReasoningRawContentDeltaEvent {
pub delta: String,
}
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct AgentReasoningSectionBreakEvent {}
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct AgentReasoningDeltaEvent {
pub delta: String,
@@ -757,13 +682,6 @@ pub struct GetHistoryEntryResponseEvent {
pub entry: Option<HistoryEntry>,
}
/// Response payload for `Op::ListMcpTools`.
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct McpListToolsResponseEvent {
/// Fully qualified tool name -> tool definition.
pub tools: std::collections::HashMap<String, McpTool>,
}
#[derive(Debug, Default, Clone, Deserialize, Serialize)]
pub struct SessionConfiguredEvent {
/// Unique id for this session.
@@ -780,7 +698,7 @@ pub struct SessionConfiguredEvent {
}
/// User's decision in response to an ExecApprovalRequest.
#[derive(Debug, Default, Clone, Copy, Deserialize, Serialize, PartialEq, Eq, TS)]
#[derive(Debug, Default, Clone, Copy, Deserialize, Serialize, PartialEq, Eq)]
#[serde(rename_all = "snake_case")]
pub enum ReviewDecision {
/// User has approved this command and the agent should execute it.
@@ -801,7 +719,7 @@ pub enum ReviewDecision {
Abort,
}
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, TS)]
#[derive(Debug, Clone, Deserialize, Serialize)]
#[serde(rename_all = "snake_case")]
pub enum FileChange {
Add {
@@ -822,20 +740,9 @@ pub struct Chunk {
pub inserted_lines: Vec<String>,
}
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct TurnAbortedEvent {
pub reason: TurnAbortReason,
}
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, TS)]
#[serde(rename_all = "snake_case")]
pub enum TurnAbortReason {
Interrupted,
Replaced,
}
#[cfg(test)]
mod tests {
#![allow(clippy::unwrap_used)]
use super::*;
/// Serialize Event to verify that its JSON representation has the expected

View File

@@ -21,7 +21,7 @@ pub enum SafetyCheck {
pub fn assess_patch_safety(
action: &ApplyPatchAction,
policy: AskForApproval,
sandbox_policy: &SandboxPolicy,
writable_roots: &[PathBuf],
cwd: &Path,
) -> SafetyCheck {
if action.is_empty() {
@@ -45,7 +45,7 @@ pub fn assess_patch_safety(
// is possible that paths in the patch are hard links to files outside the
// writable roots, so we should still run `apply_patch` in a sandbox in that
// case.
if is_write_patch_constrained_to_writable_paths(action, sandbox_policy, cwd)
if is_write_patch_constrained_to_writable_paths(action, writable_roots, cwd)
|| policy == AskForApproval::OnFailure
{
// Only autoapprove when we can actually enforce a sandbox. Otherwise
@@ -171,19 +171,13 @@ pub fn get_platform_sandbox() -> Option<SandboxType> {
fn is_write_patch_constrained_to_writable_paths(
action: &ApplyPatchAction,
sandbox_policy: &SandboxPolicy,
writable_roots: &[PathBuf],
cwd: &Path,
) -> bool {
// Early-exit if there are no declared writable roots.
let writable_roots = match sandbox_policy {
SandboxPolicy::ReadOnly => {
return false;
}
SandboxPolicy::DangerFullAccess => {
return true;
}
SandboxPolicy::WorkspaceWrite { .. } => sandbox_policy.get_writable_roots_with_cwd(cwd),
};
if writable_roots.is_empty() {
return false;
}
// Normalize a path by removing `.` and resolving `..` without touching the
// filesystem (works even if the file does not exist).
@@ -215,9 +209,15 @@ fn is_write_patch_constrained_to_writable_paths(
None => return false,
};
writable_roots
.iter()
.any(|writable_root| writable_root.is_path_writable(&abs))
writable_roots.iter().any(|root| {
let root_abs = if root.is_absolute() {
root.clone()
} else {
normalize(&cwd.join(root)).unwrap_or_else(|| cwd.join(root))
};
abs.starts_with(&root_abs)
})
};
for (path, change) in action.changes() {
@@ -231,10 +231,10 @@ fn is_write_patch_constrained_to_writable_paths(
if !is_path_writable(path) {
return false;
}
if let Some(dest) = move_path
&& !is_path_writable(dest)
{
return false;
if let Some(dest) = move_path {
if !is_path_writable(dest) {
return false;
}
}
}
}
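
The "normalize without touching the filesystem" step mentioned in the comment above can be sketched as a purely lexical fold over path components; this is an illustrative reimplementation, not the crate's actual helper:

```rust
use std::path::{Component, Path, PathBuf};

/// Lexically resolve `.` and `..` without hitting the filesystem.
/// Returns `None` if `..` would escape past the start of the path.
fn normalize(path: &Path) -> Option<PathBuf> {
    let mut out = PathBuf::new();
    for comp in path.components() {
        match comp {
            Component::CurDir => {}
            Component::ParentDir => {
                if !out.pop() {
                    return None;
                }
            }
            other => out.push(other),
        }
    }
    Some(out)
}

fn main() {
    assert_eq!(
        normalize(Path::new("/a/b/../c/./d")),
        Some(PathBuf::from("/a/c/d"))
    );
    assert_eq!(normalize(Path::new("/a/../..")), None);
}
```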
@@ -245,57 +245,40 @@ fn is_write_patch_constrained_to_writable_paths(
#[cfg(test)]
mod tests {
#![allow(clippy::unwrap_used)]
use super::*;
use tempfile::TempDir;
#[test]
fn test_writable_roots_constraint() {
// Use a temporary directory as our workspace to avoid touching
// the real current working directory.
let tmp = TempDir::new().unwrap();
let cwd = tmp.path().to_path_buf();
let cwd = std::env::current_dir().unwrap();
let parent = cwd.parent().unwrap().to_path_buf();
// Helper to build a single-entry patch that adds a file at `p`.
// Helper to build a single-entry map representing a patch that adds a
// file at `p`.
let make_add_change = |p: PathBuf| ApplyPatchAction::new_add_for_test(&p, "".to_string());
let add_inside = make_add_change(cwd.join("inner.txt"));
let add_outside = make_add_change(parent.join("outside.txt"));
// Policy limited to the workspace only; exclude system temp roots so
// only `cwd` is writable by default.
let policy_workspace_only = SandboxPolicy::WorkspaceWrite {
writable_roots: vec![],
network_access: false,
exclude_tmpdir_env_var: true,
exclude_slash_tmp: true,
};
assert!(is_write_patch_constrained_to_writable_paths(
&add_inside,
&policy_workspace_only,
&[PathBuf::from(".")],
&cwd,
));
let add_outside_2 = make_add_change(parent.join("outside.txt"));
assert!(!is_write_patch_constrained_to_writable_paths(
&add_outside,
&policy_workspace_only,
&add_outside_2,
&[PathBuf::from(".")],
&cwd,
));
// With the parent dir explicitly added as a writable root, the
// outside write should be permitted.
let policy_with_parent = SandboxPolicy::WorkspaceWrite {
writable_roots: vec![parent.clone()],
network_access: false,
exclude_tmpdir_env_var: true,
exclude_slash_tmp: true,
};
// With parent dir added as writable root, it should pass.
assert!(is_write_patch_constrained_to_writable_paths(
&add_outside,
&policy_with_parent,
&[PathBuf::from("..")],
&cwd,
));
))
}
#[test]

View File

@@ -122,6 +122,7 @@ fn create_seatbelt_command_args(
#[cfg(test)]
mod tests {
#![expect(clippy::expect_used)]
use super::MACOS_SEATBELT_BASE_POLICY;
use super::create_seatbelt_command_args;
use crate::protocol::SandboxPolicy;

View File

@@ -70,13 +70,13 @@ pub async fn default_user_shell() -> Shell {
}
let stdout = String::from_utf8_lossy(&o.stdout);
for line in stdout.lines() {
if let Some(shell_path) = line.strip_prefix("UserShell: ")
&& shell_path.ends_with("/zsh")
{
return Shell::Zsh(ZshShell {
shell_path: shell_path.to_string(),
zshrc_path: format!("{home}/.zshrc"),
});
if let Some(shell_path) = line.strip_prefix("UserShell: ") {
if shell_path.ends_with("/zsh") {
return Shell::Zsh(ZshShell {
shell_path: shell_path.to_string(),
zshrc_path: format!("{home}/.zshrc"),
});
}
}
}
@@ -98,6 +98,7 @@ mod tests {
use std::process::Command;
#[tokio::test]
#[expect(clippy::unwrap_used)]
async fn test_current_shell_detects_zsh() {
let shell = Command::new("sh")
.arg("-c")
@@ -128,6 +129,7 @@ mod tests {
assert_eq!(actual_cmd, None);
}
#[expect(clippy::unwrap_used)]
#[tokio::test]
async fn test_run_with_profile_escaping_and_execution() {
let shell_path = "/bin/zsh";
@@ -165,6 +167,9 @@ mod tests {
for (input, expected_cmd, expected_output) in cases {
use std::collections::HashMap;
use std::path::PathBuf;
use std::sync::Arc;
use tokio::sync::Notify;
use crate::exec::ExecParams;
use crate::exec::SandboxType;
@@ -214,6 +219,7 @@ mod tests {
justification: None,
},
SandboxType::None,
Arc::new(Notify::new()),
&SandboxPolicy::DangerFullAccess,
&None,
None,

View File

@@ -1,72 +0,0 @@
use std::sync::OnceLock;
static TERMINAL: OnceLock<String> = OnceLock::new();
pub fn user_agent() -> String {
TERMINAL.get_or_init(detect_terminal).to_string()
}
/// Sanitize a header value to be used in a User-Agent string.
///
/// This function replaces any characters that are not allowed in a User-Agent string with an underscore.
///
/// # Arguments
///
/// * `value` - The value to sanitize.
fn is_valid_header_value_char(c: char) -> bool {
c.is_ascii_alphanumeric() || c == '-' || c == '_' || c == '.' || c == '/'
}
fn sanitize_header_value(value: String) -> String {
value.replace(|c| !is_valid_header_value_char(c), "_")
}
fn detect_terminal() -> String {
sanitize_header_value(
if let Ok(tp) = std::env::var("TERM_PROGRAM")
&& !tp.trim().is_empty()
{
let ver = std::env::var("TERM_PROGRAM_VERSION").ok();
match ver {
Some(v) if !v.trim().is_empty() => format!("{tp}/{v}"),
_ => tp,
}
} else if let Ok(v) = std::env::var("WEZTERM_VERSION") {
if !v.trim().is_empty() {
format!("WezTerm/{v}")
} else {
"WezTerm".to_string()
}
} else if std::env::var("KITTY_WINDOW_ID").is_ok()
|| std::env::var("TERM")
.map(|t| t.contains("kitty"))
.unwrap_or(false)
{
"kitty".to_string()
} else if std::env::var("ALACRITTY_SOCKET").is_ok()
|| std::env::var("TERM")
.map(|t| t == "alacritty")
.unwrap_or(false)
{
"Alacritty".to_string()
} else if let Ok(v) = std::env::var("KONSOLE_VERSION") {
if !v.trim().is_empty() {
format!("Konsole/{v}")
} else {
"Konsole".to_string()
}
} else if std::env::var("GNOME_TERMINAL_SCREEN").is_ok() {
return "gnome-terminal".to_string();
} else if let Ok(v) = std::env::var("VTE_VERSION") {
if !v.trim().is_empty() {
format!("VTE/{v}")
} else {
"VTE".to_string()
}
} else if std::env::var("WT_SESSION").is_ok() {
return "WindowsTerminal".to_string();
} else {
std::env::var("TERM").unwrap_or_else(|_| "unknown".to_string())
},
)
}
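
As a quick illustration of the sanitization rule above, this standalone sketch mirrors the two helpers and shows disallowed characters (spaces, parentheses) collapsing to underscores:

```rust
fn is_valid_header_value_char(c: char) -> bool {
    c.is_ascii_alphanumeric() || c == '-' || c == '_' || c == '.' || c == '/'
}

fn sanitize_header_value(value: String) -> String {
    value.replace(|c: char| !is_valid_header_value_char(c), "_")
}

fn main() {
    // Already valid: passes through unchanged.
    assert_eq!(
        sanitize_header_value("iTerm.app/3.5.0".to_string()),
        "iTerm.app/3.5.0"
    );
    // Space and parentheses are not allowed in the User-Agent value.
    assert_eq!(
        sanitize_header_value("Windows Terminal (1.20)".to_string()),
        "Windows_Terminal__1.20_"
    );
}
```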

View File

@@ -466,6 +466,7 @@ fn is_windows_drive_or_unc_root(p: &std::path::Path) -> bool {
#[cfg(test)]
mod tests {
#![allow(clippy::unwrap_used)]
use super::*;
use pretty_assertions::assert_eq;
use tempfile::tempdir;

View File

@@ -1,37 +0,0 @@
const DEFAULT_ORIGINATOR: &str = "codex_cli_rs";
pub fn get_codex_user_agent(originator: Option<&str>) -> String {
let build_version = env!("CARGO_PKG_VERSION");
let os_info = os_info::get();
format!(
"{}/{build_version} ({} {}; {}) {}",
originator.unwrap_or(DEFAULT_ORIGINATOR),
os_info.os_type(),
os_info.version(),
os_info.architecture().unwrap_or("unknown"),
crate::terminal::user_agent()
)
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_get_codex_user_agent() {
let user_agent = get_codex_user_agent(None);
assert!(user_agent.starts_with("codex_cli_rs/"));
}
#[test]
#[cfg(target_os = "macos")]
fn test_macos() {
use regex_lite::Regex;
let user_agent = get_codex_user_agent(None);
let re = Regex::new(
r"^codex_cli_rs/\d+\.\d+\.\d+ \(Mac OS \d+\.\d+\.\d+; (x86_64|arm64)\) (\S+)$",
)
.unwrap();
assert!(re.is_match(&user_agent));
}
}

View File

@@ -20,6 +20,7 @@ pub(crate) enum UserNotification {
#[cfg(test)]
mod tests {
#![allow(clippy::unwrap_used)]
use super::*;
#[test]

View File

@@ -1,11 +1,32 @@
use std::path::Path;
use std::sync::Arc;
use std::time::Duration;
use rand::Rng;
use tokio::sync::Notify;
use tracing::debug;
const INITIAL_DELAY_MS: u64 = 200;
const BACKOFF_FACTOR: f64 = 2.0;
/// Make a CancellationToken that is fulfilled when SIGINT occurs.
pub fn notify_on_sigint() -> Arc<Notify> {
let notify = Arc::new(Notify::new());
tokio::spawn({
let notify = Arc::clone(&notify);
async move {
loop {
tokio::signal::ctrl_c().await.ok();
debug!("Keyboard interrupt");
notify.notify_waiters();
}
}
});
notify
}
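
A consumer of the returned handle typically races the notification against its own work. A minimal sketch of that pattern with `tokio::select!`; the simulated SIGINT and the timings are illustrative:

```rust
use std::sync::Arc;
use std::time::Duration;
use tokio::sync::Notify;

async fn run_until_interrupted(ctrl_c: Arc<Notify>) {
    tokio::select! {
        _ = ctrl_c.notified() => eprintln!("interrupted; shutting down"),
        _ = tokio::time::sleep(Duration::from_secs(60)) => eprintln!("work finished"),
    }
}

#[tokio::main]
async fn main() {
    let ctrl_c = Arc::new(Notify::new());
    // Simulate a SIGINT arriving shortly after startup.
    let notifier = Arc::clone(&ctrl_c);
    tokio::spawn(async move {
        tokio::time::sleep(Duration::from_millis(100)).await;
        notifier.notify_waiters();
    });
    run_until_interrupted(ctrl_c).await;
}
```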
pub(crate) fn backoff(attempt: u64) -> Duration {
let exp = BACKOFF_FACTOR.powi(attempt.saturating_sub(1) as i32);
let base = (INITIAL_DELAY_MS as f64 * exp) as u64;
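
From the constants above, the base delay doubles on each attempt: 200 ms, 400 ms, 800 ms, and so on. The diff cuts `backoff` off mid-body (the `rand::Rng` import suggests jitter is layered on top, which is an assumption here); the visible base computation, reproduced standalone:

```rust
const INITIAL_DELAY_MS: u64 = 200;
const BACKOFF_FACTOR: f64 = 2.0;

fn base_delay_ms(attempt: u64) -> u64 {
    // attempt 1 -> 200, attempt 2 -> 400, attempt 3 -> 800, ...
    let exp = BACKOFF_FACTOR.powi(attempt.saturating_sub(1) as i32);
    (INITIAL_DELAY_MS as f64 * exp) as u64
}

fn main() {
    assert_eq!(base_delay_ms(1), 200);
    assert_eq!(base_delay_ms(2), 400);
    assert_eq!(base_delay_ms(3), 800);
}
```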

View File

@@ -1,3 +1,5 @@
#![expect(clippy::unwrap_used)]
use assert_cmd::Command as AssertCommand;
use codex_core::spawn::CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR;
use std::time::Duration;
@@ -297,12 +299,13 @@ async fn integration_creates_and_checks_session_file() {
Ok(v) => v,
Err(_) => continue,
};
if item.get("type").and_then(|t| t.as_str()) == Some("message")
&& let Some(c) = item.get("content")
&& c.to_string().contains(&marker)
{
matching_path = Some(path.to_path_buf());
break;
if item.get("type").and_then(|t| t.as_str()) == Some("message") {
if let Some(c) = item.get("content") {
if c.to_string().contains(&marker) {
matching_path = Some(path.to_path_buf());
break;
}
}
}
}
}
@@ -375,12 +378,13 @@ async fn integration_creates_and_checks_session_file() {
let Ok(item) = serde_json::from_str::<serde_json::Value>(line) else {
continue;
};
if item.get("type").and_then(|t| t.as_str()) == Some("message")
&& let Some(c) = item.get("content")
&& c.to_string().contains(&marker)
{
found_message = true;
break;
if item.get("type").and_then(|t| t.as_str()) == Some("message") {
if let Some(c) = item.get("content") {
if c.to_string().contains(&marker) {
found_message = true;
break;
}
}
}
}
assert!(

View File

@@ -1,13 +1,15 @@
use codex_core::ConversationManager;
#![allow(clippy::expect_used, clippy::unwrap_used)]
use codex_core::Codex;
use codex_core::CodexSpawnOk;
use codex_core::ModelProviderInfo;
use codex_core::NewConversation;
use codex_core::WireApi;
use codex_core::built_in_model_providers;
use codex_core::protocol::EventMsg;
use codex_core::protocol::InputItem;
use codex_core::protocol::Op;
use codex_core::protocol::SessionConfiguredEvent;
use codex_core::spawn::CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR;
use codex_login::AuthMode;
use codex_login::CodexAuth;
use core_test_support::load_default_config_for_test;
use core_test_support::load_sse_fixture_with_id;
@@ -26,12 +28,10 @@ fn sse_completed(id: &str) -> String {
load_sse_fixture_with_id("tests/fixtures/completed_template.json", id)
}
#[expect(clippy::unwrap_used)]
fn assert_message_role(request_body: &serde_json::Value, role: &str) {
assert_eq!(request_body["role"].as_str().unwrap(), role);
}
#[expect(clippy::expect_used)]
fn assert_message_starts_with(request_body: &serde_json::Value, text: &str) {
let content = request_body["content"][0]["text"]
.as_str()
@@ -43,7 +43,6 @@ fn assert_message_starts_with(request_body: &serde_json::Value, text: &str) {
);
}
#[expect(clippy::expect_used)]
fn assert_message_ends_with(request_body: &serde_json::Value, text: &str) {
let content = request_body["content"][0]["text"]
.as_str()
@@ -55,61 +54,10 @@ fn assert_message_ends_with(request_body: &serde_json::Value, text: &str) {
);
}
/// Writes an `auth.json` into the provided `codex_home` with the specified parameters.
/// Returns the fake JWT string written to `tokens.id_token`.
#[expect(clippy::unwrap_used)]
fn write_auth_json(
codex_home: &TempDir,
openai_api_key: Option<&str>,
chatgpt_plan_type: &str,
access_token: &str,
account_id: Option<&str>,
) -> String {
use base64::Engine as _;
use serde_json::json;
let header = json!({ "alg": "none", "typ": "JWT" });
let payload = json!({
"email": "user@example.com",
"https://api.openai.com/auth": {
"chatgpt_plan_type": chatgpt_plan_type,
"chatgpt_account_id": account_id.unwrap_or("acc-123")
}
});
let b64 = |b: &[u8]| base64::engine::general_purpose::URL_SAFE_NO_PAD.encode(b);
let header_b64 = b64(&serde_json::to_vec(&header).unwrap());
let payload_b64 = b64(&serde_json::to_vec(&payload).unwrap());
let signature_b64 = b64(b"sig");
let fake_jwt = format!("{header_b64}.{payload_b64}.{signature_b64}");
let mut tokens = json!({
"id_token": fake_jwt,
"access_token": access_token,
"refresh_token": "refresh-test",
});
if let Some(acc) = account_id {
tokens["account_id"] = json!(acc);
}
let auth_json = json!({
"OPENAI_API_KEY": openai_api_key,
"tokens": tokens,
// RFC3339 datetime; value doesn't matter for these tests
"last_refresh": "2025-08-06T20:41:36.232376Z",
});
std::fs::write(
codex_home.path().join("auth.json"),
serde_json::to_string_pretty(&auth_json).unwrap(),
)
.unwrap();
fake_jwt
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn includes_session_id_and_model_headers_in_request() {
#![allow(clippy::unwrap_used)]
if std::env::var(CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR).is_ok() {
println!(
"Skipping test because it cannot execute when network is disabled in a Codex sandbox."
@@ -142,15 +90,14 @@ async fn includes_session_id_and_model_headers_in_request() {
let mut config = load_default_config_for_test(&codex_home);
config.model_provider = model_provider;
let conversation_manager = ConversationManager::default();
let NewConversation {
conversation: codex,
conversation_id,
session_configured: _,
} = conversation_manager
.new_conversation_with_auth(config, Some(CodexAuth::from_api_key("Test API Key")))
.await
.expect("create new conversation");
let ctrl_c = std::sync::Arc::new(tokio::sync::Notify::new());
let CodexSpawnOk { codex, .. } = Codex::spawn(
config,
Some(CodexAuth::from_api_key("Test API Key")),
ctrl_c.clone(),
)
.await
.unwrap();
codex
.submit(Op::UserInput {
@@ -161,6 +108,13 @@ async fn includes_session_id_and_model_headers_in_request() {
.await
.unwrap();
let EventMsg::SessionConfigured(SessionConfiguredEvent { session_id, .. }) =
wait_for_event(&codex, |ev| matches!(ev, EventMsg::SessionConfigured(_))).await
else {
unreachable!()
};
let current_session_id = Some(session_id.to_string());
wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await;
// get request from the server
@@ -169,9 +123,10 @@ async fn includes_session_id_and_model_headers_in_request() {
let request_authorization = request.headers.get("authorization").unwrap();
let request_originator = request.headers.get("originator").unwrap();
assert!(current_session_id.is_some());
assert_eq!(
request_session_id.to_str().unwrap(),
conversation_id.to_string()
current_session_id.as_ref().unwrap()
);
assert_eq!(request_originator.to_str().unwrap(), "codex_cli_rs");
assert_eq!(
@@ -182,6 +137,8 @@ async fn includes_session_id_and_model_headers_in_request() {
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn includes_base_instructions_override_in_request() {
#![allow(clippy::unwrap_used)]
// Mock server
let server = MockServer::start().await;
@@ -207,12 +164,14 @@ async fn includes_base_instructions_override_in_request() {
config.base_instructions = Some("test instructions".to_string());
config.model_provider = model_provider;
let conversation_manager = ConversationManager::default();
let codex = conversation_manager
.new_conversation_with_auth(config, Some(CodexAuth::from_api_key("Test API Key")))
.await
.expect("create new conversation")
.conversation;
let ctrl_c = std::sync::Arc::new(tokio::sync::Notify::new());
let CodexSpawnOk { codex, .. } = Codex::spawn(
config,
Some(CodexAuth::from_api_key("Test API Key")),
ctrl_c.clone(),
)
.await
.unwrap();
codex
.submit(Op::UserInput {
@@ -238,6 +197,8 @@ async fn includes_base_instructions_override_in_request() {
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn originator_config_override_is_used() {
#![allow(clippy::unwrap_used)]
// Mock server
let server = MockServer::start().await;
@@ -260,14 +221,16 @@ async fn originator_config_override_is_used() {
let codex_home = TempDir::new().unwrap();
let mut config = load_default_config_for_test(&codex_home);
config.model_provider = model_provider;
config.responses_originator_header = "my_override".to_owned();
config.internal_originator = Some("my_override".to_string());
let conversation_manager = ConversationManager::default();
let codex = conversation_manager
.new_conversation_with_auth(config, Some(CodexAuth::from_api_key("Test API Key")))
.await
.expect("create new conversation")
.conversation;
let ctrl_c = std::sync::Arc::new(tokio::sync::Notify::new());
let CodexSpawnOk { codex, .. } = Codex::spawn(
config,
Some(CodexAuth::from_api_key("Test API Key")),
ctrl_c.clone(),
)
.await
.unwrap();
codex
.submit(Op::UserInput {
@@ -287,6 +250,8 @@ async fn originator_config_override_is_used() {
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn chatgpt_auth_sends_correct_request() {
#![allow(clippy::unwrap_used)]
if std::env::var(CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR).is_ok() {
println!(
"Skipping test because it cannot execute when network is disabled in a Codex sandbox."
@@ -318,15 +283,11 @@ async fn chatgpt_auth_sends_correct_request() {
let codex_home = TempDir::new().unwrap();
let mut config = load_default_config_for_test(&codex_home);
config.model_provider = model_provider;
let conversation_manager = ConversationManager::default();
let NewConversation {
conversation: codex,
conversation_id,
session_configured: _,
} = conversation_manager
.new_conversation_with_auth(config, Some(create_dummy_codex_auth()))
.await
.expect("create new conversation");
let ctrl_c = std::sync::Arc::new(tokio::sync::Notify::new());
let CodexSpawnOk { codex, .. } =
Codex::spawn(config, Some(create_dummy_codex_auth()), ctrl_c.clone())
.await
.unwrap();
codex
.submit(Op::UserInput {
@@ -337,6 +298,13 @@ async fn chatgpt_auth_sends_correct_request() {
.await
.unwrap();
let EventMsg::SessionConfigured(SessionConfiguredEvent { session_id, .. }) =
wait_for_event(&codex, |ev| matches!(ev, EventMsg::SessionConfigured(_))).await
else {
unreachable!()
};
let current_session_id = Some(session_id.to_string());
wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await;
// get request from the server
@@ -347,9 +315,10 @@ async fn chatgpt_auth_sends_correct_request() {
let request_chatgpt_account_id = request.headers.get("chatgpt-account-id").unwrap();
let request_body = request.body_json::<serde_json::Value>().unwrap();
assert!(current_session_id.is_some());
assert_eq!(
request_session_id.to_str().unwrap(),
conversation_id.to_string()
current_session_id.as_ref().unwrap()
);
assert_eq!(request_originator.to_str().unwrap(), "codex_cli_rs");
assert_eq!(
@@ -365,158 +334,10 @@ async fn chatgpt_auth_sends_correct_request() {
);
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn prefers_chatgpt_token_when_config_prefers_chatgpt() {
if std::env::var(CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR).is_ok() {
println!(
"Skipping test because it cannot execute when network is disabled in a Codex sandbox."
);
return;
}
// Mock server
let server = MockServer::start().await;
let first = ResponseTemplate::new(200)
.insert_header("content-type", "text/event-stream")
.set_body_raw(sse_completed("resp1"), "text/event-stream");
// Expect ChatGPT base path and correct headers
Mock::given(method("POST"))
.and(path("/v1/responses"))
.and(header_regex("Authorization", r"Bearer Access-123"))
.and(header_regex("chatgpt-account-id", r"acc-123"))
.respond_with(first)
.expect(1)
.mount(&server)
.await;
let model_provider = ModelProviderInfo {
base_url: Some(format!("{}/v1", server.uri())),
..built_in_model_providers()["openai"].clone()
};
// Init session
let codex_home = TempDir::new().unwrap();
// Write auth.json that contains both API key and ChatGPT tokens for a plan that should prefer ChatGPT.
let _jwt = write_auth_json(
&codex_home,
Some("sk-test-key"),
"pro",
"Access-123",
Some("acc-123"),
);
let mut config = load_default_config_for_test(&codex_home);
config.model_provider = model_provider;
config.preferred_auth_method = AuthMode::ChatGPT;
let conversation_manager = ConversationManager::default();
let NewConversation {
conversation: codex,
..
} = conversation_manager
.new_conversation(config)
.await
.expect("create new conversation");
codex
.submit(Op::UserInput {
items: vec![InputItem::Text {
text: "hello".into(),
}],
})
.await
.unwrap();
wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await;
// verify request body flags
let request = &server.received_requests().await.unwrap()[0];
let request_body = request.body_json::<serde_json::Value>().unwrap();
assert!(
!request_body["store"].as_bool().unwrap(),
"store should be false for ChatGPT auth"
);
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn prefers_apikey_when_config_prefers_apikey_even_with_chatgpt_tokens() {
if std::env::var(CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR).is_ok() {
println!(
"Skipping test because it cannot execute when network is disabled in a Codex sandbox."
);
return;
}
// Mock server
let server = MockServer::start().await;
let first = ResponseTemplate::new(200)
.insert_header("content-type", "text/event-stream")
.set_body_raw(sse_completed("resp1"), "text/event-stream");
// Expect API key header, no ChatGPT account header required.
Mock::given(method("POST"))
.and(path("/v1/responses"))
.and(header_regex("Authorization", r"Bearer sk-test-key"))
.respond_with(first)
.expect(1)
.mount(&server)
.await;
let model_provider = ModelProviderInfo {
base_url: Some(format!("{}/v1", server.uri())),
..built_in_model_providers()["openai"].clone()
};
// Init session
let codex_home = TempDir::new().unwrap();
// Write auth.json that contains both API key and ChatGPT tokens for a plan that should prefer ChatGPT,
// but config will force API key preference.
let _jwt = write_auth_json(
&codex_home,
Some("sk-test-key"),
"pro",
"Access-123",
Some("acc-123"),
);
let mut config = load_default_config_for_test(&codex_home);
config.model_provider = model_provider;
config.preferred_auth_method = AuthMode::ApiKey;
let conversation_manager = ConversationManager::default();
let NewConversation {
conversation: codex,
..
} = conversation_manager
.new_conversation(config)
.await
.expect("create new conversation");
codex
.submit(Op::UserInput {
items: vec![InputItem::Text {
text: "hello".into(),
}],
})
.await
.unwrap();
wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await;
// verify request body flags
let request = &server.received_requests().await.unwrap()[0];
let request_body = request.body_json::<serde_json::Value>().unwrap();
assert!(
request_body["store"].as_bool().unwrap(),
"store should be true for API key auth"
);
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn includes_user_instructions_message_in_request() {
#![allow(clippy::unwrap_used)]
let server = MockServer::start().await;
let first = ResponseTemplate::new(200)
@@ -540,12 +361,14 @@ async fn includes_user_instructions_message_in_request() {
config.model_provider = model_provider;
config.user_instructions = Some("be nice".to_string());
let conversation_manager = ConversationManager::default();
let codex = conversation_manager
.new_conversation_with_auth(config, Some(CodexAuth::from_api_key("Test API Key")))
.await
.expect("create new conversation")
.conversation;
let ctrl_c = std::sync::Arc::new(tokio::sync::Notify::new());
let CodexSpawnOk { codex, .. } = Codex::spawn(
config,
Some(CodexAuth::from_api_key("Test API Key")),
ctrl_c.clone(),
)
.await
.unwrap();
codex
.submit(Op::UserInput {
@@ -568,15 +391,17 @@ async fn includes_user_instructions_message_in_request() {
.contains("be nice")
);
assert_message_role(&request_body["input"][0], "user");
assert_message_starts_with(&request_body["input"][0], "<user_instructions>");
assert_message_ends_with(&request_body["input"][0], "</user_instructions>");
assert_message_starts_with(&request_body["input"][0], "<environment_context>\n\n");
assert_message_ends_with(&request_body["input"][0], "</environment_context>");
assert_message_role(&request_body["input"][1], "user");
assert_message_starts_with(&request_body["input"][1], "<environment_context>");
assert_message_ends_with(&request_body["input"][1], "</environment_context>");
assert_message_starts_with(&request_body["input"][1], "<user_instructions>\n\n");
assert_message_ends_with(&request_body["input"][1], "</user_instructions>");
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn azure_overrides_assign_properties_used_for_responses_url() {
#![allow(clippy::unwrap_used)]
let existing_env_var_with_random_value = if cfg!(windows) { "USERNAME" } else { "USER" };
// Mock server
@@ -632,12 +457,8 @@ async fn azure_overrides_assign_properties_used_for_responses_url() {
let mut config = load_default_config_for_test(&codex_home);
config.model_provider = provider;
let conversation_manager = ConversationManager::default();
let codex = conversation_manager
.new_conversation_with_auth(config, None)
.await
.expect("create new conversation")
.conversation;
let ctrl_c = std::sync::Arc::new(tokio::sync::Notify::new());
let CodexSpawnOk { codex, .. } = Codex::spawn(config, None, ctrl_c.clone()).await.unwrap();
codex
.submit(Op::UserInput {
@@ -653,6 +474,8 @@ async fn azure_overrides_assign_properties_used_for_responses_url() {
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn env_var_overrides_loaded_auth() {
#![allow(clippy::unwrap_used)]
let existing_env_var_with_random_value = if cfg!(windows) { "USERNAME" } else { "USER" };
// Mock server
@@ -708,12 +531,11 @@ async fn env_var_overrides_loaded_auth() {
let mut config = load_default_config_for_test(&codex_home);
config.model_provider = provider;
let conversation_manager = ConversationManager::default();
let codex = conversation_manager
.new_conversation_with_auth(config, Some(create_dummy_codex_auth()))
.await
.expect("create new conversation")
.conversation;
let ctrl_c = std::sync::Arc::new(tokio::sync::Notify::new());
let CodexSpawnOk { codex, .. } =
Codex::spawn(config, Some(create_dummy_codex_auth()), ctrl_c.clone())
.await
.unwrap();
codex
.submit(Op::UserInput {

View File

@@ -1,8 +1,7 @@
#![expect(clippy::expect_used)]
#![allow(clippy::expect_used)]
use tempfile::TempDir;
use codex_core::CodexConversation;
use codex_core::config::Config;
use codex_core::config::ConfigOverrides;
use codex_core::config::ConfigToml;
@@ -47,26 +46,6 @@ pub fn load_sse_fixture(path: impl AsRef<std::path::Path>) -> String {
.collect()
}
pub fn load_sse_fixture_with_id_from_str(raw: &str, id: &str) -> String {
let replaced = raw.replace("__ID__", id);
let events: Vec<serde_json::Value> =
serde_json::from_str(&replaced).expect("parse JSON fixture");
events
.into_iter()
.map(|e| {
let kind = e
.get("type")
.and_then(|v| v.as_str())
.expect("fixture event missing type");
if e.as_object().map(|o| o.len() == 1).unwrap_or(false) {
format!("event: {kind}\n\n")
} else {
format!("event: {kind}\ndata: {e}\n\n")
}
})
.collect()
}
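
Concretely, given the single-key special case above, a two-event fixture renders as one bare `event:` line followed by an `event:`/`data:` pair. An illustrative mirror of the helper with a sample fixture:

```rust
use serde_json::Value; // serde_json = "1"

// Mirror of the helper above, for illustration only.
fn render_sse(raw: &str, id: &str) -> String {
    let replaced = raw.replace("__ID__", id);
    let events: Vec<Value> = serde_json::from_str(&replaced).expect("parse JSON fixture");
    events
        .into_iter()
        .map(|e| {
            let kind = e.get("type").and_then(|v| v.as_str()).expect("missing type");
            if e.as_object().map(|o| o.len() == 1).unwrap_or(false) {
                format!("event: {kind}\n\n")
            } else {
                format!("event: {kind}\ndata: {e}\n\n")
            }
        })
        .collect()
}

fn main() {
    let raw = r#"[
        {"type": "response.created"},
        {"type": "response.completed", "response": {"id": "__ID__"}}
    ]"#;
    let sse = render_sse(raw, "resp1");
    assert!(sse.starts_with("event: response.created\n\n"));
    assert!(sse.contains("event: response.completed\ndata: "));
    assert!(sse.contains("resp1"));
}
```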
/// Same as [`load_sse_fixture`], but replaces the placeholder `__ID__` in the
/// fixture template with the supplied identifier before parsing. This lets a
/// single JSON template be reused by multiple tests that each need a unique
@@ -93,7 +72,7 @@ pub fn load_sse_fixture_with_id(path: impl AsRef<std::path::Path>, id: &str) ->
}
pub async fn wait_for_event<F>(
codex: &CodexConversation,
codex: &codex_core::Codex,
predicate: F,
) -> codex_core::protocol::EventMsg
where
@@ -104,18 +83,16 @@ where
}
pub async fn wait_for_event_with_timeout<F>(
codex: &CodexConversation,
codex: &codex_core::Codex,
mut predicate: F,
wait_time: tokio::time::Duration,
) -> codex_core::protocol::EventMsg
where
F: FnMut(&codex_core::protocol::EventMsg) -> bool,
{
use tokio::time::Duration;
use tokio::time::timeout;
loop {
// Allow a bit more time to accommodate async startup work (e.g. config IO, tool discovery)
let ev = timeout(wait_time.max(Duration::from_secs(5)), codex.next_event())
let ev = timeout(wait_time, codex.next_event())
.await
.expect("timeout waiting for event")
.expect("stream ended unexpectedly");

View File

@@ -1,6 +1,7 @@
#![expect(clippy::unwrap_used)]
use codex_core::ConversationManager;
use codex_core::Codex;
use codex_core::CodexSpawnOk;
use codex_core::ModelProviderInfo;
use codex_core::built_in_model_providers;
use codex_core::protocol::EventMsg;
@@ -141,12 +142,14 @@ async fn summarize_context_three_requests_and_instructions() {
let home = TempDir::new().unwrap();
let mut config = load_default_config_for_test(&home);
config.model_provider = model_provider;
let conversation_manager = ConversationManager::default();
let codex = conversation_manager
.new_conversation_with_auth(config, Some(CodexAuth::from_api_key("dummy")))
.await
.unwrap()
.conversation;
let ctrl_c = std::sync::Arc::new(tokio::sync::Notify::new());
let CodexSpawnOk { codex, .. } = Codex::spawn(
config,
Some(CodexAuth::from_api_key("dummy")),
ctrl_c.clone(),
)
.await
.unwrap();
// 1) Normal user input should hit server once.
codex

View File

@@ -1,6 +1,8 @@
#![cfg(target_os = "macos")]
#![expect(clippy::unwrap_used, clippy::expect_used)]
use std::collections::HashMap;
use std::sync::Arc;
use codex_core::exec::ExecParams;
use codex_core::exec::ExecToolCallOutput;
@@ -9,6 +11,7 @@ use codex_core::exec::process_exec_tool_call;
use codex_core::protocol::SandboxPolicy;
use codex_core::spawn::CODEX_SANDBOX_ENV_VAR;
use tempfile::TempDir;
use tokio::sync::Notify;
use codex_core::error::Result;
@@ -23,7 +26,6 @@ fn skip_test() -> bool {
false
}
#[expect(clippy::expect_used)]
async fn run_test_cmd(tmp: TempDir, cmd: Vec<&str>) -> Result<ExecToolCallOutput> {
let sandbox_type = get_platform_sandbox().expect("should be able to get sandbox type");
assert_eq!(sandbox_type, SandboxType::MacosSeatbelt);
@@ -37,9 +39,10 @@ async fn run_test_cmd(tmp: TempDir, cmd: Vec<&str>) -> Result<ExecToolCallOutput
justification: None,
};
let ctrl_c = Arc::new(Notify::new());
let policy = SandboxPolicy::new_read_only_policy();
process_exec_tool_call(params, sandbox_type, &policy, &None, None).await
process_exec_tool_call(params, sandbox_type, ctrl_c, &policy, &None, None).await
}
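
This hunk threads an `Arc<Notify>` interrupt handle into `process_exec_tool_call`. The usual shape of honoring such a handle is to race it against the child process; a sketch under the assumption that an interrupt kills the child (this is not the crate's actual implementation):

```rust
use std::process::ExitStatus;
use std::sync::Arc;
use std::time::Duration;
use tokio::process::Command;
use tokio::sync::Notify;

async fn run_cancellable(
    mut cmd: Command,
    ctrl_c: Arc<Notify>,
) -> std::io::Result<Option<ExitStatus>> {
    let mut child = cmd.spawn()?;
    tokio::select! {
        status = child.wait() => status.map(Some),
        _ = ctrl_c.notified() => {
            // Interrupted: kill the child and report no exit status.
            child.kill().await?;
            Ok(None)
        }
    }
}

#[tokio::main]
async fn main() -> std::io::Result<()> {
    let ctrl_c = Arc::new(Notify::new());
    let mut cmd = Command::new("sleep"); // Unix-only demo command.
    cmd.arg("5");
    let notifier = Arc::clone(&ctrl_c);
    tokio::spawn(async move {
        tokio::time::sleep(Duration::from_millis(100)).await;
        notifier.notify_waiters();
    });
    let status = run_cancellable(cmd, ctrl_c).await?;
    assert!(status.is_none(), "expected the interrupt to win");
    Ok(())
}
```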
/// Command succeeds with exit code 0 normally
@@ -68,6 +71,7 @@ async fn truncates_output_lines() {
let tmp = TempDir::new().expect("should be able to create temp dir");
let cmd = vec!["seq", "300"];
#[expect(clippy::unwrap_used)]
let output = run_test_cmd(tmp, cmd).await.unwrap();
let expected_output = (1..=256)

View File

@@ -2,6 +2,7 @@
use std::collections::HashMap;
use std::path::PathBuf;
use std::sync::Arc;
use async_channel::Receiver;
use codex_core::exec::ExecParams;
@@ -13,6 +14,7 @@ use codex_core::protocol::EventMsg;
use codex_core::protocol::ExecCommandOutputDeltaEvent;
use codex_core::protocol::ExecOutputStream;
use codex_core::protocol::SandboxPolicy;
use tokio::sync::Notify;
fn collect_stdout_events(rx: Receiver<Event>) -> Vec<u8> {
let mut out = Vec::new();
@@ -55,11 +57,13 @@ async fn test_exec_stdout_stream_events_echo() {
justification: None,
};
let ctrl_c = Arc::new(Notify::new());
let policy = SandboxPolicy::new_read_only_policy();
let result = process_exec_tool_call(
params,
SandboxType::None,
ctrl_c,
&policy,
&None,
Some(stdout_stream),
@@ -105,11 +109,13 @@ async fn test_exec_stderr_stream_events_echo() {
justification: None,
};
let ctrl_c = Arc::new(Notify::new());
let policy = SandboxPolicy::new_read_only_policy();
let result = process_exec_tool_call(
params,
SandboxType::None,
ctrl_c,
&policy,
&None,
Some(stdout_stream),

View File

@@ -0,0 +1,209 @@
#![expect(clippy::unwrap_used, clippy::expect_used)]
//! Live integration tests that exercise the full [`Agent`] stack **against the real
//! OpenAI `/v1/responses` API**. These tests complement the lightweight mock-based
//! unit tests by verifying that the agent can drive an end-to-end conversation,
//! stream incremental events, execute function-call tool invocations and safely
//! chain multiple turns inside a single session: the exact scenarios that have
//! historically been brittle.
//!
//! The live tests are **ignored by default** so CI remains deterministic and free
//! of external dependencies. Developers can opt in locally with e.g.
//!
//! ```bash
//! OPENAI_API_KEY=sk-... cargo test --test live_agent -- --ignored --nocapture
//! ```
//!
//! Make sure your key has access to the experimental *Responses* API and that
//! any billable usage is acceptable.
use std::time::Duration;
use codex_core::Codex;
use codex_core::CodexSpawnOk;
use codex_core::error::CodexErr;
use codex_core::protocol::AgentMessageEvent;
use codex_core::protocol::ErrorEvent;
use codex_core::protocol::EventMsg;
use codex_core::protocol::InputItem;
use codex_core::protocol::Op;
use core_test_support::load_default_config_for_test;
use tempfile::TempDir;
use tokio::sync::Notify;
use tokio::time::timeout;
fn api_key_available() -> bool {
std::env::var("OPENAI_API_KEY").is_ok()
}
/// Helper that spawns a fresh agent and sends the mandatory *ConfigureSession*
/// submission. The caller receives the constructed [`Codex`] handle, ready to
/// accept user input submissions.
async fn spawn_codex() -> Result<Codex, CodexErr> {
assert!(
api_key_available(),
"OPENAI_API_KEY must be set for live tests"
);
let codex_home = TempDir::new().unwrap();
let mut config = load_default_config_for_test(&codex_home);
config.model_provider.request_max_retries = Some(2);
config.model_provider.stream_max_retries = Some(2);
let CodexSpawnOk { codex: agent, .. } =
Codex::spawn(config, None, std::sync::Arc::new(Notify::new())).await?;
Ok(agent)
}
/// Verifies that the agent streams incremental *AgentMessage* events **before**
/// emitting `TaskComplete` and that a second task inside the same session does
/// not get tripped up by a stale `previous_response_id`.
#[ignore]
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn live_streaming_and_prev_id_reset() {
if !api_key_available() {
eprintln!("skipping live_streaming_and_prev_id_reset OPENAI_API_KEY not set");
return;
}
let codex = spawn_codex().await.unwrap();
// ---------- Task 1 ----------
codex
.submit(Op::UserInput {
items: vec![InputItem::Text {
text: "Say the words 'stream test'".into(),
}],
})
.await
.unwrap();
let mut saw_message_before_complete = false;
loop {
let ev = timeout(Duration::from_secs(60), codex.next_event())
.await
.expect("timeout waiting for task1 events")
.expect("agent closed");
match ev.msg {
EventMsg::AgentMessage(_) => saw_message_before_complete = true,
EventMsg::TaskComplete(_) => break,
EventMsg::Error(ErrorEvent { message }) => {
panic!("agent reported error in task1: {message}")
}
_ => {
// Ignore other events.
}
}
}
assert!(
saw_message_before_complete,
"Agent did not stream any AgentMessage before TaskComplete"
);
// ---------- Task 2 (same session) ----------
codex
.submit(Op::UserInput {
items: vec![InputItem::Text {
text: "Respond with exactly: second turn succeeded".into(),
}],
})
.await
.unwrap();
let mut got_expected = false;
loop {
let ev = timeout(Duration::from_secs(60), codex.next_event())
.await
.expect("timeout waiting for task2 events")
.expect("agent closed");
match &ev.msg {
EventMsg::AgentMessage(AgentMessageEvent { message })
if message.contains("second turn succeeded") =>
{
got_expected = true;
}
EventMsg::TaskComplete(_) => break,
EventMsg::Error(ErrorEvent { message }) => {
panic!("agent reported error in task2: {message}")
}
_ => {
// Ignore other events.
}
}
}
assert!(got_expected, "second task did not receive expected answer");
}
/// Exercises a *function-call → shell execution* round-trip by instructing the
/// model to run a harmless `echo` command. The test asserts that:
/// 1. the function call is executed (we see `ExecCommandBegin`/`End` events)
/// 2. the captured stdout reaches the client unchanged.
#[ignore]
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn live_shell_function_call() {
if !api_key_available() {
eprintln!("skipping live_shell_function_call OPENAI_API_KEY not set");
return;
}
let codex = spawn_codex().await.unwrap();
const MARKER: &str = "codex_live_echo_ok";
codex
.submit(Op::UserInput {
items: vec![InputItem::Text {
text: format!(
"Use the shell function to run the command `echo {MARKER}` and no other commands."
),
}],
})
.await
.unwrap();
let mut saw_begin = false;
let mut saw_end_with_output = false;
loop {
let ev = timeout(Duration::from_secs(60), codex.next_event())
.await
.expect("timeout waiting for functioncall events")
.expect("agent closed");
match ev.msg {
EventMsg::ExecCommandBegin(codex_core::protocol::ExecCommandBeginEvent {
command,
..
}) => {
assert_eq!(command, vec!["echo", MARKER]);
saw_begin = true;
}
EventMsg::ExecCommandEnd(codex_core::protocol::ExecCommandEndEvent {
stdout,
exit_code,
..
}) => {
assert_eq!(exit_code, 0, "echo returned nonzero exit code");
assert!(stdout.contains(MARKER));
saw_end_with_output = true;
}
EventMsg::TaskComplete(_) => break,
EventMsg::Error(codex_core::protocol::ErrorEvent { message }) => {
panic!("agent error during shell test: {message}")
}
_ => {
// Ignore other events.
}
}
}
assert!(saw_begin, "ExecCommandBegin event missing");
assert!(
saw_end_with_output,
"ExecCommandEnd with expected output missing"
);
}

View File

@@ -17,7 +17,7 @@ fn require_api_key() -> String {
/// Helper that spawns the binary inside a TempDir with minimal flags. Returns (Assert, TempDir).
fn run_live(prompt: &str) -> (assert_cmd::assert::Assert, TempDir) {
#![expect(clippy::unwrap_used)]
#![allow(clippy::unwrap_used)]
use std::io::Read;
use std::io::Write;
use std::thread;
@@ -113,6 +113,7 @@ fn run_live(prompt: &str) -> (assert_cmd::assert::Assert, TempDir) {
#[ignore]
#[test]
fn live_create_file_hello_txt() {
#![allow(clippy::unwrap_used)]
if std::env::var("OPENAI_API_KEY").is_err() {
eprintln!("skipping live_create_file_hello_txt OPENAI_API_KEY not set");
return;

View File

@@ -1,13 +1,12 @@
use codex_core::ConversationManager;
#![allow(clippy::expect_used, clippy::unwrap_used)]
use codex_core::Codex;
use codex_core::CodexSpawnOk;
use codex_core::ModelProviderInfo;
use codex_core::built_in_model_providers;
use codex_core::protocol::AskForApproval;
use codex_core::protocol::EventMsg;
use codex_core::protocol::InputItem;
use codex_core::protocol::Op;
use codex_core::protocol::SandboxPolicy;
use codex_core::protocol_config_types::ReasoningEffort;
use codex_core::protocol_config_types::ReasoningSummary;
use codex_login::CodexAuth;
use core_test_support::load_default_config_for_test;
use core_test_support::load_sse_fixture_with_id;
@@ -26,6 +25,7 @@ fn sse_completed(id: &str) -> String {
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn prefixes_context_and_instructions_once_and_consistently_across_requests() {
#![allow(clippy::unwrap_used)]
use pretty_assertions::assert_eq;
let server = MockServer::start().await;
@@ -55,12 +55,14 @@ async fn prefixes_context_and_instructions_once_and_consistently_across_requests
config.model_provider = model_provider;
config.user_instructions = Some("be consistent and helpful".to_string());
let conversation_manager = ConversationManager::default();
let codex = conversation_manager
.new_conversation_with_auth(config, Some(CodexAuth::from_api_key("Test API Key")))
.await
.expect("create new conversation")
.conversation;
let ctrl_c = std::sync::Arc::new(tokio::sync::Notify::new());
let CodexSpawnOk { codex, .. } = Codex::spawn(
config,
Some(CodexAuth::from_api_key("Test API Key")),
ctrl_c.clone(),
)
.await
.unwrap();
codex
.submit(Op::UserInput {
@@ -86,7 +88,7 @@ async fn prefixes_context_and_instructions_once_and_consistently_across_requests
assert_eq!(requests.len(), 2, "expected two POST requests");
let expected_env_text = format!(
"<environment_context>\nCurrent working directory: {}\nApproval policy: on-request\nSandbox mode: read-only\nNetwork access: restricted\n</environment_context>",
"<environment_context>\n\nCurrent working directory: {}\nApproval policy: on-request\nSandbox policy: read-only\nNetwork access: restricted\n\n\n</environment_context>",
cwd.path().to_string_lossy()
);
let expected_ui_text =
@@ -114,7 +116,7 @@ async fn prefixes_context_and_instructions_once_and_consistently_across_requests
let body1 = requests[0].body_json::<serde_json::Value>().unwrap();
assert_eq!(
body1["input"],
serde_json::json!([expected_ui_msg, expected_env_msg, expected_user_message_1])
serde_json::json!([expected_env_msg, expected_ui_msg, expected_user_message_1])
);
let expected_user_message_2 = serde_json::json!({
@@ -133,230 +135,3 @@ async fn prefixes_context_and_instructions_once_and_consistently_across_requests
);
assert_eq!(body2["input"], expected_body2);
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn overrides_turn_context_but_keeps_cached_prefix_and_key_constant() {
use pretty_assertions::assert_eq;
let server = MockServer::start().await;
let sse = sse_completed("resp");
let template = ResponseTemplate::new(200)
.insert_header("content-type", "text/event-stream")
.set_body_raw(sse, "text/event-stream");
// Expect two POSTs to /v1/responses
Mock::given(method("POST"))
.and(path("/v1/responses"))
.respond_with(template)
.expect(2)
.mount(&server)
.await;
let model_provider = ModelProviderInfo {
base_url: Some(format!("{}/v1", server.uri())),
..built_in_model_providers()["openai"].clone()
};
let cwd = TempDir::new().unwrap();
let codex_home = TempDir::new().unwrap();
let mut config = load_default_config_for_test(&codex_home);
config.cwd = cwd.path().to_path_buf();
config.model_provider = model_provider;
config.user_instructions = Some("be consistent and helpful".to_string());
let conversation_manager = ConversationManager::default();
let codex = conversation_manager
.new_conversation_with_auth(config, Some(CodexAuth::from_api_key("Test API Key")))
.await
.expect("create new conversation")
.conversation;
// First turn
codex
.submit(Op::UserInput {
items: vec![InputItem::Text {
text: "hello 1".into(),
}],
})
.await
.unwrap();
wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await;
// Change everything about the turn context.
let new_cwd = TempDir::new().unwrap();
let writable = TempDir::new().unwrap();
codex
.submit(Op::OverrideTurnContext {
cwd: Some(new_cwd.path().to_path_buf()),
approval_policy: Some(AskForApproval::Never),
sandbox_policy: Some(SandboxPolicy::WorkspaceWrite {
writable_roots: vec![writable.path().to_path_buf()],
network_access: true,
exclude_tmpdir_env_var: true,
exclude_slash_tmp: true,
}),
model: Some("o3".to_string()),
effort: Some(ReasoningEffort::High),
summary: Some(ReasoningSummary::Detailed),
})
.await
.unwrap();
// Second turn after overrides
codex
.submit(Op::UserInput {
items: vec![InputItem::Text {
text: "hello 2".into(),
}],
})
.await
.unwrap();
wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await;
// Verify we issued exactly two requests, and the cached prefix stayed identical.
let requests = server.received_requests().await.unwrap();
assert_eq!(requests.len(), 2, "expected two POST requests");
let body1 = requests[0].body_json::<serde_json::Value>().unwrap();
let body2 = requests[1].body_json::<serde_json::Value>().unwrap();
// prompt_cache_key should remain constant across overrides
assert_eq!(
body1["prompt_cache_key"], body2["prompt_cache_key"],
"prompt_cache_key should not change across overrides"
);
// The entire prefix from the first request should be identical and reused
// as the prefix of the second request, ensuring cache hit potential.
let expected_user_message_2 = serde_json::json!({
"type": "message",
"id": serde_json::Value::Null,
"role": "user",
"content": [ { "type": "input_text", "text": "hello 2" } ]
});
// After overriding the turn context, the environment context should be emitted again
// reflecting the new cwd, approval policy and sandbox settings.
let expected_env_text_2 = format!(
"<environment_context>\nCurrent working directory: {}\nApproval policy: never\nSandbox mode: workspace-write\nNetwork access: enabled\n</environment_context>",
new_cwd.path().to_string_lossy()
);
let expected_env_msg_2 = serde_json::json!({
"type": "message",
"id": serde_json::Value::Null,
"role": "user",
"content": [ { "type": "input_text", "text": expected_env_text_2 } ]
});
let expected_body2 = serde_json::json!(
[
body1["input"].as_array().unwrap().as_slice(),
[expected_env_msg_2, expected_user_message_2].as_slice(),
]
.concat()
);
assert_eq!(body2["input"], expected_body2);
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn per_turn_overrides_keep_cached_prefix_and_key_constant() {
use pretty_assertions::assert_eq;
let server = MockServer::start().await;
let sse = sse_completed("resp");
let template = ResponseTemplate::new(200)
.insert_header("content-type", "text/event-stream")
.set_body_raw(sse, "text/event-stream");
// Expect two POSTs to /v1/responses
Mock::given(method("POST"))
.and(path("/v1/responses"))
.respond_with(template)
.expect(2)
.mount(&server)
.await;
let model_provider = ModelProviderInfo {
base_url: Some(format!("{}/v1", server.uri())),
..built_in_model_providers()["openai"].clone()
};
let cwd = TempDir::new().unwrap();
let codex_home = TempDir::new().unwrap();
let mut config = load_default_config_for_test(&codex_home);
config.cwd = cwd.path().to_path_buf();
config.model_provider = model_provider;
config.user_instructions = Some("be consistent and helpful".to_string());
let conversation_manager = ConversationManager::default();
let codex = conversation_manager
.new_conversation_with_auth(config, Some(CodexAuth::from_api_key("Test API Key")))
.await
.expect("create new conversation")
.conversation;
// First turn
codex
.submit(Op::UserInput {
items: vec![InputItem::Text {
text: "hello 1".into(),
}],
})
.await
.unwrap();
wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await;
// Second turn using per-turn overrides via UserTurn
let new_cwd = TempDir::new().unwrap();
let writable = TempDir::new().unwrap();
codex
.submit(Op::UserTurn {
items: vec![InputItem::Text {
text: "hello 2".into(),
}],
cwd: new_cwd.path().to_path_buf(),
approval_policy: AskForApproval::Never,
sandbox_policy: SandboxPolicy::WorkspaceWrite {
writable_roots: vec![writable.path().to_path_buf()],
network_access: true,
exclude_tmpdir_env_var: true,
exclude_slash_tmp: true,
},
model: "o3".to_string(),
effort: ReasoningEffort::High,
summary: ReasoningSummary::Detailed,
})
.await
.unwrap();
wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await;
// Verify we issued exactly two requests, and the cached prefix stayed identical.
let requests = server.received_requests().await.unwrap();
assert_eq!(requests.len(), 2, "expected two POST requests");
let body1 = requests[0].body_json::<serde_json::Value>().unwrap();
let body2 = requests[1].body_json::<serde_json::Value>().unwrap();
// prompt_cache_key should remain constant across per-turn overrides
assert_eq!(
body1["prompt_cache_key"], body2["prompt_cache_key"],
"prompt_cache_key should not change across per-turn overrides"
);
// The entire prefix from the first request should be identical and reused
// as the prefix of the second request.
let expected_user_message_2 = serde_json::json!({
"type": "message",
"id": serde_json::Value::Null,
"role": "user",
"content": [ { "type": "input_text", "text": "hello 2" } ]
});
let expected_body2 = serde_json::json!(
[
body1["input"].as_array().unwrap().as_slice(),
[expected_user_message_2].as_slice(),
]
.concat()
);
assert_eq!(body2["input"], expected_body2);
}

View File

@@ -1,7 +1,5 @@
#![cfg(target_os = "macos")]
//! Tests for the macOS sandboxing that are specific to Seatbelt.
//! Tests that apply to both Mac and Linux sandboxing should go in sandbox.rs.
#![expect(clippy::expect_used)]
use std::collections::HashMap;
use std::path::Path;
@@ -159,7 +157,6 @@ async fn read_only_forbids_all_writes() {
.await;
}
#[expect(clippy::expect_used)]
fn create_test_scenario(tmp: &TempDir) -> TestScenario {
let repo_parent = tmp.path().to_path_buf();
let repo_root = repo_parent.join("repo");
@@ -177,7 +174,6 @@ fn create_test_scenario(tmp: &TempDir) -> TestScenario {
}
}
#[expect(clippy::expect_used)]
/// Note that `path` must be absolute.
async fn touch(path: &Path, policy: &SandboxPolicy) -> bool {
assert!(path.is_absolute(), "Path must be absolute: {path:?}");

View File

@@ -1,6 +1,7 @@
use std::time::Duration;
use codex_core::ConversationManager;
use codex_core::Codex;
use codex_core::CodexSpawnOk;
use codex_core::ModelProviderInfo;
use codex_core::WireApi;
use codex_core::protocol::EventMsg;
@@ -25,6 +26,7 @@ fn sse_completed(id: &str) -> String {
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn continue_after_stream_error() {
#![allow(clippy::unwrap_used)]
if std::env::var(CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR).is_ok() {
println!(
"Skipping test because it cannot execute when network is disabled in a Codex sandbox."
@@ -88,12 +90,13 @@ async fn continue_after_stream_error() {
config.base_instructions = Some("You are a helpful assistant".to_string());
config.model_provider = provider;
let conversation_manager = ConversationManager::default();
let codex = conversation_manager
.new_conversation_with_auth(config, Some(CodexAuth::from_api_key("Test API Key")))
.await
.unwrap()
.conversation;
let CodexSpawnOk { codex, .. } = Codex::spawn(
config,
Some(CodexAuth::from_api_key("Test API Key")),
std::sync::Arc::new(tokio::sync::Notify::new()),
)
.await
.unwrap();
codex
.submit(Op::UserInput {

View File

@@ -3,7 +3,8 @@
use std::time::Duration;
use codex_core::ConversationManager;
use codex_core::Codex;
use codex_core::CodexSpawnOk;
use codex_core::ModelProviderInfo;
use codex_core::protocol::EventMsg;
use codex_core::protocol::InputItem;
@@ -33,6 +34,8 @@ fn sse_completed(id: &str) -> String {
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn retries_on_early_close() {
#![allow(clippy::unwrap_used)]
if std::env::var(CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR).is_ok() {
println!(
"Skipping test because it cannot execute when network is disabled in a Codex sandbox."
@@ -90,15 +93,17 @@ async fn retries_on_early_close() {
requires_openai_auth: false,
};
let ctrl_c = std::sync::Arc::new(tokio::sync::Notify::new());
let codex_home = TempDir::new().unwrap();
let mut config = load_default_config_for_test(&codex_home);
config.model_provider = model_provider;
let conversation_manager = ConversationManager::default();
let codex = conversation_manager
.new_conversation_with_auth(config, Some(CodexAuth::from_api_key("Test API Key")))
.await
.unwrap()
.conversation;
let CodexSpawnOk { codex, .. } = Codex::spawn(
config,
Some(CodexAuth::from_api_key("Test API Key")),
ctrl_c,
)
.await
.unwrap();
codex
.submit(Op::UserInput {

View File

@@ -26,7 +26,6 @@ codex-common = { path = "../common", features = [
] }
codex-core = { path = "../core" }
codex-ollama = { path = "../ollama" }
codex-protocol = { path = "../protocol" }
owo-colors = "4.2.0"
serde_json = "1"
shlex = "1.3.0"
@@ -42,8 +41,5 @@ tracing-subscriber = { version = "0.3.19", features = ["env-filter"] }
[dev-dependencies]
assert_cmd = "2"
core_test_support = { path = "../core/tests/common" }
libc = "0.2"
predicates = "3"
tempfile = "3.13.0"
wiremock = "0.6"

View File

@@ -29,9 +29,9 @@ pub(crate) fn handle_last_message(last_agent_message: Option<&str>, output_file:
}
fn write_last_message_file(contents: &str, last_message_path: Option<&Path>) {
if let Some(path) = last_message_path
&& let Err(e) = std::fs::write(path, contents)
{
eprintln!("Failed to write last message file {path:?}: {e}");
if let Some(path) = last_message_path {
if let Err(e) = std::fs::write(path, contents) {
eprintln!("Failed to write last message file {path:?}: {e}");
}
}
}
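
For context on the shape change above: one side of the hunk writes the condition as a Rust let-chain (`if let … && let …` in a single condition), which requires a newer toolchain (let-chains are stable from the 2024 edition), while the nested form compiles on any stable Rust. A minimal sketch of the two equivalent spellings:

use std::path::Path;

// Nested form: compiles on any stable Rust.
fn write_nested(path: Option<&Path>, contents: &str) {
    if let Some(p) = path {
        if let Err(e) = std::fs::write(p, contents) {
            eprintln!("Failed to write {p:?}: {e}");
        }
    }
}

// Let-chain form: the same logic as one flat condition.
// Requires let-chains (stable from the 2024 edition).
fn write_chained(path: Option<&Path>, contents: &str) {
    if let Some(p) = path
        && let Err(e) = std::fs::write(p, contents)
    {
        eprintln!("Failed to write {p:?}: {e}");
    }
}

fn main() {
    write_nested(None, "noop");
    write_chained(None, "noop");
}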

View File

@@ -21,7 +21,6 @@ use codex_core::protocol::PatchApplyBeginEvent;
use codex_core::protocol::PatchApplyEndEvent;
use codex_core::protocol::SessionConfiguredEvent;
use codex_core::protocol::TaskCompleteEvent;
use codex_core::protocol::TurnAbortReason;
use codex_core::protocol::TurnDiffEvent;
use owo_colors::OwoColorize;
use owo_colors::Style;
@@ -192,7 +191,7 @@ impl EventProcessor for EventProcessorWithHumanOutput {
self.answer_started = true;
}
print!("{delta}");
#[expect(clippy::expect_used)]
#[allow(clippy::expect_used)]
std::io::stdout().flush().expect("could not flush stdout");
}
EventMsg::AgentReasoningDelta(AgentReasoningDeltaEvent { delta }) => {
@@ -208,15 +207,7 @@ impl EventProcessor for EventProcessorWithHumanOutput {
self.reasoning_started = true;
}
print!("{delta}");
#[expect(clippy::expect_used)]
std::io::stdout().flush().expect("could not flush stdout");
}
EventMsg::AgentReasoningSectionBreak(_) => {
if !self.show_agent_reasoning {
return CodexStatus::Running;
}
println!();
#[expect(clippy::expect_used)]
#[allow(clippy::expect_used)]
std::io::stdout().flush().expect("could not flush stdout");
}
EventMsg::AgentReasoningRawContent(AgentReasoningRawContentEvent { text }) => {
@@ -225,7 +216,7 @@ impl EventProcessor for EventProcessorWithHumanOutput {
}
if !self.raw_reasoning_started {
print!("{text}");
#[expect(clippy::expect_used)]
#[allow(clippy::expect_used)]
std::io::stdout().flush().expect("could not flush stdout");
} else {
println!();
@@ -242,7 +233,7 @@ impl EventProcessor for EventProcessorWithHumanOutput {
self.raw_reasoning_started = true;
}
print!("{delta}");
#[expect(clippy::expect_used)]
#[allow(clippy::expect_used)]
std::io::stdout().flush().expect("could not flush stdout");
}
EventMsg::AgentMessage(AgentMessageEvent { message }) => {
@@ -523,17 +514,6 @@ impl EventProcessor for EventProcessorWithHumanOutput {
EventMsg::GetHistoryEntryResponse(_) => {
// Currently ignored in exec output.
}
EventMsg::McpListToolsResponse(_) => {
// Currently ignored in exec output.
}
EventMsg::TurnAborted(abort_reason) => match abort_reason.reason {
TurnAbortReason::Interrupted => {
ts_println!(self, "task interrupted");
}
TurnAbortReason::Replaced => {
ts_println!(self, "task aborted: replaced by a new task");
}
},
EventMsg::ShutdownComplete => return CodexStatus::Shutdown,
}
CodexStatus::Running

View File

@@ -28,7 +28,7 @@ impl EventProcessor for EventProcessorWithJsonOutput {
.into_iter()
.map(|(key, value)| (key.to_string(), value))
.collect::<HashMap<String, String>>();
#[expect(clippy::expect_used)]
#[allow(clippy::expect_used)]
let config_json =
serde_json::to_string(&entries).expect("Failed to serialize config summary to JSON");
println!("{config_json}");

View File

@@ -6,13 +6,15 @@ mod event_processor_with_json_output;
use std::io::IsTerminal;
use std::io::Read;
use std::path::PathBuf;
use std::sync::Arc;
pub use cli::Cli;
use codex_core::BUILT_IN_OSS_MODEL_PROVIDER_ID;
use codex_core::ConversationManager;
use codex_core::NewConversation;
use codex_core::codex_wrapper::CodexConversation;
use codex_core::codex_wrapper::{self};
use codex_core::config::Config;
use codex_core::config::ConfigOverrides;
use codex_core::config_types::SandboxMode;
use codex_core::protocol::AskForApproval;
use codex_core::protocol::Event;
use codex_core::protocol::EventMsg;
@@ -21,7 +23,6 @@ use codex_core::protocol::Op;
use codex_core::protocol::TaskCompleteEvent;
use codex_core::util::is_inside_git_repo;
use codex_ollama::DEFAULT_OSS_MODEL;
use codex_protocol::config_types::SandboxMode;
use event_processor_with_human_output::EventProcessorWithHumanOutput;
use event_processor_with_json_output::EventProcessorWithJsonOutput;
use tracing::debug;
@@ -146,7 +147,6 @@ pub async fn run_main(cli: Cli, codex_linux_sandbox_exe: Option<PathBuf>) -> any
codex_linux_sandbox_exe,
base_instructions: None,
include_plan_tool: None,
include_apply_patch_tool: None,
disable_response_storage: oss.then_some(true),
show_raw_agent_reasoning: oss.then_some(true),
};
@@ -185,30 +185,35 @@ pub async fn run_main(cli: Cli, codex_linux_sandbox_exe: Option<PathBuf>) -> any
std::process::exit(1);
}
let conversation_manager = ConversationManager::default();
let NewConversation {
conversation_id: _,
conversation,
let CodexConversation {
codex: codex_wrapper,
session_configured,
} = conversation_manager.new_conversation(config).await?;
ctrl_c,
..
} = codex_wrapper::init_codex(config).await?;
let codex = Arc::new(codex_wrapper);
info!("Codex initialized with event: {session_configured:?}");
let (tx, mut rx) = tokio::sync::mpsc::unbounded_channel::<Event>();
{
let conversation = conversation.clone();
let codex = codex.clone();
tokio::spawn(async move {
loop {
let interrupted = ctrl_c.notified();
tokio::select! {
_ = tokio::signal::ctrl_c() => {
tracing::debug!("Keyboard interrupt");
// Immediately notify Codex to abort any inflight task.
conversation.submit(Op::Interrupt).await.ok();
_ = interrupted => {
// Forward an interrupt to the codex so it can abort any inflight task.
let _ = codex
.submit(
Op::Interrupt,
)
.await;
// Exit the inner loop and return to the main input prompt. The codex
// will emit a `TurnInterrupted` (Error) event which is drained later.
break;
}
res = conversation.next_event() => match res {
res = codex.next_event() => match res {
Ok(event) => {
debug!("Received event: {event:?}");
@@ -238,9 +243,9 @@ pub async fn run_main(cli: Cli, codex_linux_sandbox_exe: Option<PathBuf>) -> any
.into_iter()
.map(|path| InputItem::LocalImage { path })
.collect();
let initial_images_event_id = conversation.submit(Op::UserInput { items }).await?;
let initial_images_event_id = codex.submit(Op::UserInput { items }).await?;
info!("Sent images with event ID: {initial_images_event_id}");
while let Ok(event) = conversation.next_event().await {
while let Ok(event) = codex.next_event().await {
if event.id == initial_images_event_id
&& matches!(
event.msg,
@@ -256,7 +261,7 @@ pub async fn run_main(cli: Cli, codex_linux_sandbox_exe: Option<PathBuf>) -> any
// Send the prompt.
let items: Vec<InputItem> = vec![InputItem::Text { text: prompt }];
let initial_prompt_task_id = conversation.submit(Op::UserInput { items }).await?;
let initial_prompt_task_id = codex.submit(Op::UserInput { items }).await?;
info!("Sent prompt with event ID: {initial_prompt_task_id}");
// Run the loop until the task is complete.
@@ -265,7 +270,7 @@ pub async fn run_main(cli: Cli, codex_linux_sandbox_exe: Option<PathBuf>) -> any
match shutdown {
CodexStatus::Running => continue,
CodexStatus::InitiateShutdown => {
conversation.submit(Op::Shutdown).await?;
codex.submit(Op::Shutdown).await?;
}
CodexStatus::Shutdown => {
break;
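
The select loop above races an interrupt notification against the next event from the conversation. A self-contained sketch of that pattern, with hypothetical stand-ins for the conversation handle (tokio with the `signal`, `macros`, and `rt-multi-thread` features assumed):

struct Conversation;

impl Conversation {
    async fn submit_interrupt(&self) {
        // Stand-in: the real handle would submit Op::Interrupt here.
    }
    async fn next_event(&self) -> Option<&'static str> {
        // Stand-in: the real handle would await the next protocol event.
        Some("TaskComplete")
    }
}

#[tokio::main]
async fn main() {
    let conversation = Conversation;
    loop {
        tokio::select! {
            _ = tokio::signal::ctrl_c() => {
                // Forward the interrupt so any inflight task can abort,
                // then return to the input prompt.
                conversation.submit_interrupt().await;
                break;
            }
            ev = conversation.next_event() => match ev {
                Some(event) => {
                    println!("event: {event}");
                    break;
                }
                None => break,
            }
        }
    }
}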

View File

@@ -1,5 +1,3 @@
#![allow(clippy::expect_used, clippy::unwrap_used)]
use anyhow::Context;
use assert_cmd::prelude::*;
use codex_core::CODEX_APPLY_PATCH_ARG1;
@@ -39,152 +37,3 @@ fn test_standalone_exec_cli_can_use_apply_patch() -> anyhow::Result<()> {
);
Ok(())
}
#[cfg(not(target_os = "windows"))]
#[tokio::test]
async fn test_apply_patch_tool() -> anyhow::Result<()> {
use core_test_support::load_sse_fixture_with_id_from_str;
use tempfile::TempDir;
use wiremock::Mock;
use wiremock::MockServer;
use wiremock::ResponseTemplate;
use wiremock::matchers::method;
use wiremock::matchers::path;
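// Each fixture below is a JSON array of SSE events: an apply_patch
// function_call item (for the first two) followed by a `response.completed`
// record; the `__ID__` placeholder is filled in per request by
// `load_sse_fixture_with_id_from_str`.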
const SSE_TOOL_CALL_ADD: &str = r#"[
{
"type": "response.output_item.done",
"item": {
"type": "function_call",
"name": "apply_patch",
"arguments": "{\n \"input\": \"*** Begin Patch\\n*** Add File: test.md\\n+Hello world\\n*** End Patch\"\n}",
"call_id": "__ID__"
}
},
{
"type": "response.completed",
"response": {
"id": "__ID__",
"usage": {
"input_tokens": 0,
"input_tokens_details": null,
"output_tokens": 0,
"output_tokens_details": null,
"total_tokens": 0
},
"output": []
}
}
]"#;
const SSE_TOOL_CALL_UPDATE: &str = r#"[
{
"type": "response.output_item.done",
"item": {
"type": "function_call",
"name": "apply_patch",
"arguments": "{\n \"input\": \"*** Begin Patch\\n*** Update File: test.md\\n@@\\n-Hello world\\n+Final text\\n*** End Patch\"\n}",
"call_id": "__ID__"
}
},
{
"type": "response.completed",
"response": {
"id": "__ID__",
"usage": {
"input_tokens": 0,
"input_tokens_details": null,
"output_tokens": 0,
"output_tokens_details": null,
"total_tokens": 0
},
"output": []
}
}
]"#;
const SSE_TOOL_CALL_COMPLETED: &str = r#"[
{
"type": "response.completed",
"response": {
"id": "__ID__",
"usage": {
"input_tokens": 0,
"input_tokens_details": null,
"output_tokens": 0,
"output_tokens_details": null,
"total_tokens": 0
},
"output": []
}
}
]"#;
// Start a mock model server
let server = MockServer::start().await;
// First response: model calls apply_patch to create test.md
let first = ResponseTemplate::new(200)
.insert_header("content-type", "text/event-stream")
.set_body_raw(
load_sse_fixture_with_id_from_str(SSE_TOOL_CALL_ADD, "call1"),
"text/event-stream",
);
Mock::given(method("POST"))
// .and(path("/v1/responses"))
.respond_with(first)
.up_to_n_times(1)
.mount(&server)
.await;
// Second response: model calls apply_patch to update test.md
let second = ResponseTemplate::new(200)
.insert_header("content-type", "text/event-stream")
.set_body_raw(
load_sse_fixture_with_id_from_str(SSE_TOOL_CALL_UPDATE, "call2"),
"text/event-stream",
);
Mock::given(method("POST"))
.and(path("/v1/responses"))
.respond_with(second)
.up_to_n_times(1)
.mount(&server)
.await;
let final_completed = ResponseTemplate::new(200)
.insert_header("content-type", "text/event-stream")
.set_body_raw(
load_sse_fixture_with_id_from_str(SSE_TOOL_CALL_COMPLETED, "resp3"),
"text/event-stream",
);
Mock::given(method("POST"))
// .and(path("/v1/responses"))
.respond_with(final_completed)
.expect(1)
.mount(&server)
.await;
let tmp_cwd = TempDir::new().unwrap();
Command::cargo_bin("codex-exec")
.context("should find binary for codex-exec")?
.current_dir(tmp_cwd.path())
.env("CODEX_HOME", tmp_cwd.path())
.env("OPENAI_API_KEY", "dummy")
.env("OPENAI_BASE_URL", format!("{}/v1", server.uri()))
.arg("--skip-git-repo-check")
.arg("-s")
.arg("workspace-write")
.arg("foo")
.assert()
.success();
// Verify final file contents
let final_path = tmp_cwd.path().join("test.md");
let contents = std::fs::read_to_string(&final_path)
.unwrap_or_else(|e| panic!("failed reading {}: {e}", final_path.display()));
assert_eq!(contents, "Final text\n");
Ok(())
}

View File

@@ -1,219 +0,0 @@
#![cfg(unix)]
use codex_core::protocol::SandboxPolicy;
use codex_core::spawn::StdioPolicy;
use std::collections::HashMap;
use std::future::Future;
use std::io;
use std::path::PathBuf;
use std::process::ExitStatus;
use tokio::process::Child;
#[cfg(target_os = "macos")]
async fn spawn_command_under_sandbox(
command: Vec<String>,
sandbox_policy: &SandboxPolicy,
cwd: PathBuf,
stdio_policy: StdioPolicy,
env: HashMap<String, String>,
) -> std::io::Result<Child> {
use codex_core::seatbelt::spawn_command_under_seatbelt;
spawn_command_under_seatbelt(command, sandbox_policy, cwd, stdio_policy, env).await
}
#[cfg(target_os = "linux")]
async fn spawn_command_under_sandbox(
command: Vec<String>,
sandbox_policy: &SandboxPolicy,
cwd: PathBuf,
stdio_policy: StdioPolicy,
env: HashMap<String, String>,
) -> std::io::Result<Child> {
use codex_core::landlock::spawn_command_under_linux_sandbox;
let codex_linux_sandbox_exe = assert_cmd::cargo::cargo_bin("codex-exec");
spawn_command_under_linux_sandbox(
codex_linux_sandbox_exe,
command,
sandbox_policy,
cwd,
stdio_policy,
env,
)
.await
}
#[tokio::test]
async fn python_multiprocessing_lock_works_under_sandbox() {
#[cfg(target_os = "macos")]
let writable_roots = Vec::<PathBuf>::new();
// From https://man7.org/linux/man-pages/man7/sem_overview.7.html
//
// > On Linux, named semaphores are created in a virtual filesystem,
// > normally mounted under /dev/shm.
#[cfg(target_os = "linux")]
let writable_roots = vec![PathBuf::from("/dev/shm")];
let policy = SandboxPolicy::WorkspaceWrite {
writable_roots,
network_access: false,
exclude_tmpdir_env_var: false,
exclude_slash_tmp: false,
};
let python_code = r#"import multiprocessing
from multiprocessing import Lock, Process
def f(lock):
with lock:
print("Lock acquired in child process")
if __name__ == '__main__':
lock = Lock()
p = Process(target=f, args=(lock,))
p.start()
p.join()
"#;
let mut child = spawn_command_under_sandbox(
vec![
"python3".to_string(),
"-c".to_string(),
python_code.to_string(),
],
&policy,
std::env::current_dir().expect("should be able to get current dir"),
StdioPolicy::Inherit,
HashMap::new(),
)
.await
.expect("should be able to spawn python under sandbox");
let status = child.wait().await.expect("should wait for child process");
assert!(status.success(), "python exited with {status:?}");
}
fn unix_sock_body() {
unsafe {
let mut fds = [0i32; 2];
let r = libc::socketpair(libc::AF_UNIX, libc::SOCK_DGRAM, 0, fds.as_mut_ptr());
assert_eq!(
r,
0,
"socketpair(AF_UNIX, SOCK_DGRAM) failed: {}",
io::Error::last_os_error()
);
let msg = b"hello_unix";
// write() from one end (generic write is allowed)
let sent = libc::write(fds[0], msg.as_ptr() as *const libc::c_void, msg.len());
assert!(sent >= 0, "write() failed: {}", io::Error::last_os_error());
// recvfrom() on the other end. We don't need the address for socketpair,
// so we pass null pointers for src address.
let mut buf = [0u8; 64];
let recvd = libc::recvfrom(
fds[1],
buf.as_mut_ptr() as *mut libc::c_void,
buf.len(),
0,
std::ptr::null_mut(),
std::ptr::null_mut(),
);
assert!(
recvd >= 0,
"recvfrom() failed: {}",
io::Error::last_os_error()
);
let recvd_slice = &buf[..(recvd as usize)];
assert_eq!(
recvd_slice,
&msg[..],
"payload mismatch: sent {} bytes, got {} bytes",
msg.len(),
recvd
);
// Also exercise an AF_UNIX stream socketpair to confirm AF_UNIX works in general.
let mut sfds = [0i32; 2];
let sr = libc::socketpair(libc::AF_UNIX, libc::SOCK_STREAM, 0, sfds.as_mut_ptr());
assert_eq!(
sr,
0,
"socketpair(AF_UNIX, SOCK_STREAM) failed: {}",
io::Error::last_os_error()
);
let snt2 = libc::write(sfds[0], msg.as_ptr() as *const libc::c_void, msg.len());
assert!(
snt2 >= 0,
"write(stream) failed: {}",
io::Error::last_os_error()
);
let mut b2 = [0u8; 64];
let rcv2 = libc::recv(sfds[1], b2.as_mut_ptr() as *mut libc::c_void, b2.len(), 0);
assert!(
rcv2 >= 0,
"recv(stream) failed: {}",
io::Error::last_os_error()
);
// Clean up
let _ = libc::close(sfds[0]);
let _ = libc::close(sfds[1]);
let _ = libc::close(fds[0]);
let _ = libc::close(fds[1]);
}
}
#[tokio::test]
async fn allow_unix_socketpair_recvfrom() {
run_code_under_sandbox(
"allow_unix_socketpair_recvfrom",
&SandboxPolicy::ReadOnly,
|| async { unix_sock_body() },
)
.await
.expect("should be able to reexec");
}
const IN_SANDBOX_ENV_VAR: &str = "IN_SANDBOX";
#[expect(clippy::expect_used)]
pub async fn run_code_under_sandbox<F, Fut>(
test_selector: &str,
policy: &SandboxPolicy,
child_body: F,
) -> io::Result<Option<ExitStatus>>
where
F: FnOnce() -> Fut + Send + 'static,
Fut: Future<Output = ()> + Send + 'static,
{
if std::env::var(IN_SANDBOX_ENV_VAR).is_err() {
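// Parent branch: re-exec this same test binary under the sandbox,
// selecting exactly one test by name and marking the child via IN_SANDBOX.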
let exe = std::env::current_exe()?;
let mut cmds = vec![exe.to_string_lossy().into_owned(), "--exact".into()];
let mut stdio_policy = StdioPolicy::RedirectForShellTool;
// Forward --nocapture when present, and pick the matching stdio policy.
if std::env::args().any(|a| a == "--nocapture") {
cmds.push("--nocapture".into());
stdio_policy = StdioPolicy::Inherit;
}
cmds.push(test_selector.into());
// Spawn the re-invoked test binary under the requested sandbox policy.
let mut child = spawn_command_under_sandbox(
cmds,
policy,
std::env::current_dir().expect("should be able to get current dir"),
stdio_policy,
HashMap::from([("IN_SANDBOX".into(), "1".into())]),
)
.await?;
let status = child.wait().await?;
Ok(Some(status))
} else {
// Child branch: run the provided body.
child_body().await;
Ok(None)
}
}

View File

@@ -140,6 +140,7 @@ fn is_executable_file(path: &str) -> bool {
#[cfg(test)]
mod tests {
#![allow(clippy::unwrap_used)]
use tempfile::TempDir;
use super::*;
@@ -214,12 +215,7 @@ system_path=[{fake_cp:?}]
// Only readable folders specified.
assert_eq!(
checker.check(
valid_exec.clone(),
&cwd,
std::slice::from_ref(&root_path),
&[]
),
checker.check(valid_exec.clone(), &cwd, &[root_path.clone()], &[]),
Err(WriteablePathNotInWriteableFolders {
file: dest_path.clone(),
folders: vec![]
@@ -231,8 +227,8 @@ system_path=[{fake_cp:?}]
checker.check(
valid_exec.clone(),
&cwd,
std::slice::from_ref(&root_path),
std::slice::from_ref(&root_path)
&[root_path.clone()],
&[root_path.clone()]
),
Ok(cp.clone()),
);
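
One side of the hunks above passes `std::slice::from_ref(&root_path)`, the other `&[root_path.clone()]`. The callee sees the same one-element slice either way, but `from_ref` borrows it straight from the value and avoids the clone. A minimal standalone sketch:

fn main() {
    let root = String::from("/repo");

    // Borrow a length-1 slice without cloning or allocating.
    let borrowed: &[String] = std::slice::from_ref(&root);
    assert_eq!(borrowed, ["/repo".to_string()].as_slice());

    // `root` is still usable afterwards because nothing was moved.
    assert_eq!(root, "/repo");
}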
@@ -251,8 +247,8 @@ system_path=[{fake_cp:?}]
checker.check(
valid_exec_call_folders_as_args,
&cwd,
std::slice::from_ref(&root_path),
std::slice::from_ref(&root_path)
&[root_path.clone()],
&[root_path.clone()]
),
Ok(cp.clone()),
);
@@ -274,8 +270,8 @@ system_path=[{fake_cp:?}]
checker.check(
exec_with_parent_of_readable_folder,
&cwd,
std::slice::from_ref(&root_path),
std::slice::from_ref(&dest_path)
&[root_path.clone()],
&[dest_path.clone()]
),
Err(ReadablePathNotInReadableFolders {
file: root_path.parent().unwrap().to_path_buf(),

Some files were not shown because too many files have changed in this diff.