Compare commits


1 Commit

| Author | SHA1 | Message | Date |
| --- | --- | --- | --- |
| Thibault Sottiaux | 1fe35e4f7a | docs: refresh codex-cli readme | 2025-10-20 14:51:01 -07:00 |
380 changed files with 10382 additions and 31460 deletions

View File

@@ -16,7 +16,7 @@ jobs:
outputs:
codex_output: ${{ steps.codex.outputs.final-message }}
steps:
- uses: actions/checkout@v5
- uses: actions/checkout@v4
- name: Prepare Codex inputs
env:
@@ -87,7 +87,7 @@ jobs:
issues: write
steps:
- name: Comment on issue
uses: actions/github-script@v8
uses: actions/github-script@v7
env:
CODEX_OUTPUT: ${{ needs.gather-duplicates.outputs.codex_output }}
with:

View File

@@ -16,7 +16,7 @@ jobs:
outputs:
codex_output: ${{ steps.codex.outputs.final-message }}
steps:
- uses: actions/checkout@v5
- uses: actions/checkout@v4
- id: codex
uses: openai/codex-action@main

View File

@@ -9,7 +9,7 @@ on:
# CI builds in debug (dev) for faster signal.
jobs:
# --- Detect what changed to detect which tests to run (always runs) -------------------------------------
# --- Detect what changed (always runs) -------------------------------------
changed:
name: Detect changed areas
runs-on: ubuntu-24.04
@@ -84,8 +84,8 @@ jobs:
run: cargo shear
# --- CI to validate on different os/targets --------------------------------
lint_build:
name: Lint/Build — ${{ matrix.runner }} - ${{ matrix.target }}${{ matrix.profile == 'release' && ' (release)' || '' }}
lint_build_test:
name: ${{ matrix.runner }} - ${{ matrix.target }}${{ matrix.profile == 'release' && ' (release)' || '' }}
runs-on: ${{ matrix.runner }}
timeout-minutes: 30
needs: changed
@@ -94,11 +94,6 @@ jobs:
defaults:
run:
working-directory: codex-rs
env:
# Speed up repeated builds across CI runs by caching compiled objects.
RUSTC_WRAPPER: sccache
CARGO_INCREMENTAL: "0"
SCCACHE_CACHE_SIZE: 10G
strategy:
fail-fast: false
@@ -164,83 +159,20 @@ jobs:
~/.cargo/registry/index/
~/.cargo/registry/cache/
~/.cargo/git/db/
key: cargo-home-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-${{ hashFiles('**/Cargo.lock') }}-${{ hashFiles('codex-rs/rust-toolchain.toml') }}
restore-keys: |
cargo-home-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-
key: cargo-home-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-${{ hashFiles('**/Cargo.lock') }}
# Install and restore sccache cache
- name: Install sccache
uses: taiki-e/install-action@0c5db7f7f897c03b771660e91d065338615679f4 # v2
with:
tool: sccache
version: 0.7.5
- name: Configure sccache backend
shell: bash
run: |
set -euo pipefail
if [[ -n "${ACTIONS_CACHE_URL:-}" && -n "${ACTIONS_RUNTIME_TOKEN:-}" ]]; then
echo "SCCACHE_GHA_ENABLED=true" >> "$GITHUB_ENV"
echo "Using sccache GitHub backend"
else
echo "SCCACHE_GHA_ENABLED=false" >> "$GITHUB_ENV"
echo "SCCACHE_DIR=${{ github.workspace }}/.sccache" >> "$GITHUB_ENV"
echo "Using sccache local disk + actions/cache fallback"
fi
- name: Restore sccache cache (fallback)
if: ${{ env.SCCACHE_GHA_ENABLED != 'true' }}
id: cache_sccache_restore
- name: Restore target cache (except gnu-dev)
id: cache_target_restore
if: ${{ !(matrix.target == 'x86_64-unknown-linux-gnu' && matrix.profile != 'release') }}
uses: actions/cache/restore@v4
with:
path: ${{ github.workspace }}/.sccache/
key: sccache-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-${{ hashFiles('**/Cargo.lock') }}-${{ github.run_id }}
restore-keys: |
sccache-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-${{ hashFiles('**/Cargo.lock') }}-
sccache-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-
- if: ${{ matrix.target == 'x86_64-unknown-linux-musl' || matrix.target == 'aarch64-unknown-linux-musl'}}
name: Prepare APT cache directories (musl)
shell: bash
run: |
set -euo pipefail
sudo mkdir -p /var/cache/apt/archives /var/lib/apt/lists
sudo chown -R "$USER:$USER" /var/cache/apt /var/lib/apt/lists
- if: ${{ matrix.target == 'x86_64-unknown-linux-musl' || matrix.target == 'aarch64-unknown-linux-musl'}}
name: Restore APT cache (musl)
id: cache_apt_restore
uses: actions/cache/restore@v4
with:
path: |
/var/cache/apt
key: apt-${{ matrix.runner }}-${{ matrix.target }}-v1
path: ${{ github.workspace }}/codex-rs/target/
key: cargo-target-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-${{ hashFiles('**/Cargo.lock') }}
- if: ${{ matrix.target == 'x86_64-unknown-linux-musl' || matrix.target == 'aarch64-unknown-linux-musl'}}
name: Install musl build tools
env:
DEBIAN_FRONTEND: noninteractive
shell: bash
run: |
set -euo pipefail
sudo apt-get -y update -o Acquire::Retries=3
sudo apt-get -y install --no-install-recommends musl-tools pkg-config
- name: Install cargo-chef
if: ${{ matrix.profile == 'release' }}
uses: taiki-e/install-action@0c5db7f7f897c03b771660e91d065338615679f4 # v2
with:
tool: cargo-chef
version: 0.1.71
- name: Pre-warm dependency cache (cargo-chef)
if: ${{ matrix.profile == 'release' }}
shell: bash
run: |
set -euo pipefail
RECIPE="${RUNNER_TEMP}/chef-recipe.json"
cargo chef prepare --recipe-path "$RECIPE"
cargo chef cook --recipe-path "$RECIPE" --target ${{ matrix.target }} --release --all-features
sudo apt install -y musl-tools pkg-config && sudo rm -rf /var/lib/apt/lists/*
- name: cargo clippy
id: clippy
@@ -259,6 +191,20 @@ jobs:
find . -name Cargo.toml -mindepth 2 -maxdepth 2 -print0 \
| xargs -0 -n1 -I{} bash -c 'cd "$(dirname "{}")" && cargo check --profile ${{ matrix.profile }}'
- uses: taiki-e/install-action@0c5db7f7f897c03b771660e91d065338615679f4 # v2
with:
tool: nextest
version: 0.9.103
- name: tests
id: test
# Tests take too long for release builds to run them on every PR.
if: ${{ matrix.profile != 'release' }}
continue-on-error: true
run: cargo nextest run --all-features --no-fail-fast --target ${{ matrix.target }}
env:
RUST_BACKTRACE: 1
# Save caches explicitly; make non-fatal so cache packaging
# never fails the overall job. Only save when key wasn't hit.
- name: Save cargo home cache
@@ -271,193 +217,33 @@ jobs:
~/.cargo/registry/index/
~/.cargo/registry/cache/
~/.cargo/git/db/
key: cargo-home-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-${{ hashFiles('**/Cargo.lock') }}-${{ hashFiles('codex-rs/rust-toolchain.toml') }}
key: cargo-home-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-${{ hashFiles('**/Cargo.lock') }}
- name: Save sccache cache (fallback)
if: always() && !cancelled() && env.SCCACHE_GHA_ENABLED != 'true'
- name: Save target cache (except gnu-dev)
if: >-
always() && !cancelled() &&
(steps.cache_target_restore.outputs.cache-hit != 'true') &&
!(matrix.target == 'x86_64-unknown-linux-gnu' && matrix.profile != 'release')
continue-on-error: true
uses: actions/cache/save@v4
with:
path: ${{ github.workspace }}/.sccache/
key: sccache-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-${{ hashFiles('**/Cargo.lock') }}-${{ github.run_id }}
- name: sccache stats
if: always()
continue-on-error: true
run: sccache --show-stats || true
- name: sccache summary
if: always()
shell: bash
run: |
{
echo "### sccache stats — ${{ matrix.target }} (${{ matrix.profile }})";
echo;
echo '```';
sccache --show-stats || true;
echo '```';
} >> "$GITHUB_STEP_SUMMARY"
- name: Save APT cache (musl)
if: always() && !cancelled() && (matrix.target == 'x86_64-unknown-linux-musl' || matrix.target == 'aarch64-unknown-linux-musl') && steps.cache_apt_restore.outputs.cache-hit != 'true'
continue-on-error: true
uses: actions/cache/save@v4
with:
path: |
/var/cache/apt
key: apt-${{ matrix.runner }}-${{ matrix.target }}-v1
path: ${{ github.workspace }}/codex-rs/target/
key: cargo-target-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-${{ hashFiles('**/Cargo.lock') }}
# Fail the job if any of the previous steps failed.
- name: verify all steps passed
if: |
steps.clippy.outcome == 'failure' ||
steps.cargo_check_all_crates.outcome == 'failure'
steps.cargo_check_all_crates.outcome == 'failure' ||
steps.test.outcome == 'failure'
run: |
echo "One or more checks failed (clippy or cargo_check_all_crates). See logs for details."
exit 1
tests:
name: Tests — ${{ matrix.runner }} - ${{ matrix.target }}
runs-on: ${{ matrix.runner }}
timeout-minutes: 30
needs: changed
if: ${{ needs.changed.outputs.codex == 'true' || needs.changed.outputs.workflows == 'true' || github.event_name == 'push' }}
defaults:
run:
working-directory: codex-rs
env:
RUSTC_WRAPPER: sccache
CARGO_INCREMENTAL: "0"
SCCACHE_CACHE_SIZE: 10G
strategy:
fail-fast: false
matrix:
include:
- runner: macos-14
target: aarch64-apple-darwin
profile: dev
- runner: ubuntu-24.04
target: x86_64-unknown-linux-gnu
profile: dev
- runner: ubuntu-24.04-arm
target: aarch64-unknown-linux-gnu
profile: dev
- runner: windows-latest
target: x86_64-pc-windows-msvc
profile: dev
- runner: windows-11-arm
target: aarch64-pc-windows-msvc
profile: dev
steps:
- uses: actions/checkout@v5
- uses: dtolnay/rust-toolchain@1.90
with:
targets: ${{ matrix.target }}
- name: Restore cargo home cache
id: cache_cargo_home_restore
uses: actions/cache/restore@v4
with:
path: |
~/.cargo/bin/
~/.cargo/registry/index/
~/.cargo/registry/cache/
~/.cargo/git/db/
key: cargo-home-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-${{ hashFiles('**/Cargo.lock') }}-${{ hashFiles('codex-rs/rust-toolchain.toml') }}
restore-keys: |
cargo-home-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-
- name: Install sccache
uses: taiki-e/install-action@0c5db7f7f897c03b771660e91d065338615679f4 # v2
with:
tool: sccache
version: 0.7.5
- name: Configure sccache backend
shell: bash
run: |
set -euo pipefail
if [[ -n "${ACTIONS_CACHE_URL:-}" && -n "${ACTIONS_RUNTIME_TOKEN:-}" ]]; then
echo "SCCACHE_GHA_ENABLED=true" >> "$GITHUB_ENV"
echo "Using sccache GitHub backend"
else
echo "SCCACHE_GHA_ENABLED=false" >> "$GITHUB_ENV"
echo "SCCACHE_DIR=${{ github.workspace }}/.sccache" >> "$GITHUB_ENV"
echo "Using sccache local disk + actions/cache fallback"
fi
- name: Restore sccache cache (fallback)
if: ${{ env.SCCACHE_GHA_ENABLED != 'true' }}
id: cache_sccache_restore
uses: actions/cache/restore@v4
with:
path: ${{ github.workspace }}/.sccache/
key: sccache-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-${{ hashFiles('**/Cargo.lock') }}-${{ github.run_id }}
restore-keys: |
sccache-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-${{ hashFiles('**/Cargo.lock') }}-
sccache-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-
- uses: taiki-e/install-action@0c5db7f7f897c03b771660e91d065338615679f4 # v2
with:
tool: nextest
version: 0.9.103
- name: tests
id: test
continue-on-error: true
run: cargo nextest run --all-features --no-fail-fast --target ${{ matrix.target }} --cargo-profile ci-test
env:
RUST_BACKTRACE: 1
- name: Save cargo home cache
if: always() && !cancelled() && steps.cache_cargo_home_restore.outputs.cache-hit != 'true'
continue-on-error: true
uses: actions/cache/save@v4
with:
path: |
~/.cargo/bin/
~/.cargo/registry/index/
~/.cargo/registry/cache/
~/.cargo/git/db/
key: cargo-home-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-${{ hashFiles('**/Cargo.lock') }}-${{ hashFiles('codex-rs/rust-toolchain.toml') }}
- name: Save sccache cache (fallback)
if: always() && !cancelled() && env.SCCACHE_GHA_ENABLED != 'true'
continue-on-error: true
uses: actions/cache/save@v4
with:
path: ${{ github.workspace }}/.sccache/
key: sccache-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-${{ hashFiles('**/Cargo.lock') }}-${{ github.run_id }}
- name: sccache stats
if: always()
continue-on-error: true
run: sccache --show-stats || true
- name: sccache summary
if: always()
shell: bash
run: |
{
echo "### sccache stats — ${{ matrix.target }} (tests)";
echo;
echo '```';
sccache --show-stats || true;
echo '```';
} >> "$GITHUB_STEP_SUMMARY"
- name: verify tests passed
if: steps.test.outcome == 'failure'
run: |
echo "Tests failed. See logs for details."
echo "One or more checks failed (clippy, cargo_check_all_crates, or test). See logs for details."
exit 1
# --- Gatherer job that you mark as the ONLY required status -----------------
results:
name: CI results (required)
needs: [changed, general, cargo_shear, lint_build, tests]
needs: [changed, general, cargo_shear, lint_build_test]
if: always()
runs-on: ubuntu-24.04
steps:
@@ -466,8 +252,7 @@ jobs:
run: |
echo "general: ${{ needs.general.result }}"
echo "shear : ${{ needs.cargo_shear.result }}"
echo "lint : ${{ needs.lint_build.result }}"
echo "tests : ${{ needs.tests.result }}"
echo "matrix : ${{ needs.lint_build_test.result }}"
# If nothing relevant changed (PR touching only root README, etc.),
# declare success regardless of other jobs.
@@ -479,10 +264,4 @@ jobs:
# Otherwise require the jobs to have succeeded
[[ '${{ needs.general.result }}' == 'success' ]] || { echo 'general failed'; exit 1; }
[[ '${{ needs.cargo_shear.result }}' == 'success' ]] || { echo 'cargo_shear failed'; exit 1; }
[[ '${{ needs.lint_build.result }}' == 'success' ]] || { echo 'lint_build failed'; exit 1; }
[[ '${{ needs.tests.result }}' == 'success' ]] || { echo 'tests failed'; exit 1; }
- name: sccache summary note
if: always()
run: |
echo "Per-job sccache stats are attached to each matrix job's Step Summary."
[[ '${{ needs.lint_build_test.result }}' == 'success' ]] || { echo 'matrix failed'; exit 1; }

View File

@@ -58,9 +58,9 @@ jobs:
fail-fast: false
matrix:
include:
- runner: macos-15-xlarge
- runner: macos-14
target: aarch64-apple-darwin
- runner: macos-15-xlarge
- runner: macos-14
target: x86_64-apple-darwin
- runner: ubuntu-24.04
target: x86_64-unknown-linux-musl
@@ -100,7 +100,7 @@ jobs:
- name: Cargo build
run: cargo build --target ${{ matrix.target }} --release --bin codex --bin codex-responses-api-proxy
- if: ${{ matrix.runner == 'macos-15-xlarge' }}
- if: ${{ matrix.runner == 'macos-14' }}
name: Configure Apple code signing
shell: bash
env:
@@ -185,7 +185,7 @@ jobs:
echo "APPLE_CODESIGN_KEYCHAIN=$keychain_path" >> "$GITHUB_ENV"
echo "::add-mask::$APPLE_CODESIGN_IDENTITY"
- if: ${{ matrix.runner == 'macos-15-xlarge' }}
- if: ${{ matrix.runner == 'macos-14' }}
name: Sign macOS binaries
shell: bash
run: |
@@ -206,7 +206,7 @@ jobs:
codesign --force --options runtime --timestamp --sign "$APPLE_CODESIGN_IDENTITY" "${keychain_args[@]}" "$path"
done
- if: ${{ matrix.runner == 'macos-15-xlarge' }}
- if: ${{ matrix.runner == 'macos-14' }}
name: Notarize macOS binaries
shell: bash
env:
@@ -328,7 +328,7 @@ jobs:
done
- name: Remove signing keychain
if: ${{ always() && matrix.runner == 'macos-15-xlarge' }}
if: ${{ always() && matrix.runner == 'macos-14' }}
shell: bash
env:
APPLE_CODESIGN_KEYCHAIN: ${{ env.APPLE_CODESIGN_KEYCHAIN }}

View File

@@ -1 +1 @@
The changelog can be found on the [releases page](https://github.com/openai/codex/releases).
The changelog can be found on the [releases page](https://github.com/openai/codex/releases)

View File

@@ -33,8 +33,6 @@ Then simply run `codex` to get started:
codex
```
If you're running into upgrade issues with Homebrew, see the [FAQ entry on brew upgrade codex](./docs/faq.md#brew-upgrade-codex-isnt-upgrading-me).
<details>
<summary>You can also go to the <a href="https://github.com/openai/codex/releases/latest">latest GitHub Release</a> and download the appropriate binary for your platform.</summary>
@@ -79,7 +77,7 @@ Codex CLI supports a rich set of configuration options, with preferences stored
- [Example prompts](./docs/getting-started.md#example-prompts)
- [Custom prompts](./docs/prompts.md)
- [Memory with AGENTS.md](./docs/getting-started.md#memory-with-agentsmd)
- [**Configuration**](./docs/config.md)
- [Configuration](./docs/config.md)
- [**Sandbox & approvals**](./docs/sandbox.md)
- [**Authentication**](./docs/authentication.md)
- [Auth methods](./docs/authentication.md#forcing-a-specific-auth-method-advanced)

View File

@@ -3,8 +3,8 @@
<p align="center"><code>npm i -g @openai/codex</code></p>
> [!IMPORTANT]
> This is the documentation for the _legacy_ TypeScript implementation of the Codex CLI. It has been superseded by the _Rust_ implementation. See the [README in the root of the Codex repository](https://github.com/openai/codex/blob/main/README.md) for details.
> [!NOTE]
> This README focuses on the native Rust CLI. For additional deep dives, see the [docs/](../docs) folder and the [root README](https://github.com/openai/codex/blob/main/README.md).
![Codex demo GIF using: codex "explain this codebase to me"](../.github/demo.gif)
@@ -94,37 +94,8 @@ export OPENAI_API_KEY="your-api-key-here"
>
> The CLI will automatically load variables from `.env` (via `dotenv/config`).
<details>
<summary><strong>Use <code>--provider</code> to use other models</strong></summary>
> Codex also allows you to use other providers that support the OpenAI Chat Completions API. You can set the provider in the config file or use the `--provider` flag. The possible options for `--provider` are:
>
> - openai (default)
> - openrouter
> - azure
> - gemini
> - ollama
> - mistral
> - deepseek
> - xai
> - groq
> - arceeai
> - any other provider that is compatible with the OpenAI API
>
> If you use a provider other than OpenAI, you will need to set the API key for the provider in the config file or in the environment variable as:
>
> ```shell
> export <provider>_API_KEY="your-api-key-here"
> ```
>
> If you use a provider not listed above, you must also set the base URL for the provider:
>
> ```shell
> export <provider>_BASE_URL="https://your-provider-api-base-url"
> ```
</details>
<br />
> [!TIP]
> The CLI ships with OpenAI and local OSS providers out of the box. To add additional providers, edit the `[model_providers]` table in `~/.codex/config.toml`. See [Configuration guide](#configuration-guide) for examples.
Run interactively:
@@ -139,7 +110,7 @@ codex "explain this codebase to me"
```
```shell
codex --approval-mode full-auto "create the fanciest todo-list app"
codex --full-auto "create the fanciest todo-list app"
```
That's it - Codex will scaffold a file, run it inside a sandbox, install any
@@ -165,67 +136,61 @@ And it's **fully open-source** so you can see and contribute to how it develops!
## Security model & permissions
Codex lets you decide _how much autonomy_ the agent receives and auto-approval policy via the
`--approval-mode` flag (or the interactive onboarding prompt):
Codex lets you decide _how much autonomy_ the agent receives via the
`--ask-for-approval` flag (or the interactive onboarding prompt). The default is `on-request`.
| Mode | What the agent may do without asking | Still requires approval |
| ------------------------- | --------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------- |
| **Suggest** <br>(default) | <li>Read any file in the repo | <li>**All** file writes/patches<li> **Any** arbitrary shell commands (aside from reading files) |
| **Auto Edit** | <li>Read **and** apply-patch writes to files | <li>**All** shell commands |
| **Full Auto** | <li>Read/write files <li> Execute shell commands (network disabled, writes limited to your workdir) | - |
| Mode (`--ask-for-approval …`) | Auto-approves | Escalates to you when… |
| ----------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------ |
| `untrusted` | Built-in "safe" commands that only read files (`ls`, `cat`, `sed`, etc.) | The model proposes writing to disk or running any other command. |
| `on-failure` | All commands, executed inside the configured sandbox with network access disabled and writes limited to the allowed directories. | A command fails in the sandbox and the model wants to retry it without sandboxing. |
| `on-request` _(default)_ | Whatever the model deems safe; it typically asks you before launching riskier commands or writing files. | The model decides it wants confirmation, or the sandbox refuses a command and the model asks to retry. |
| `never` | Everything, with no escalation. | Never; failures are returned straight to the model. |
In **Full Auto** every command is run **network-disabled** and confined to the
current working directory (plus temporary files) for defense-in-depth. Codex
will also show a warning/confirmation if you start in **auto-edit** or
**full-auto** while the directory is _not_ tracked by Git, so you always have a
safety net.
Coming soon: you'll be able to whitelist specific commands to auto-execute with
the network enabled, once we're confident in additional safeguards.
Use `codex --full-auto` as a shorthand for `--ask-for-approval on-failure --sandbox workspace-write`. For air-gapped or CI environments that provide their own isolation, `--dangerously-bypass-approvals-and-sandbox` disables both confirmation prompts and sandboxing—double-check before using it.
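
For a concrete feel of how the modes differ, here is a minimal sketch (the prompts are illustrative):

```shell
# Only built-in "safe", read-only commands run without asking.
codex --ask-for-approval untrusted "audit the test suite"

# Everything runs inside the sandbox; Codex escalates only when a command fails there.
codex --ask-for-approval on-failure --sandbox workspace-write "fix the failing tests"

# Shorthand for the line above.
codex --full-auto "fix the failing tests"
```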
### Platform sandboxing details
The hardening mechanism Codex uses depends on your OS:
- **macOS 12+** - commands are wrapped with **Apple Seatbelt** (`sandbox-exec`).
- Everything is placed in a read-only jail except for a small set of
writable roots (`$PWD`, `$TMPDIR`, `~/.codex`, etc.).
- Outbound network is _fully blocked_ by default - even if a child process
- Outbound network is _fully blocked_ by default; even if a child process
  tries to `curl` somewhere, it will fail.
- **Linux** - there is no sandboxing by default.
We recommend using Docker for sandboxing, where Codex launches itself inside a **minimal
container image** and mounts your repo _read/write_ at the same path. A
custom `iptables`/`ipset` firewall script denies all egress except the
OpenAI API. This gives you deterministic, reproducible runs without needing
root on the host. You can use the [`run_in_container.sh`](../codex-cli/scripts/run_in_container.sh) script to set up the sandbox.
- **Linux** - commands run through the bundled `codex-linux-sandbox` helper. It combines **Landlock** filesystem rules with a **seccomp** filter, mirroring the macOS policy: commands start network-disabled and only the working directory (plus a few temp paths) are writable. You still get escape hatches via the `--sandbox` flag:
- `--sandbox read-only` is ideal for review-only sessions.
- `--sandbox danger-full-access` turns the sandbox off. Pair it with `--ask-for-approval untrusted` if you still want Codex to double-check risky commands.
Containers (Docker/Podman) can still be useful when you want completely reproducible toolchains, GPU access, or custom OS packages. In that case launch the CLI inside your container and keep the built-in sandbox on; it will happily sandbox _inside_ the container.
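
For example (prompts are placeholders):

```shell
# Read-only review session: no writes, no network.
codex --sandbox read-only "explain the sandbox implementation"

# Opt out of sandboxing entirely, but keep approval prompts for risky commands.
codex --sandbox danger-full-access --ask-for-approval untrusted
```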
---
## System requirements
| Requirement | Details |
| --------------------------- | --------------------------------------------------------------- |
| Operating systems | macOS 12+, Ubuntu 20.04+/Debian 10+, or Windows 11 **via WSL2** |
| Node.js | **16 or newer** (Node 20 LTS recommended) |
| Git (optional, recommended) | 2.23+ for built-in PR helpers |
| RAM | 4-GB minimum (8-GB recommended) |
| Requirement | Details |
| --------------------------- | ----------------------------------------------------------------------- |
| Operating systems | macOS 12+, Ubuntu 22.04+/Debian 12+, or Windows 11 via WSL2 |
| Runtime dependencies | None for the packaged binaries (install via npm, Homebrew, or releases) |
| Git (optional, recommended) | 2.39+ for built-in PR helpers |
| RAM | 4-GB minimum (8-GB recommended) |
> Never run `sudo npm install -g`; fix npm permissions instead.
> Never run `sudo npm install -g`; fix npm permissions or use another package manager instead.
---
## CLI reference
| Command | Purpose | Example |
| ------------------------------------ | ----------------------------------- | ------------------------------------ |
| `codex` | Interactive REPL | `codex` |
| `codex "..."` | Initial prompt for interactive REPL | `codex "fix lint errors"` |
| `codex -q "..."` | Non-interactive "quiet mode" | `codex -q --json "explain utils.ts"` |
| `codex completion <bash\|zsh\|fish>` | Print shell completion script | `codex completion bash` |
| Command | Purpose | Example |
| ------------------------------------ | --------------------------------------------------- | ---------------------------------------------------- |
| `codex` | Launch the interactive TUI | `codex` |
| `codex "..."` | Seed the interactive session with an opening task | `codex "fix lint errors"` |
| `codex exec "..."` | Run a non-interactive turn in the current repo | `codex exec "count the total number of TODO comments"` |
| `codex exec --json "..."` | Stream machine-readable events as JSON Lines | `codex exec --json --full-auto "update CHANGELOG"` |
| `codex exec resume --last "..."` | Resume the most recent non-interactive session | `codex exec resume --last "ship the follow-up fix"` |
| `codex completion <bash\|zsh\|fish>` | Print shell completion script for your shell | `codex completion bash` |
Key flags: `--model/-m`, `--approval-mode/-a`, `--quiet/-q`, and `--notify`.
Helpful flags: `--model/-m`, `--ask-for-approval/-a`, `--sandbox/-s`, `--oss`, `--full-auto`, `--config/-c key=value`, and `--web-search`.
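
As a sketch of how these flags compose (the model name and prompt are illustrative):

```shell
# Pick a model, allow workspace writes, and enable web search for one session.
codex --model gpt-5-codex --sandbox workspace-write --web-search "update the dependencies"
```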
---
@@ -237,8 +202,6 @@ You can give Codex extra instructions and guidance using `AGENTS.md` files. Code
2. `AGENTS.md` at repo root - shared project notes
3. `AGENTS.md` in the current working directory - sub-folder/feature specifics
Disable loading of these files with `--no-project-doc` or the environment variable `CODEX_DISABLE_PROJECT_DOC=1`.
---
## Non-interactive / CI mode
@@ -250,19 +213,21 @@ Run Codex head-less in pipelines. Example GitHub Action step:
run: |
npm install -g @openai/codex
export OPENAI_API_KEY="${{ secrets.OPENAI_KEY }}"
codex -a auto-edit --quiet "update CHANGELOG for next release"
codex exec --json --full-auto "update CHANGELOG for next release" > codex.log
```
Set `CODEX_QUIET_MODE=1` to silence interactive UI noise.
`codex exec` streams its progress to stderr and writes the final assistant reply to stdout. Use `--json` when you need structured output, or `-o path/to/result.json` to capture just the closing message.
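
A sketch of wiring that into a script (file names are illustrative):

```shell
# Structured events go to stdout as JSON Lines; progress stays on stderr.
codex exec --json --full-auto "update CHANGELOG for next release" > events.jsonl

# Or capture only the final assistant message.
codex exec --full-auto "update CHANGELOG for next release" -o result.json
```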
## Tracing / verbose logging
Setting the environment variable `DEBUG=true` prints full API request and response details:
Set `RUST_LOG` to control structured logging. The default filter is `codex_core=info,codex_tui=info,codex_rmcp_client=info`. To turn on verbose logs for troubleshooting:
```shell
DEBUG=true codex
RUST_LOG=codex_core=debug,codex_tui=debug codex
```
Logs are written to `~/.codex/logs/codex-tui.log` in addition to stderr. You can use standard `env_logger` syntax (e.g., `RUST_LOG=info,reqwest=trace`).
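
To watch the log file while reproducing an issue, something like:

```shell
# Verbose logs in one terminal...
RUST_LOG=codex_core=debug,codex_tui=debug codex
# ...and follow the log file from another.
tail -f ~/.codex/logs/codex-tui.log
```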
---
## Recipes
@@ -302,28 +267,21 @@ pnpm add -g @openai/codex
<summary><strong>Build from source</strong></summary>
```bash
# Clone the repository and navigate to the CLI package
git clone https://github.com/openai/codex.git
cd codex/codex-cli

# Enable corepack
corepack enable

# Install dependencies and build
pnpm install
pnpm build

# Linux-only: download prebuilt sandboxing binaries (requires gh and zstd).
./scripts/install_native_deps.sh

# Get the usage and the options
node ./dist/cli.js --help

# Run the locally-built CLI directly
node ./dist/cli.js

# Or link the command globally for convenience
pnpm link
```

```bash
# Clone the repository and navigate to the workspace root
git clone https://github.com/openai/codex.git
cd codex

# Ensure you have the latest stable Rust toolchain
rustup default stable

# (Optional) install just for handy automation
cargo install just

# Build the interactive CLI
cargo build -p codex-tui

# Run it directly from source
cargo run -p codex-tui -- --help
```
</details>
@@ -332,153 +290,93 @@ pnpm link
## Configuration guide
Codex configuration files can be placed in the `~/.codex/` directory, supporting both YAML and JSON formats.
Codex reads configuration from `~/.codex/config.toml` (or `$CODEX_HOME/config.toml`). TOML is the only supported format. Command-line flags (`--model`, `--ask-for-approval`, `--config key=value`, etc.) override whatever is set in the file.
### Basic configuration parameters
| Parameter | Type | Default | Description | Available Options |
| ------------------- | ------- | ---------- | -------------------------------- | ---------------------------------------------------------------------------------------------- |
| `model` | string | `o4-mini` | AI model to use | Any model name supporting OpenAI API |
| `approvalMode` | string | `suggest` | AI assistant's permission mode | `suggest` (suggestions only)<br>`auto-edit` (automatic edits)<br>`full-auto` (fully automatic) |
| `fullAutoErrorMode` | string | `ask-user` | Error handling in full-auto mode | `ask-user` (prompt for user input)<br>`ignore-and-continue` (ignore and proceed) |
| `notify` | boolean | `true` | Enable desktop notifications | `true`/`false` |
| Key | Type | Default | Description |
| ------------------ | -------- | -------------------------------------------- | ------------------------------------------------------------------------------------------------- |
| `model` | string | `gpt-5-codex` (macOS/Linux) / `gpt-5` (WSL) | Selects the default model. |
| `model_provider` | string | `openai` | Picks an entry from the `[model_providers]` table. |
| `approval_policy` | string | `on-request` | Matches the CLI `--ask-for-approval` flag (`untrusted`, `on-failure`, `on-request`, `never`). |
| `sandbox_mode` | string | `workspace-write` on trusted repos, otherwise read-only | Controls how shell commands are sandboxed (`read-only`, `workspace-write`, `danger-full-access`). |
| `notify` | array | _unset_ | Optional notifier command: e.g. `notify = ["terminal-notifier", "-message", "Codex done"]`. |
| `tui_notifications`| table | `{"approvals": true, "turns": true}` | Controls OSC 9 terminal notifications. |
| `history.persistence` | string | `save-all` | `save-all`, `commands-only`, or `none`. |
| `hide_agent_reasoning` | bool | `false` | Suppress reasoning summaries in the UI. |
Use `codex --config key=value` to experiment without editing the file. For example, `codex --config approval_policy="untrusted"`.

### Custom AI provider configuration

In the `providers` object, you can configure multiple AI service providers. Each provider requires the following parameters:

| Parameter | Type   | Description                              | Example                       |
| --------- | ------ | ---------------------------------------- | ----------------------------- |
| `name`    | string | Display name of the provider             | `"OpenAI"`                    |
| `baseURL` | string | API service URL                          | `"https://api.openai.com/v1"` |
| `envKey`  | string | Environment variable name (for API key)  | `"OPENAI_API_KEY"`            |

### History configuration

In the `history` object, you can configure conversation history settings:

| Parameter           | Type    | Description                                             | Example Value |
| ------------------- | ------- | ------------------------------------------------------- | ------------- |
| `maxSize`           | number  | Maximum number of history entries to save               | `1000`        |
| `saveHistory`       | boolean | Whether to save history                                 | `true`        |
| `sensitivePatterns` | array   | Patterns of sensitive information to filter in history  | `[]`          |

### Configuration examples

1. YAML format (save as `~/.codex/config.yaml`):

```yaml
model: o4-mini
approvalMode: suggest
fullAutoErrorMode: ask-user
notify: true
```

2. JSON format (save as `~/.codex/config.json`):

```json
{
  "model": "o4-mini",
  "approvalMode": "suggest",
  "fullAutoErrorMode": "ask-user",
  "notify": true
}
```

### Managing model providers

The CLI bundles two providers: `openai` (Responses API) and `oss` (local models via Ollama). You can add more by extending the `model_providers` map. Entries do **not** replace the defaults; they are merged in.

```toml
model = "gpt-4o"
model_provider = "openai-chat"

[model_providers.openai-chat]
name = "OpenAI (Chat Completions)"
base_url = "https://api.openai.com/v1"
wire_api = "chat"
env_key = "OPENAI_API_KEY"

[model_providers.ollama]
name = "Ollama"
base_url = "http://localhost:11434/v1"
```

Set API keys by exporting the environment variable referenced by each provider (`env_key`). If you need to override headers or query parameters, add `http_headers`, `env_http_headers`, or `query_params` within the provider block. See [`docs/config.md`](../docs/config.md#model_providers) for more examples, including Azure and custom retries.
### History, profiles, and overrides
- History is controlled via the `[history]` table. Example:
```toml
[history]
persistence = "commands-only"
redact_patterns = ["api_key=*"]
```
- Use profiles to store alternative defaults:
```toml
[profiles.ops]
model = "gpt-5"
approval_policy = "untrusted"
sandbox_mode = "read-only"
```
Launch with `codex --profile ops`.
- Override individual keys for a single run: `codex --config history.persistence="none"`.
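
Putting profiles and one-off overrides together (using the `ops` profile from the example above; the prompt is illustrative):

```shell
# Start from the ops profile, but relax its approval policy for this run only.
codex --profile ops --config approval_policy="on-request" "triage the open issues"
```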
### MCP servers and instructions
Add MCP integrations with `[mcp_servers.<id>]` blocks (stdio or streamable HTTP). Refer to [`docs/config.md#mcps`](../docs/config.md#mcp-integration) for the schema.
For persistent guidance, create `AGENTS.md` files in `~/.codex`, your repo root, or subdirectories. Codex merges them from root to current directory before each turn.
### Example `config.toml`
```toml
model = "gpt-5-codex"
model_provider = "openai"
approval_policy = "untrusted"
sandbox_mode = "workspace-write"
[history]
persistence = "save-all"
[model_providers.azure]
name = "Azure"
base_url = "https://YOUR_RESOURCE_NAME.openai.azure.com/openai"
env_key = "AZURE_OPENAI_API_KEY"
wire_api = "responses"
query_params = { api-version = "2025-04-01-preview" }
```
### Full configuration example
Below is a comprehensive example of `config.json` with multiple custom providers:
```json
{
"model": "o4-mini",
"provider": "openai",
"providers": {
"openai": {
"name": "OpenAI",
"baseURL": "https://api.openai.com/v1",
"envKey": "OPENAI_API_KEY"
},
"azure": {
"name": "AzureOpenAI",
"baseURL": "https://YOUR_PROJECT_NAME.openai.azure.com/openai",
"envKey": "AZURE_OPENAI_API_KEY"
},
"openrouter": {
"name": "OpenRouter",
"baseURL": "https://openrouter.ai/api/v1",
"envKey": "OPENROUTER_API_KEY"
},
"gemini": {
"name": "Gemini",
"baseURL": "https://generativelanguage.googleapis.com/v1beta/openai",
"envKey": "GEMINI_API_KEY"
},
"ollama": {
"name": "Ollama",
"baseURL": "http://localhost:11434/v1",
"envKey": "OLLAMA_API_KEY"
},
"mistral": {
"name": "Mistral",
"baseURL": "https://api.mistral.ai/v1",
"envKey": "MISTRAL_API_KEY"
},
"deepseek": {
"name": "DeepSeek",
"baseURL": "https://api.deepseek.com",
"envKey": "DEEPSEEK_API_KEY"
},
"xai": {
"name": "xAI",
"baseURL": "https://api.x.ai/v1",
"envKey": "XAI_API_KEY"
},
"groq": {
"name": "Groq",
"baseURL": "https://api.groq.com/openai/v1",
"envKey": "GROQ_API_KEY"
},
"arceeai": {
"name": "ArceeAI",
"baseURL": "https://conductor.arcee.ai/v1",
"envKey": "ARCEEAI_API_KEY"
}
},
"history": {
"maxSize": 1000,
"saveHistory": true,
"sensitivePatterns": []
}
}
```
### Custom instructions
You can create a `~/.codex/AGENTS.md` file to define custom guidance for the agent:
```markdown
- Always respond with emojis
- Only use git commands when explicitly requested
```
### Environment variables setup
For each AI provider, you need to set the corresponding API key in your environment variables. For example:
```bash
# OpenAI
export OPENAI_API_KEY="your-api-key-here"
# Azure OpenAI
export AZURE_OPENAI_API_KEY="your-azure-api-key-here"
export AZURE_OPENAI_API_VERSION="2025-04-01-preview"  # optional
# OpenRouter
export OPENROUTER_API_KEY="your-openrouter-key-here"
# Similarly for other providers
```
Restart Codex (or run the next command with `--config`) after editing the file to pick up changes.
---
@@ -494,7 +392,7 @@ In 2021, OpenAI released Codex, an AI system designed to generate code from natu
<details>
<summary>Which models are supported?</summary>
Any model available with [Responses API](https://platform.openai.com/docs/api-reference/responses). The default is `o4-mini`, but pass `--model gpt-4.1` or set `model: gpt-4.1` in your config file to override.
Any model available via the [Responses API](https://platform.openai.com/docs/api-reference/responses). The default is `gpt-5-codex` (or `gpt-5` on Windows/WSL), but pass `--model` or set `model = "gpt-4.1"` in `config.toml` to override.
</details>
<details>
@@ -507,13 +405,13 @@ It's possible that your [API account needs to be verified](https://help.openai.c
<details>
<summary>How do I stop Codex from editing my files?</summary>
Codex runs model-generated commands in a sandbox. If a proposed command or file change doesn't look right, you can simply type **n** to deny the command or give the model feedback.
Run with `codex --ask-for-approval untrusted` or `codex --sandbox read-only` to force Codex to ask before making changes. In interactive sessions, you can also deny a specific command or patch by answering **n** when prompted.
</details>
<details>
<summary>Does it work on Windows?</summary>
Not directly. It requires [Windows Subsystem for Linux (WSL2)](https://learn.microsoft.com/en-us/windows/wsl/install) - Codex is regularly tested on macOS and Linux with Node 20+, and also supports Node 16.
Not natively. Use [Windows Subsystem for Linux (WSL2)](https://learn.microsoft.com/en-us/windows/wsl/install) and install the Linux build inside your WSL environment. We regularly test on macOS and Linux.
</details>
@@ -544,59 +442,25 @@ We're excited to launch a **$1 million initiative** supporting open source proje
## Contributing
This project is under active development and the code will likely change pretty significantly. We'll update this message once that's complete!
This project is under active development and we currently prioritize external contributions that address bugs or security issues. If you are proposing a new feature or behavior change, please open an issue first and get confirmation from the team before investing significant effort.
More broadly we welcome contributions - whether you are opening your very first pull request or you're a seasoned maintainer. At the same time we care about reliability and long-term maintainability, so the bar for merging code is intentionally **high**. The guidelines below spell out what "high-quality" means in practice and should make the whole process transparent and friendly.
We care deeply about reliability and long-term maintainability, so the bar for merging code is intentionally **high**. Use this README together with the canonical [contributor guide](../docs/contributing.md).
### Development workflow
- Create a _topic branch_ from `main` - e.g. `feat/interactive-prompt`.
- Keep your changes focused. Multiple unrelated fixes should be opened as separate PRs.
- Use `pnpm test:watch` during development for super-fast feedback.
- We use **Vitest** for unit tests, **ESLint** + **Prettier** for style, and **TypeScript** for type-checking.
- Before pushing, run the full test/type/lint suite:
### Git hooks with Husky
This project uses [Husky](https://typicode.github.io/husky/) to enforce code quality checks:
- **Pre-commit hook**: Automatically runs lint-staged to format and lint files before committing
- **Pre-push hook**: Runs tests and type checking before pushing to the remote
These hooks help maintain code quality and prevent pushing code with failing tests. For more details, see [HUSKY.md](./HUSKY.md).
```bash
pnpm test && pnpm run lint && pnpm run typecheck
```
- If you have **not** yet signed the Contributor License Agreement (CLA), add a PR comment containing the exact text
```text
I have read the CLA Document and I hereby sign the CLA
```
The CLA-Assistant bot will turn the PR status green once all authors have signed.
```bash
# Watch mode (tests rerun on change)
pnpm test:watch
# Type-check without emitting files
pnpm typecheck
# Automatically fix lint + prettier issues
pnpm lint:fix
pnpm format:fix
```
- Create a topic branch from `main` (for example `feat/improve-sandbox`).
- Keep changes focused; unrelated fixes should land as separate PRs.
- Install Rust 1.80+ and `just`. Most commands run from the repo root:
- `just fmt` formats all Rust code.
- `just fix -p codex-tui` runs `cargo clippy --fix` and `cargo fmt` for the TUI crate (swap the crate name as appropriate).
- `cargo test -p codex-tui` or other crate-specific test commands keep feedback fast.
- If you touch shared crates (for example `codex-core` or `codex-common`), prefer `cargo test --all-features` after the targeted suite passes.
### Debugging
To debug the CLI with a visual debugger, do the following in the `codex-cli` folder:
- Run `pnpm run build` to build the CLI, which will generate `cli.js.map` alongside `cli.js` in the `dist` folder.
- Run the CLI with `node --inspect-brk ./dist/cli.js` The program then waits until a debugger is attached before proceeding. Options:
- In VS Code, choose **Debug: Attach to Node Process** from the command palette and choose the option in the dropdown with debug port `9229` (likely the first option)
- Go to <chrome://inspect> in Chrome and find **localhost:9229** and click **trace**
- Run `cargo run -p codex-tui --` to launch the CLI under your debugger of choice. `cargo run -p codex-cli --bin codex-linux-sandbox -- --help` is helpful when iterating on the sandbox helper.
- Set `RUST_LOG=codex_core=debug,codex_tui=debug` to capture verbose logs (see [Tracing](#tracing--verbose-logging)).
- Use `cargo test -p <crate> -- --nocapture` to see println!/tracing output from tests while iterating on new features.
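
Combining the points above into one debugging loop (a sketch):

```bash
# Run the TUI from source with verbose logging enabled.
RUST_LOG=codex_core=debug,codex_tui=debug cargo run -p codex-tui --
```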
### Writing high-impact code changes
@@ -607,10 +471,10 @@ To debug the CLI with a visual debugger, do the following in the `codex-cli` fol
### Opening a pull request
- Fill in the PR template (or include similar information) - **What? Why? How?**
- Run **all** checks locally (`npm test && npm run lint && npm run typecheck`). CI failures that could have been caught locally slow down the process.
- Fill in the PR template (or include similar information): **What? Why? How?**
- Run **all** checks locally (`cargo test`, `cargo clippy --tests`, `cargo fmt -- --check`, plus any `just fix -p <crate>` you relied on); see the combined pass after this list. CI failures that could have been caught locally slow down the process.
- Make sure your branch is up-to-date with `main` and that you have resolved merge conflicts.
- Mark the PR as **Ready for review** only when you believe it is in a merge-able state.
- Mark the PR as **Ready for review** only when you believe it is in a mergeable state.
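
The full local check pass mentioned above, in one place (run from the `codex-rs` workspace root; a sketch):

```bash
# Formatting, lints (including tests), and the test suite, in that order.
cargo fmt -- --check
cargo clippy --tests
cargo test
```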
### Review process
@@ -655,29 +519,22 @@ The **DCO check** blocks merges until every commit in the PR carries the footer
### Releasing `codex`
To publish a new version of the CLI you first need to stage the npm package. A
helper script in `codex-cli/scripts/` does all the heavy lifting. Inside the
`codex-cli` folder run:

```bash
# Classic, JS implementation that includes small, native binaries for Linux sandboxing.
pnpm stage-release

# Optionally specify the temp directory to reuse between runs.
RELEASE_DIR=$(mktemp -d)
pnpm stage-release --tmp "$RELEASE_DIR"

# "Fat" package that additionally bundles the native Rust CLI binaries for
# Linux. End-users can then opt-in at runtime by setting CODEX_RUST=1.
pnpm stage-release --native
```

To stage npm artifacts for a release, run the helper from the repo root:

```bash
./scripts/stage_npm_packages.py \
  --release-version 0.6.0 \
  --package codex
```
Go to the folder where the release is staged and verify that it works as intended. If so, run the following from the temp folder:

```bash
cd "$RELEASE_DIR"
npm publish
```

The script assembles native binaries, hydrates the `vendor/` tree, and writes tarballs to `dist/npm/`. Inspect the generated package contents (for example by extracting them or running `npm pack --dry-run`). When satisfied:

```bash
cd dist/npm
npm publish codex-0.6.0.tgz
```
Add additional `--package` flags if you need to ship the responses proxy or SDK in the same release. See [`codex-cli/scripts/README.md`](./scripts/README.md) for details and troubleshooting tips.
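
For example, a multi-package staging run might look like this (the extra package name is hypothetical; check the scripts README for the real ones):

```bash
# Hypothetical: stage the CLI together with the responses proxy.
./scripts/stage_npm_packages.py \
  --release-version 0.6.0 \
  --package codex \
  --package codex-responses-api-proxy
```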
### Alternative build options

View File

@@ -1,5 +0,0 @@
[target.'cfg(all(windows, target_env = "msvc"))']
rustflags = ["-C", "link-arg=/STACK:8388608"]
[target.'cfg(all(windows, target_env = "gnu"))']
rustflags = ["-C", "link-arg=-Wl,--stack,8388608"]

codex-rs/Cargo.lock (generated): 383 changed lines. File diff suppressed because it is too large.

View File

@@ -16,10 +16,11 @@ members = [
"core",
"exec",
"execpolicy",
"keyring-store",
"file-search",
"git-tooling",
"linux-sandbox",
"login",
"mcp-client",
"mcp-server",
"mcp-types",
"ollama",
@@ -31,14 +32,11 @@ members = [
"stdio-to-uds",
"otel",
"tui",
"utils/git",
"utils/cache",
"utils/image",
"git-apply",
"utils/json-to-toml",
"utils/pty",
"utils/readiness",
"utils/pty",
"utils/string",
"utils/tokenizer",
]
resolver = "2"
@@ -59,17 +57,16 @@ codex-app-server-protocol = { path = "app-server-protocol" }
codex-apply-patch = { path = "apply-patch" }
codex-arg0 = { path = "arg0" }
codex-async-utils = { path = "async-utils" }
codex-backend-client = { path = "backend-client" }
codex-chatgpt = { path = "chatgpt" }
codex-common = { path = "common" }
codex-core = { path = "core" }
codex-exec = { path = "exec" }
codex-feedback = { path = "feedback" }
codex-file-search = { path = "file-search" }
codex-git = { path = "utils/git" }
codex-keyring-store = { path = "keyring-store" }
codex-git-tooling = { path = "git-tooling" }
codex-linux-sandbox = { path = "linux-sandbox" }
codex-login = { path = "login" }
codex-mcp-client = { path = "mcp-client" }
codex-mcp-server = { path = "mcp-server" }
codex-ollama = { path = "ollama" }
codex-otel = { path = "otel" }
@@ -80,14 +77,10 @@ codex-responses-api-proxy = { path = "responses-api-proxy" }
codex-rmcp-client = { path = "rmcp-client" }
codex-stdio-to-uds = { path = "stdio-to-uds" }
codex-tui = { path = "tui" }
codex-utils-cache = { path = "utils/cache" }
codex-utils-image = { path = "utils/image" }
codex-utils-json-to-toml = { path = "utils/json-to-toml" }
codex-utils-pty = { path = "utils/pty" }
codex-utils-readiness = { path = "utils/readiness" }
codex-utils-pty = { path = "utils/pty" }
codex-utils-string = { path = "utils/string" }
codex-utils-tokenizer = { path = "utils/tokenizer" }
codex-windows-sandbox = { path = "windows-sandbox" }
core_test_support = { path = "core/tests/common" }
mcp-types = { path = "mcp-types" }
mcp_test_support = { path = "mcp-server/tests/common" }
@@ -122,13 +115,11 @@ env_logger = "0.11.5"
escargot = "0.5"
eventsource-stream = "0.2.3"
futures = { version = "0.3", default-features = false }
http = "1.3.1"
icu_decimal = "2.1"
icu_provider = { version = "2.1", features = ["sync"] }
icu_locale_core = "2.1"
icu_decimal = "2.0.0"
icu_locale_core = "2.0.0"
ignore = "0.4.23"
image = { version = "^0.25.8", default-features = false }
indexmap = "2.12.0"
indexmap = "2.6.0"
insta = "1.43.2"
itertools = "0.14.0"
keyring = "3.6"
@@ -136,7 +127,6 @@ landlock = "0.4.1"
lazy_static = "1"
libc = "0.2.175"
log = "0.4"
lru = "0.12.5"
maplit = "1.0.2"
mime_guess = "2.0.5"
multimap = "0.10.0"
@@ -152,6 +142,7 @@ os_info = "3.12.0"
owo-colors = "4.2.0"
paste = "1.0.15"
path-absolutize = "3.1.1"
path-clean = "1.0.1"
pathdiff = "0.2"
portable-pty = "0.9.0"
predicates = "3"
@@ -159,10 +150,9 @@ pretty_assertions = "1.4.1"
pulldown-cmark = "0.10"
rand = "0.9"
ratatui = "0.29.0"
ratatui-macros = "0.6.0"
regex-lite = "0.1.7"
reqwest = "0.12"
rmcp = { version = "0.8.3", default-features = false }
rmcp = { version = "0.8.0", default-features = false }
schemars = "0.8.22"
seccompiler = "0.5.0"
sentry = "0.34.0"
@@ -182,7 +172,7 @@ sys-locale = "0.3.2"
tempfile = "3.23.0"
test-log = "0.2.18"
textwrap = "0.16.2"
thiserror = "2.0.17"
thiserror = "2.0.16"
time = "0.3"
tiny_http = "0.12"
tokio = "1"
@@ -211,7 +201,6 @@ walkdir = "2.5.0"
webbrowser = "1.0"
which = "6"
wildmatch = "2.5.0"
wiremock = "0.6"
zeroize = "1.8.1"
@@ -256,7 +245,7 @@ unwrap_used = "deny"
# cargo-shear cannot see the platform-specific openssl-sys usage, so we
# silence the false positive here instead of deleting a real dependency.
[workspace.metadata.cargo-shear]
ignored = ["icu_provider", "openssl-sys", "codex-utils-readiness", "codex-utils-tokenizer"]
ignored = ["openssl-sys", "codex-utils-readiness"]
[profile.release]
lto = "fat"
@@ -267,11 +256,6 @@ strip = "symbols"
# See https://github.com/openai/codex/issues/1411 for details.
codegen-units = 1
[profile.ci-test]
debug = 1 # Reduce debug symbol size
inherits = "test"
opt-level = 0
[patch.crates-io]
# Uncomment to debug local changes.
# ratatui = { path = "../../ratatui" }

View File

@@ -63,9 +63,6 @@ codex sandbox macos [--full-auto] [COMMAND]...
# Linux
codex sandbox linux [--full-auto] [COMMAND]...
# Windows
codex sandbox windows [--full-auto] [COMMAND]...
# Legacy aliases
codex debug seatbelt [--full-auto] [COMMAND]...
codex debug landlock [--full-auto] [COMMAND]...

View File

@@ -16,7 +16,6 @@ use serde::Serialize;
use serde_json::Map;
use serde_json::Value;
use std::collections::BTreeMap;
use std::collections::HashSet;
use std::ffi::OsStr;
use std::fs;
use std::io::Read;
@@ -178,16 +177,24 @@ pub fn generate_json(out_dir: &Path) -> Result<()> {
for (name, schema) in bundle {
let mut schema_value = serde_json::to_value(schema)?;
annotate_schema(&mut schema_value, Some(name.as_str()));
if let Value::Object(ref mut obj) = schema_value {
if let Some(defs) = obj.remove("definitions")
&& let Value::Object(defs_obj) = defs
{
for (def_name, def_schema) in defs_obj {
if !SPECIAL_DEFINITIONS.contains(&def_name.as_str()) {
definitions.insert(def_name, def_schema);
}
}
}
if let Value::Object(ref mut obj) = schema_value
&& let Some(defs) = obj.remove("definitions")
&& let Value::Object(defs_obj) = defs
{
for (def_name, mut def_schema) in defs_obj {
if !SPECIAL_DEFINITIONS.contains(&def_name.as_str()) {
annotate_schema(&mut def_schema, Some(def_name.as_str()));
definitions.insert(def_name, def_schema);
if let Some(Value::Array(one_of)) = obj.get_mut("oneOf") {
for variant in one_of.iter_mut() {
if let Some(variant_name) = variant_definition_name(&name, variant)
&& let Value::Object(variant_obj) = variant
{
variant_obj.insert("title".into(), Value::String(variant_name));
}
}
}
}
@@ -220,12 +227,9 @@ where
{
let file_stem = name.trim();
let schema = schema_for!(T);
let mut schema_value = serde_json::to_value(schema)?;
annotate_schema(&mut schema_value, Some(file_stem));
write_pretty_json(out_dir.join(format!("{file_stem}.json")), &schema_value)
write_pretty_json(out_dir.join(format!("{file_stem}.json")), &schema)
.with_context(|| format!("Failed to write JSON schema for {file_stem}"))?;
let annotated_schema = serde_json::from_value(schema_value)?;
Ok(annotated_schema)
Ok(schema)
}
pub(crate) fn write_json_schema<T>(out_dir: &Path, name: &str) -> Result<()>
@@ -297,147 +301,11 @@ fn variant_definition_name(base: &str, variant: &Value) -> Option<String> {
}
fn literal_from_property<'a>(props: &'a Map<String, Value>, key: &str) -> Option<&'a str> {
props.get(key).and_then(string_literal)
}
fn string_literal(value: &Value) -> Option<&str> {
value.get("const").and_then(Value::as_str).or_else(|| {
value
.get("enum")
.and_then(Value::as_array)
.and_then(|arr| arr.first())
.and_then(Value::as_str)
})
}
fn annotate_schema(value: &mut Value, base: Option<&str>) {
match value {
Value::Object(map) => annotate_object(map, base),
Value::Array(items) => {
for item in items {
annotate_schema(item, base);
}
}
_ => {}
}
}
fn annotate_object(map: &mut Map<String, Value>, base: Option<&str>) {
let owner = map.get("title").and_then(Value::as_str).map(str::to_owned);
if let Some(owner) = owner.as_deref()
&& let Some(Value::Object(props)) = map.get_mut("properties")
{
set_discriminator_titles(props, owner);
}
if let Some(Value::Array(variants)) = map.get_mut("oneOf") {
annotate_variant_list(variants, base);
}
if let Some(Value::Array(variants)) = map.get_mut("anyOf") {
annotate_variant_list(variants, base);
}
if let Some(Value::Object(defs)) = map.get_mut("definitions") {
for (name, schema) in defs.iter_mut() {
annotate_schema(schema, Some(name.as_str()));
}
}
if let Some(Value::Object(defs)) = map.get_mut("$defs") {
for (name, schema) in defs.iter_mut() {
annotate_schema(schema, Some(name.as_str()));
}
}
if let Some(Value::Object(props)) = map.get_mut("properties") {
for value in props.values_mut() {
annotate_schema(value, base);
}
}
if let Some(items) = map.get_mut("items") {
annotate_schema(items, base);
}
if let Some(additional) = map.get_mut("additionalProperties") {
annotate_schema(additional, base);
}
for (key, child) in map.iter_mut() {
match key.as_str() {
"oneOf"
| "anyOf"
| "definitions"
| "$defs"
| "properties"
| "items"
| "additionalProperties" => {}
_ => annotate_schema(child, base),
}
}
}
fn annotate_variant_list(variants: &mut [Value], base: Option<&str>) {
let mut seen = HashSet::new();
for variant in variants.iter() {
if let Some(name) = variant_title(variant) {
seen.insert(name.to_owned());
}
}
for variant in variants.iter_mut() {
let mut variant_name = variant_title(variant).map(str::to_owned);
if variant_name.is_none()
&& let Some(base_name) = base
&& let Some(name) = variant_definition_name(base_name, variant)
{
let mut candidate = name.clone();
let mut index = 2;
while seen.contains(&candidate) {
candidate = format!("{name}{index}");
index += 1;
}
if let Some(obj) = variant.as_object_mut() {
obj.insert("title".into(), Value::String(candidate.clone()));
}
seen.insert(candidate.clone());
variant_name = Some(candidate);
}
if let Some(name) = variant_name.as_deref()
&& let Some(obj) = variant.as_object_mut()
&& let Some(Value::Object(props)) = obj.get_mut("properties")
{
set_discriminator_titles(props, name);
}
annotate_schema(variant, base);
}
}
const DISCRIMINATOR_KEYS: &[&str] = &["type", "method", "mode", "status", "role", "reason"];
fn set_discriminator_titles(props: &mut Map<String, Value>, owner: &str) {
for key in DISCRIMINATOR_KEYS {
if let Some(prop_schema) = props.get_mut(*key)
&& string_literal(prop_schema).is_some()
&& let Value::Object(prop_obj) = prop_schema
{
if prop_obj.contains_key("title") {
continue;
}
let suffix = to_pascal_case(key);
prop_obj.insert("title".into(), Value::String(format!("{owner}{suffix}")));
}
}
}
fn variant_title(value: &Value) -> Option<&str> {
value
.as_object()
.and_then(|obj| obj.get("title"))
props
.get(key)
.and_then(|value| value.get("enum"))
.and_then(Value::as_array)
.and_then(|arr| arr.first())
.and_then(Value::as_str)
}
@@ -534,204 +402,3 @@ fn generate_index_ts(out_dir: &Path) -> Result<PathBuf> {
.with_context(|| format!("Failed to write {}", index_path.display()))?;
Ok(index_path)
}
#[cfg(test)]
mod tests {
use super::*;
use anyhow::Result;
use std::collections::BTreeSet;
use std::fs;
use std::path::PathBuf;
use uuid::Uuid;
#[test]
fn generated_ts_has_no_optional_nullable_fields() -> Result<()> {
let output_dir = std::env::temp_dir().join(format!("codex_ts_types_{}", Uuid::now_v7()));
fs::create_dir(&output_dir)?;
struct TempDirGuard(PathBuf);
impl Drop for TempDirGuard {
fn drop(&mut self) {
let _ = fs::remove_dir_all(&self.0);
}
}
let _guard = TempDirGuard(output_dir.clone());
generate_ts(&output_dir, None)?;
let mut undefined_offenders = Vec::new();
let mut optional_nullable_offenders = BTreeSet::new();
let mut stack = vec![output_dir];
while let Some(dir) = stack.pop() {
for entry in fs::read_dir(&dir)? {
let entry = entry?;
let path = entry.path();
if path.is_dir() {
stack.push(path);
continue;
}
if matches!(path.extension().and_then(|ext| ext.to_str()), Some("ts")) {
let contents = fs::read_to_string(&path)?;
if contents.contains("| undefined") {
undefined_offenders.push(path.clone());
}
const SKIP_PREFIXES: &[&str] = &[
"const ",
"let ",
"var ",
"export const ",
"export let ",
"export var ",
];
let mut search_start = 0;
while let Some(idx) = contents[search_start..].find("| null") {
let abs_idx = search_start + idx;
// Find the property-colon for this field by scanning forward
// from the start of the segment and ignoring nested braces,
// brackets, and parens. This avoids colons inside nested
// type literals like `{ [k in string]?: string }`.
let line_start_idx =
contents[..abs_idx].rfind('\n').map(|i| i + 1).unwrap_or(0);
let mut segment_start_idx = line_start_idx;
if let Some(rel_idx) = contents[line_start_idx..abs_idx].rfind(',') {
segment_start_idx = segment_start_idx.max(line_start_idx + rel_idx + 1);
}
if let Some(rel_idx) = contents[line_start_idx..abs_idx].rfind('{') {
segment_start_idx = segment_start_idx.max(line_start_idx + rel_idx + 1);
}
if let Some(rel_idx) = contents[line_start_idx..abs_idx].rfind('}') {
segment_start_idx = segment_start_idx.max(line_start_idx + rel_idx + 1);
}
// Scan forward for the colon that separates the field name from its type.
let mut level_brace = 0_i32;
let mut level_brack = 0_i32;
let mut level_paren = 0_i32;
let mut in_single = false;
let mut in_double = false;
let mut escape = false;
let mut prop_colon_idx = None;
for (i, ch) in contents[segment_start_idx..abs_idx].char_indices() {
let idx_abs = segment_start_idx + i;
if escape {
escape = false;
continue;
}
match ch {
'\\' => {
// Only treat as escape when inside a string.
if in_single || in_double {
escape = true;
}
}
'\'' => {
if !in_double {
in_single = !in_single;
}
}
'"' => {
if !in_single {
in_double = !in_double;
}
}
'{' if !in_single && !in_double => level_brace += 1,
'}' if !in_single && !in_double => level_brace -= 1,
'[' if !in_single && !in_double => level_brack += 1,
']' if !in_single && !in_double => level_brack -= 1,
'(' if !in_single && !in_double => level_paren += 1,
')' if !in_single && !in_double => level_paren -= 1,
':' if !in_single
&& !in_double
&& level_brace == 0
&& level_brack == 0
&& level_paren == 0 =>
{
prop_colon_idx = Some(idx_abs);
break;
}
_ => {}
}
}
let Some(colon_idx) = prop_colon_idx else {
search_start = abs_idx + 5;
continue;
};
let mut field_prefix = contents[segment_start_idx..colon_idx].trim();
if field_prefix.is_empty() {
search_start = abs_idx + 5;
continue;
}
if let Some(comment_idx) = field_prefix.rfind("*/") {
field_prefix = field_prefix[comment_idx + 2..].trim_start();
}
if field_prefix.is_empty() {
search_start = abs_idx + 5;
continue;
}
if SKIP_PREFIXES
.iter()
.any(|prefix| field_prefix.starts_with(prefix))
{
search_start = abs_idx + 5;
continue;
}
if field_prefix.contains('(') {
search_start = abs_idx + 5;
continue;
}
// If the last non-whitespace before ':' is '?', then this is an
// optional field with a nullable type (i.e., "?: T | null"),
// which we explicitly disallow.
if field_prefix.chars().rev().find(|c| !c.is_whitespace()) == Some('?') {
let line_number =
contents[..abs_idx].chars().filter(|c| *c == '\n').count() + 1;
let offending_line_end = contents[line_start_idx..]
.find('\n')
.map(|i| line_start_idx + i)
.unwrap_or(contents.len());
let offending_snippet =
contents[line_start_idx..offending_line_end].trim();
optional_nullable_offenders.insert(format!(
"{}:{}: {offending_snippet}",
path.display(),
line_number
));
}
search_start = abs_idx + 5;
}
}
}
}
assert!(
undefined_offenders.is_empty(),
"Generated TypeScript still includes unions with `undefined` in {undefined_offenders:?}"
);
// If this assertion fails, it means a field was generated as
// "?: T | null" — i.e., both optional (undefined) and nullable (null).
// We only want either "?: T" or ": T | null".
assert!(
optional_nullable_offenders.is_empty(),
"Generated TypeScript has optional fields with nullable types (disallowed '?: T | null'), add #[ts(optional)] to fix:\n{optional_nullable_offenders:?}"
);
Ok(())
}
}

View File

@@ -34,7 +34,6 @@ pub struct JSONRPCRequest {
pub id: RequestId,
pub method: String,
#[serde(default, skip_serializing_if = "Option::is_none")]
#[ts(optional)]
pub params: Option<serde_json::Value>,
}
@@ -43,7 +42,6 @@ pub struct JSONRPCRequest {
pub struct JSONRPCNotification {
pub method: String,
#[serde(default, skip_serializing_if = "Option::is_none")]
#[ts(optional)]
pub params: Option<serde_json::Value>,
}
@@ -65,7 +63,6 @@ pub struct JSONRPCError {
pub struct JSONRPCErrorError {
pub code: i64,
#[serde(default, skip_serializing_if = "Option::is_none")]
#[ts(optional)]
pub data: Option<serde_json::Value>,
pub message: String,
}
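// Editorial note (not part of the original diff): with ts-rs, an `Option<T>`
// field without `#[ts(optional)]` is exported as `field: T | null`, whereas
// the attribute would have produced `field?: T`. Removing the attribute here
// therefore makes `params`/`data` explicitly nullable in the generated
// TypeScript, consistent with the "either `?: T` or `: T | null`" rule the
// generator's test suite enforces.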

View File

@@ -6,6 +6,4 @@ pub use export::generate_json;
pub use export::generate_ts;
pub use export::generate_types;
pub use jsonrpc_lite::*;
pub use protocol::common::*;
pub use protocol::v1::*;
pub use protocol::v2::*;
pub use protocol::*;

View File

@@ -0,0 +1,973 @@
use std::collections::HashMap;
use std::path::PathBuf;
use crate::JSONRPCNotification;
use crate::JSONRPCRequest;
use crate::RequestId;
use codex_protocol::ConversationId;
use codex_protocol::config_types::ForcedLoginMethod;
use codex_protocol::config_types::ReasoningEffort;
use codex_protocol::config_types::ReasoningSummary;
use codex_protocol::config_types::SandboxMode;
use codex_protocol::config_types::Verbosity;
use codex_protocol::parse_command::ParsedCommand;
use codex_protocol::protocol::AskForApproval;
use codex_protocol::protocol::EventMsg;
use codex_protocol::protocol::FileChange;
use codex_protocol::protocol::ReviewDecision;
use codex_protocol::protocol::SandboxPolicy;
use codex_protocol::protocol::TurnAbortReason;
use paste::paste;
use schemars::JsonSchema;
use serde::Deserialize;
use serde::Serialize;
use strum_macros::Display;
use ts_rs::TS;
use uuid::Uuid;
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, JsonSchema, TS)]
#[ts(type = "string")]
pub struct GitSha(pub String);
impl GitSha {
pub fn new(sha: &str) -> Self {
Self(sha.to_string())
}
}
#[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq, Eq, Display, JsonSchema, TS)]
#[serde(rename_all = "lowercase")]
pub enum AuthMode {
ApiKey,
ChatGPT,
}
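// With `rename_all = "lowercase"`, these variants serialize as "apikey" and
// "chatgpt" respectively (illustrative note, not part of the original source).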
/// Generates an `enum ClientRequest` where each variant is a request that the
/// client can send to the server. Each variant has associated `params` and
/// `response` types. Also generates an `export_client_responses()` function to
/// export all response types to TypeScript.
macro_rules! client_request_definitions {
(
$(
$(#[$variant_meta:meta])*
$variant:ident {
params: $(#[$params_meta:meta])* $params:ty,
response: $response:ty,
}
),* $(,)?
) => {
/// Request from the client to the server.
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(tag = "method", rename_all = "camelCase")]
pub enum ClientRequest {
$(
$(#[$variant_meta])*
$variant {
#[serde(rename = "id")]
request_id: RequestId,
$(#[$params_meta])*
params: $params,
},
)*
}
pub fn export_client_responses(
out_dir: &::std::path::Path,
) -> ::std::result::Result<(), ::ts_rs::ExportError> {
$(
<$response as ::ts_rs::TS>::export_all_to(out_dir)?;
)*
Ok(())
}
pub fn export_client_response_schemas(
out_dir: &::std::path::Path,
) -> ::anyhow::Result<()> {
$(
crate::export::write_json_schema::<$response>(out_dir, stringify!($response))?;
)*
Ok(())
}
};
}
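// Rough sketch of the expansion for one entry (illustrative; the macro output
// is not reproduced verbatim). `Initialize { params: InitializeParams,
// response: InitializeResponse }` becomes the variant
//
//   Initialize {
//       #[serde(rename = "id")]
//       request_id: RequestId,
//       params: InitializeParams,
//   }
//
// and `export_client_responses` gains the line
// `<InitializeResponse as ::ts_rs::TS>::export_all_to(out_dir)?;`.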
client_request_definitions! {
Initialize {
params: InitializeParams,
response: InitializeResponse,
},
NewConversation {
params: NewConversationParams,
response: NewConversationResponse,
},
/// List recorded Codex conversations (rollouts) with optional pagination and search.
ListConversations {
params: ListConversationsParams,
response: ListConversationsResponse,
},
/// Resume a recorded Codex conversation from a rollout file.
ResumeConversation {
params: ResumeConversationParams,
response: ResumeConversationResponse,
},
ArchiveConversation {
params: ArchiveConversationParams,
response: ArchiveConversationResponse,
},
SendUserMessage {
params: SendUserMessageParams,
response: SendUserMessageResponse,
},
SendUserTurn {
params: SendUserTurnParams,
response: SendUserTurnResponse,
},
InterruptConversation {
params: InterruptConversationParams,
response: InterruptConversationResponse,
},
AddConversationListener {
params: AddConversationListenerParams,
response: AddConversationSubscriptionResponse,
},
RemoveConversationListener {
params: RemoveConversationListenerParams,
response: RemoveConversationSubscriptionResponse,
},
GitDiffToRemote {
params: GitDiffToRemoteParams,
response: GitDiffToRemoteResponse,
},
LoginApiKey {
params: LoginApiKeyParams,
response: LoginApiKeyResponse,
},
LoginChatGpt {
params: #[ts(type = "undefined")] #[serde(skip_serializing_if = "Option::is_none")] Option<()>,
response: LoginChatGptResponse,
},
CancelLoginChatGpt {
params: CancelLoginChatGptParams,
response: CancelLoginChatGptResponse,
},
LogoutChatGpt {
params: #[ts(type = "undefined")] #[serde(skip_serializing_if = "Option::is_none")] Option<()>,
response: LogoutChatGptResponse,
},
GetAuthStatus {
params: GetAuthStatusParams,
response: GetAuthStatusResponse,
},
GetUserSavedConfig {
params: #[ts(type = "undefined")] #[serde(skip_serializing_if = "Option::is_none")] Option<()>,
response: GetUserSavedConfigResponse,
},
SetDefaultModel {
params: SetDefaultModelParams,
response: SetDefaultModelResponse,
},
GetUserAgent {
params: #[ts(type = "undefined")] #[serde(skip_serializing_if = "Option::is_none")] Option<()>,
response: GetUserAgentResponse,
},
UserInfo {
params: #[ts(type = "undefined")] #[serde(skip_serializing_if = "Option::is_none")] Option<()>,
response: UserInfoResponse,
},
FuzzyFileSearch {
params: FuzzyFileSearchParams,
response: FuzzyFileSearchResponse,
},
/// Execute a command (argv vector) under the server's sandbox.
ExecOneOffCommand {
params: ExecOneOffCommandParams,
response: ExecOneOffCommandResponse,
},
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct InitializeParams {
pub client_info: ClientInfo,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct ClientInfo {
pub name: String,
#[serde(skip_serializing_if = "Option::is_none")]
pub title: Option<String>,
pub version: String,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct InitializeResponse {
pub user_agent: String,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct NewConversationParams {
/// Optional override for the model name (e.g. "o3", "o4-mini").
#[serde(skip_serializing_if = "Option::is_none")]
pub model: Option<String>,
/// Configuration profile from config.toml to specify default options.
#[serde(skip_serializing_if = "Option::is_none")]
pub profile: Option<String>,
/// Working directory for the session. If relative, it is resolved against
/// the server process's current working directory.
#[serde(skip_serializing_if = "Option::is_none")]
pub cwd: Option<String>,
/// Approval policy for shell commands generated by the model:
/// `untrusted`, `on-failure`, `on-request`, `never`.
#[serde(skip_serializing_if = "Option::is_none")]
pub approval_policy: Option<AskForApproval>,
/// Sandbox mode: `read-only`, `workspace-write`, or `danger-full-access`.
#[serde(skip_serializing_if = "Option::is_none")]
pub sandbox: Option<SandboxMode>,
/// Individual config settings that will override what is in
/// CODEX_HOME/config.toml.
#[serde(skip_serializing_if = "Option::is_none")]
pub config: Option<HashMap<String, serde_json::Value>>,
/// The set of instructions to use instead of the default ones.
#[serde(skip_serializing_if = "Option::is_none")]
pub base_instructions: Option<String>,
/// Whether to include the plan tool in the conversation.
#[serde(skip_serializing_if = "Option::is_none")]
pub include_plan_tool: Option<bool>,
/// Whether to include the apply patch tool in the conversation.
#[serde(skip_serializing_if = "Option::is_none")]
pub include_apply_patch_tool: Option<bool>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct NewConversationResponse {
pub conversation_id: ConversationId,
pub model: String,
    /// Note: the model may ignore this.
#[serde(skip_serializing_if = "Option::is_none")]
pub reasoning_effort: Option<ReasoningEffort>,
pub rollout_path: PathBuf,
}
#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct ResumeConversationResponse {
pub conversation_id: ConversationId,
pub model: String,
#[serde(skip_serializing_if = "Option::is_none")]
pub initial_messages: Option<Vec<EventMsg>>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct ListConversationsParams {
/// Optional page size; defaults to a reasonable server-side value.
#[serde(skip_serializing_if = "Option::is_none")]
pub page_size: Option<usize>,
/// Opaque pagination cursor returned by a previous call.
#[serde(skip_serializing_if = "Option::is_none")]
pub cursor: Option<String>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct ConversationSummary {
pub conversation_id: ConversationId,
pub path: PathBuf,
pub preview: String,
/// RFC3339 timestamp string for the session start, if available.
#[serde(skip_serializing_if = "Option::is_none")]
pub timestamp: Option<String>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct ListConversationsResponse {
pub items: Vec<ConversationSummary>,
/// Opaque cursor to pass to the next call to continue after the last item.
    /// If `None`, there are no more items to return.
#[serde(skip_serializing_if = "Option::is_none")]
pub next_cursor: Option<String>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct ResumeConversationParams {
/// Absolute path to the rollout JSONL file.
pub path: PathBuf,
/// Optional overrides to apply when spawning the resumed session.
#[serde(skip_serializing_if = "Option::is_none")]
pub overrides: Option<NewConversationParams>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct AddConversationSubscriptionResponse {
#[schemars(with = "String")]
pub subscription_id: Uuid,
}
/// The [`ConversationId`] must match the `rollout_path`.
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct ArchiveConversationParams {
pub conversation_id: ConversationId,
pub rollout_path: PathBuf,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct ArchiveConversationResponse {}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct RemoveConversationSubscriptionResponse {}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct LoginApiKeyParams {
pub api_key: String,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct LoginApiKeyResponse {}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct LoginChatGptResponse {
#[schemars(with = "String")]
pub login_id: Uuid,
/// URL the client should open in a browser to initiate the OAuth flow.
pub auth_url: String,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct GitDiffToRemoteResponse {
pub sha: GitSha,
pub diff: String,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct CancelLoginChatGptParams {
#[schemars(with = "String")]
pub login_id: Uuid,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct GitDiffToRemoteParams {
pub cwd: PathBuf,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct CancelLoginChatGptResponse {}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct LogoutChatGptParams {}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct LogoutChatGptResponse {}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct GetAuthStatusParams {
/// If true, include the current auth token (if available) in the response.
#[serde(skip_serializing_if = "Option::is_none")]
pub include_token: Option<bool>,
/// If true, attempt to refresh the token before returning status.
#[serde(skip_serializing_if = "Option::is_none")]
pub refresh_token: Option<bool>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct ExecOneOffCommandParams {
/// Command argv to execute.
pub command: Vec<String>,
/// Timeout of the command in milliseconds.
/// If not specified, a sensible default is used server-side.
pub timeout_ms: Option<u64>,
/// Optional working directory for the process. Defaults to server config cwd.
#[serde(skip_serializing_if = "Option::is_none")]
pub cwd: Option<PathBuf>,
/// Optional explicit sandbox policy overriding the server default.
#[serde(skip_serializing_if = "Option::is_none")]
pub sandbox_policy: Option<SandboxPolicy>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct ExecOneOffCommandResponse {
pub exit_code: i32,
pub stdout: String,
pub stderr: String,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct GetAuthStatusResponse {
#[serde(skip_serializing_if = "Option::is_none")]
pub auth_method: Option<AuthMode>,
#[serde(skip_serializing_if = "Option::is_none")]
pub auth_token: Option<String>,
    // Indicates whether a valid auth method is required to use the server.
    // This can be false when using a custom model provider configured with
    // `requires_openai_auth == false`.
#[serde(skip_serializing_if = "Option::is_none")]
pub requires_openai_auth: Option<bool>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct GetUserAgentResponse {
pub user_agent: String,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct UserInfoResponse {
/// Note: `alleged_user_email` is not currently verified. We read it from
/// the local auth.json, which the user could theoretically modify. In the
/// future, we may add logic to verify the email against the server before
/// returning it.
pub alleged_user_email: Option<String>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct GetUserSavedConfigResponse {
pub config: UserSavedConfig,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct SetDefaultModelParams {
/// If set to None, this means `model` should be cleared in config.toml.
#[serde(skip_serializing_if = "Option::is_none")]
pub model: Option<String>,
/// If set to None, this means `model_reasoning_effort` should be cleared
/// in config.toml.
#[serde(skip_serializing_if = "Option::is_none")]
pub reasoning_effort: Option<ReasoningEffort>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct SetDefaultModelResponse {}
/// UserSavedConfig contains a subset of the config. It is meant to expose the
/// MCP client-configurable settings that can be specified in the
/// NewConversation and SendUserTurn requests.
#[derive(Deserialize, Debug, Clone, PartialEq, Serialize, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct UserSavedConfig {
/// Approvals
#[serde(skip_serializing_if = "Option::is_none")]
pub approval_policy: Option<AskForApproval>,
#[serde(skip_serializing_if = "Option::is_none")]
pub sandbox_mode: Option<SandboxMode>,
#[serde(skip_serializing_if = "Option::is_none")]
pub sandbox_settings: Option<SandboxSettings>,
#[serde(skip_serializing_if = "Option::is_none")]
pub forced_chatgpt_workspace_id: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub forced_login_method: Option<ForcedLoginMethod>,
/// Model-specific configuration
#[serde(skip_serializing_if = "Option::is_none")]
pub model: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub model_reasoning_effort: Option<ReasoningEffort>,
#[serde(skip_serializing_if = "Option::is_none")]
pub model_reasoning_summary: Option<ReasoningSummary>,
#[serde(skip_serializing_if = "Option::is_none")]
pub model_verbosity: Option<Verbosity>,
/// Tools
#[serde(skip_serializing_if = "Option::is_none")]
pub tools: Option<Tools>,
/// Profiles
#[serde(skip_serializing_if = "Option::is_none")]
pub profile: Option<String>,
#[serde(default)]
pub profiles: HashMap<String, Profile>,
}
/// MCP representation of a [`codex_core::config_profile::ConfigProfile`].
#[derive(Deserialize, Debug, Clone, PartialEq, Serialize, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct Profile {
pub model: Option<String>,
/// The key in the `model_providers` map identifying the
/// [`ModelProviderInfo`] to use.
pub model_provider: Option<String>,
pub approval_policy: Option<AskForApproval>,
pub model_reasoning_effort: Option<ReasoningEffort>,
pub model_reasoning_summary: Option<ReasoningSummary>,
pub model_verbosity: Option<Verbosity>,
pub chatgpt_base_url: Option<String>,
}
/// MCP representation of a [`codex_core::config::ToolsToml`].
#[derive(Deserialize, Debug, Clone, PartialEq, Serialize, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct Tools {
#[serde(skip_serializing_if = "Option::is_none")]
pub web_search: Option<bool>,
#[serde(skip_serializing_if = "Option::is_none")]
pub view_image: Option<bool>,
}
/// MCP representation of a [`codex_core::config_types::SandboxWorkspaceWrite`].
#[derive(Deserialize, Debug, Clone, PartialEq, Serialize, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct SandboxSettings {
#[serde(default)]
pub writable_roots: Vec<PathBuf>,
#[serde(skip_serializing_if = "Option::is_none")]
pub network_access: Option<bool>,
#[serde(skip_serializing_if = "Option::is_none")]
pub exclude_tmpdir_env_var: Option<bool>,
#[serde(skip_serializing_if = "Option::is_none")]
pub exclude_slash_tmp: Option<bool>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct SendUserMessageParams {
pub conversation_id: ConversationId,
pub items: Vec<InputItem>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct SendUserTurnParams {
pub conversation_id: ConversationId,
pub items: Vec<InputItem>,
pub cwd: PathBuf,
pub approval_policy: AskForApproval,
pub sandbox_policy: SandboxPolicy,
pub model: String,
#[serde(skip_serializing_if = "Option::is_none")]
pub effort: Option<ReasoningEffort>,
pub summary: ReasoningSummary,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct SendUserTurnResponse {}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct InterruptConversationParams {
pub conversation_id: ConversationId,
}
#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct InterruptConversationResponse {
pub abort_reason: TurnAbortReason,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct SendUserMessageResponse {}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct AddConversationListenerParams {
pub conversation_id: ConversationId,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct RemoveConversationListenerParams {
#[schemars(with = "String")]
pub subscription_id: Uuid,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[serde(tag = "type", content = "data")]
pub enum InputItem {
Text {
text: String,
},
    /// Pre-encoded `data:` URI image.
Image {
image_url: String,
},
/// Local image path provided by the user. This will be converted to an
/// `Image` variant (base64 data URL) during request serialization.
LocalImage {
path: PathBuf,
},
}
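// Wire-format examples for the adjacently tagged enum above (illustrative,
// not part of the original source):
//
//   {"type": "text",  "data": {"text": "hello"}}
//   {"type": "image", "data": {"imageUrl": "data:image/png;base64,..."}}
//
// The `camelCase` container attribute renames `image_url` to `imageUrl`.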
/// Generates an `enum ServerRequest` where each variant is a request that the
/// server can send to the client along with the corresponding params and
/// response types. It also generates helper types used by the app/server
/// infrastructure (payload enum, request constructor, and export helpers).
macro_rules! server_request_definitions {
(
$(
$(#[$variant_meta:meta])*
$variant:ident
),* $(,)?
) => {
paste! {
/// Request initiated from the server and sent to the client.
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(tag = "method", rename_all = "camelCase")]
pub enum ServerRequest {
$(
$(#[$variant_meta])*
$variant {
#[serde(rename = "id")]
request_id: RequestId,
params: [<$variant Params>],
},
)*
}
#[derive(Debug, Clone, PartialEq, JsonSchema)]
pub enum ServerRequestPayload {
$( $variant([<$variant Params>]), )*
}
impl ServerRequestPayload {
pub fn request_with_id(self, request_id: RequestId) -> ServerRequest {
match self {
$(Self::$variant(params) => ServerRequest::$variant { request_id, params },)*
}
}
}
}
pub fn export_server_responses(
out_dir: &::std::path::Path,
) -> ::std::result::Result<(), ::ts_rs::ExportError> {
paste! {
$(<[<$variant Response>] as ::ts_rs::TS>::export_all_to(out_dir)?;)*
}
Ok(())
}
pub fn export_server_response_schemas(
out_dir: &::std::path::Path,
) -> ::anyhow::Result<()> {
paste! {
$(crate::export::write_json_schema::<[<$variant Response>]>(out_dir, stringify!([<$variant Response>]))?;)*
}
Ok(())
}
};
}
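// `paste!` concatenates identifiers, so a variant named `ApplyPatchApproval`
// is paired with `ApplyPatchApprovalParams` and `ApplyPatchApprovalResponse`
// purely by naming convention; those structs are defined separately below.
// (Editorial note, not part of the original source.)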
impl TryFrom<JSONRPCRequest> for ServerRequest {
type Error = serde_json::Error;
fn try_from(value: JSONRPCRequest) -> Result<Self, Self::Error> {
serde_json::from_value(serde_json::to_value(value)?)
}
}
server_request_definitions! {
/// Request to approve a patch.
ApplyPatchApproval,
/// Request to exec a command.
ExecCommandApproval,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct ApplyPatchApprovalParams {
pub conversation_id: ConversationId,
    /// Use this to correlate with [codex_core::protocol::PatchApplyBeginEvent]
/// and [codex_core::protocol::PatchApplyEndEvent].
pub call_id: String,
pub file_changes: HashMap<PathBuf, FileChange>,
/// Optional explanatory reason (e.g. request for extra write access).
#[serde(skip_serializing_if = "Option::is_none")]
pub reason: Option<String>,
/// When set, the agent is asking the user to allow writes under this root
/// for the remainder of the session (unclear if this is honored today).
#[serde(skip_serializing_if = "Option::is_none")]
pub grant_root: Option<PathBuf>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct ExecCommandApprovalParams {
pub conversation_id: ConversationId,
    /// Use this to correlate with [codex_core::protocol::ExecCommandBeginEvent]
/// and [codex_core::protocol::ExecCommandEndEvent].
pub call_id: String,
pub command: Vec<String>,
pub cwd: PathBuf,
#[serde(skip_serializing_if = "Option::is_none")]
pub reason: Option<String>,
pub parsed_cmd: Vec<ParsedCommand>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
pub struct ExecCommandApprovalResponse {
pub decision: ReviewDecision,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
pub struct ApplyPatchApprovalResponse {
pub decision: ReviewDecision,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(rename_all = "camelCase")]
pub struct FuzzyFileSearchParams {
pub query: String,
pub roots: Vec<String>,
    // If provided, cancels any previous request that used the same value.
#[serde(skip_serializing_if = "Option::is_none")]
pub cancellation_token: Option<String>,
}
/// Superset of [`codex_file_search::FileMatch`]
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
pub struct FuzzyFileSearchResult {
pub root: String,
pub path: String,
pub file_name: String,
pub score: u32,
#[serde(skip_serializing_if = "Option::is_none")]
pub indices: Option<Vec<u32>>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
pub struct FuzzyFileSearchResponse {
pub files: Vec<FuzzyFileSearchResult>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct LoginChatGptCompleteNotification {
#[schemars(with = "String")]
pub login_id: Uuid,
pub success: bool,
#[serde(skip_serializing_if = "Option::is_none")]
pub error: Option<String>,
}
#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct SessionConfiguredNotification {
/// Name left as session_id instead of conversation_id for backwards compatibility.
pub session_id: ConversationId,
/// Tell the client what model is being queried.
pub model: String,
/// The effort the model is putting into reasoning about the user's request.
#[serde(skip_serializing_if = "Option::is_none")]
pub reasoning_effort: Option<ReasoningEffort>,
/// Identifier of the history log file (inode on Unix, 0 otherwise).
pub history_log_id: u64,
/// Current number of entries in the history log.
#[ts(type = "number")]
pub history_entry_count: usize,
/// Optional initial messages (as events) for resumed sessions.
/// When present, UIs can use these to seed the history.
#[serde(skip_serializing_if = "Option::is_none")]
pub initial_messages: Option<Vec<EventMsg>>,
pub rollout_path: PathBuf,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct AuthStatusChangeNotification {
/// Current authentication method; omitted if signed out.
#[serde(skip_serializing_if = "Option::is_none")]
pub auth_method: Option<AuthMode>,
}
/// Notification sent from the server to the client.
#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema, TS, Display)]
#[serde(tag = "method", content = "params", rename_all = "camelCase")]
#[strum(serialize_all = "camelCase")]
pub enum ServerNotification {
/// Authentication status changed
AuthStatusChange(AuthStatusChangeNotification),
/// ChatGPT login flow completed
LoginChatGptComplete(LoginChatGptCompleteNotification),
/// The special session configured event for a new or resumed conversation.
SessionConfigured(SessionConfiguredNotification),
}
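// Illustrative wire shape (not part of the original source): with
// `tag = "method"` and `content = "params"`, an auth status change serializes
// as
//
//   {"method": "authStatusChange", "params": {"authMethod": "chatgpt"}}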
impl ServerNotification {
pub fn to_params(self) -> Result<serde_json::Value, serde_json::Error> {
match self {
ServerNotification::AuthStatusChange(params) => serde_json::to_value(params),
ServerNotification::LoginChatGptComplete(params) => serde_json::to_value(params),
ServerNotification::SessionConfigured(params) => serde_json::to_value(params),
}
}
}
impl TryFrom<JSONRPCNotification> for ServerNotification {
type Error = serde_json::Error;
fn try_from(value: JSONRPCNotification) -> Result<Self, Self::Error> {
serde_json::from_value(serde_json::to_value(value)?)
}
}
/// Notification sent from the client to the server.
#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema, TS, Display)]
#[serde(tag = "method", content = "params", rename_all = "camelCase")]
#[strum(serialize_all = "camelCase")]
pub enum ClientNotification {
Initialized,
}
#[cfg(test)]
mod tests {
use super::*;
use anyhow::Result;
use pretty_assertions::assert_eq;
use serde_json::json;
#[test]
fn serialize_new_conversation() -> Result<()> {
let request = ClientRequest::NewConversation {
request_id: RequestId::Integer(42),
params: NewConversationParams {
model: Some("gpt-5-codex".to_string()),
profile: None,
cwd: None,
approval_policy: Some(AskForApproval::OnRequest),
sandbox: None,
config: None,
base_instructions: None,
include_plan_tool: None,
include_apply_patch_tool: None,
},
};
assert_eq!(
json!({
"method": "newConversation",
"id": 42,
"params": {
"model": "gpt-5-codex",
"approvalPolicy": "on-request"
}
}),
serde_json::to_value(&request)?,
);
Ok(())
}
#[test]
fn conversation_id_serializes_as_plain_string() -> Result<()> {
let id = ConversationId::from_string("67e55044-10b1-426f-9247-bb680e5fe0c8")?;
assert_eq!(
json!("67e55044-10b1-426f-9247-bb680e5fe0c8"),
serde_json::to_value(id)?
);
Ok(())
}
#[test]
fn conversation_id_deserializes_from_plain_string() -> Result<()> {
let id: ConversationId =
serde_json::from_value(json!("67e55044-10b1-426f-9247-bb680e5fe0c8"))?;
assert_eq!(
ConversationId::from_string("67e55044-10b1-426f-9247-bb680e5fe0c8")?,
id,
);
Ok(())
}
#[test]
fn serialize_client_notification() -> Result<()> {
let notification = ClientNotification::Initialized;
// Note there is no "params" field for this notification.
assert_eq!(
json!({
"method": "initialized",
}),
serde_json::to_value(&notification)?,
);
Ok(())
}
#[test]
fn serialize_server_request() -> Result<()> {
let conversation_id = ConversationId::from_string("67e55044-10b1-426f-9247-bb680e5fe0c8")?;
let params = ExecCommandApprovalParams {
conversation_id,
call_id: "call-42".to_string(),
command: vec!["echo".to_string(), "hello".to_string()],
cwd: PathBuf::from("/tmp"),
reason: Some("because tests".to_string()),
parsed_cmd: vec![ParsedCommand::Unknown {
cmd: "echo hello".to_string(),
}],
};
let request = ServerRequest::ExecCommandApproval {
request_id: RequestId::Integer(7),
params: params.clone(),
};
assert_eq!(
json!({
"method": "execCommandApproval",
"id": 7,
"params": {
"conversationId": "67e55044-10b1-426f-9247-bb680e5fe0c8",
"callId": "call-42",
"command": ["echo", "hello"],
"cwd": "/tmp",
"reason": "because tests",
"parsedCmd": [
{
"type": "unknown",
"cmd": "echo hello"
}
]
}
}),
serde_json::to_value(&request)?,
);
let payload = ServerRequestPayload::ExecCommandApproval(params);
assert_eq!(payload.request_with_id(RequestId::Integer(7)), request);
Ok(())
}
}

View File

@@ -1,685 +0,0 @@
use std::collections::HashMap;
use std::path::PathBuf;
use crate::JSONRPCNotification;
use crate::JSONRPCRequest;
use crate::RequestId;
use crate::protocol::v1;
use crate::protocol::v2;
use codex_protocol::ConversationId;
use codex_protocol::parse_command::ParsedCommand;
use codex_protocol::protocol::FileChange;
use codex_protocol::protocol::RateLimitSnapshot;
use codex_protocol::protocol::ReviewDecision;
use codex_protocol::protocol::SandboxCommandAssessment;
use paste::paste;
use schemars::JsonSchema;
use serde::Deserialize;
use serde::Serialize;
use strum_macros::Display;
use ts_rs::TS;
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, JsonSchema, TS)]
#[ts(type = "string")]
pub struct GitSha(pub String);
impl GitSha {
pub fn new(sha: &str) -> Self {
Self(sha.to_string())
}
}
#[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq, Eq, Display, JsonSchema, TS)]
#[serde(rename_all = "lowercase")]
pub enum AuthMode {
ApiKey,
ChatGPT,
}
/// Generates an `enum ClientRequest` where each variant is a request that the
/// client can send to the server. Each variant has associated `params` and
/// `response` types. Also generates an `export_client_responses()` function to
/// export all response types to TypeScript.
macro_rules! client_request_definitions {
(
$(
$(#[$variant_meta:meta])*
$variant:ident {
params: $(#[$params_meta:meta])* $params:ty,
response: $response:ty,
}
),* $(,)?
) => {
/// Request from the client to the server.
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(tag = "method", rename_all = "camelCase")]
pub enum ClientRequest {
$(
$(#[$variant_meta])*
$variant {
#[serde(rename = "id")]
request_id: RequestId,
$(#[$params_meta])*
params: $params,
},
)*
}
pub fn export_client_responses(
out_dir: &::std::path::Path,
) -> ::std::result::Result<(), ::ts_rs::ExportError> {
$(
<$response as ::ts_rs::TS>::export_all_to(out_dir)?;
)*
Ok(())
}
pub fn export_client_response_schemas(
out_dir: &::std::path::Path,
) -> ::anyhow::Result<()> {
$(
crate::export::write_json_schema::<$response>(out_dir, stringify!($response))?;
)*
Ok(())
}
};
}
client_request_definitions! {
/// NEW APIs
#[serde(rename = "model/list")]
#[ts(rename = "model/list")]
ListModels {
params: v2::ListModelsParams,
response: v2::ListModelsResponse,
},
#[serde(rename = "account/login")]
#[ts(rename = "account/login")]
LoginAccount {
params: v2::LoginAccountParams,
response: v2::LoginAccountResponse,
},
#[serde(rename = "account/logout")]
#[ts(rename = "account/logout")]
LogoutAccount {
params: #[ts(type = "undefined")] #[serde(skip_serializing_if = "Option::is_none")] Option<()>,
response: v2::LogoutAccountResponse,
},
#[serde(rename = "account/rateLimits/read")]
#[ts(rename = "account/rateLimits/read")]
GetAccountRateLimits {
params: #[ts(type = "undefined")] #[serde(skip_serializing_if = "Option::is_none")] Option<()>,
response: v2::GetAccountRateLimitsResponse,
},
#[serde(rename = "feedback/upload")]
#[ts(rename = "feedback/upload")]
UploadFeedback {
params: v2::UploadFeedbackParams,
response: v2::UploadFeedbackResponse,
},
#[serde(rename = "account/read")]
#[ts(rename = "account/read")]
GetAccount {
params: #[ts(type = "undefined")] #[serde(skip_serializing_if = "Option::is_none")] Option<()>,
response: v2::GetAccountResponse,
},
/// DEPRECATED APIs below
Initialize {
params: v1::InitializeParams,
response: v1::InitializeResponse,
},
NewConversation {
params: v1::NewConversationParams,
response: v1::NewConversationResponse,
},
GetConversationSummary {
params: v1::GetConversationSummaryParams,
response: v1::GetConversationSummaryResponse,
},
/// List recorded Codex conversations (rollouts) with optional pagination and search.
ListConversations {
params: v1::ListConversationsParams,
response: v1::ListConversationsResponse,
},
/// Resume a recorded Codex conversation from a rollout file.
ResumeConversation {
params: v1::ResumeConversationParams,
response: v1::ResumeConversationResponse,
},
ArchiveConversation {
params: v1::ArchiveConversationParams,
response: v1::ArchiveConversationResponse,
},
SendUserMessage {
params: v1::SendUserMessageParams,
response: v1::SendUserMessageResponse,
},
SendUserTurn {
params: v1::SendUserTurnParams,
response: v1::SendUserTurnResponse,
},
InterruptConversation {
params: v1::InterruptConversationParams,
response: v1::InterruptConversationResponse,
},
AddConversationListener {
params: v1::AddConversationListenerParams,
response: v1::AddConversationSubscriptionResponse,
},
RemoveConversationListener {
params: v1::RemoveConversationListenerParams,
response: v1::RemoveConversationSubscriptionResponse,
},
GitDiffToRemote {
params: v1::GitDiffToRemoteParams,
response: v1::GitDiffToRemoteResponse,
},
LoginApiKey {
params: v1::LoginApiKeyParams,
response: v1::LoginApiKeyResponse,
},
LoginChatGpt {
params: #[ts(type = "undefined")] #[serde(skip_serializing_if = "Option::is_none")] Option<()>,
response: v1::LoginChatGptResponse,
},
CancelLoginChatGpt {
params: v1::CancelLoginChatGptParams,
response: v1::CancelLoginChatGptResponse,
},
LogoutChatGpt {
params: #[ts(type = "undefined")] #[serde(skip_serializing_if = "Option::is_none")] Option<()>,
response: v1::LogoutChatGptResponse,
},
GetAuthStatus {
params: v1::GetAuthStatusParams,
response: v1::GetAuthStatusResponse,
},
GetUserSavedConfig {
params: #[ts(type = "undefined")] #[serde(skip_serializing_if = "Option::is_none")] Option<()>,
response: v1::GetUserSavedConfigResponse,
},
SetDefaultModel {
params: v1::SetDefaultModelParams,
response: v1::SetDefaultModelResponse,
},
GetUserAgent {
params: #[ts(type = "undefined")] #[serde(skip_serializing_if = "Option::is_none")] Option<()>,
response: v1::GetUserAgentResponse,
},
UserInfo {
params: #[ts(type = "undefined")] #[serde(skip_serializing_if = "Option::is_none")] Option<()>,
response: v1::UserInfoResponse,
},
FuzzyFileSearch {
params: FuzzyFileSearchParams,
response: FuzzyFileSearchResponse,
},
/// Execute a command (argv vector) under the server's sandbox.
ExecOneOffCommand {
params: v1::ExecOneOffCommandParams,
response: v1::ExecOneOffCommandResponse,
},
}
/// Generates an `enum ServerRequest` where each variant is a request that the
/// server can send to the client along with the corresponding params and
/// response types. It also generates helper types used by the app/server
/// infrastructure (payload enum, request constructor, and export helpers).
macro_rules! server_request_definitions {
(
$(
$(#[$variant_meta:meta])*
$variant:ident
),* $(,)?
) => {
paste! {
/// Request initiated from the server and sent to the client.
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(tag = "method", rename_all = "camelCase")]
pub enum ServerRequest {
$(
$(#[$variant_meta])*
$variant {
#[serde(rename = "id")]
request_id: RequestId,
params: [<$variant Params>],
},
)*
}
#[derive(Debug, Clone, PartialEq, JsonSchema)]
pub enum ServerRequestPayload {
$( $variant([<$variant Params>]), )*
}
impl ServerRequestPayload {
pub fn request_with_id(self, request_id: RequestId) -> ServerRequest {
match self {
$(Self::$variant(params) => ServerRequest::$variant { request_id, params },)*
}
}
}
}
pub fn export_server_responses(
out_dir: &::std::path::Path,
) -> ::std::result::Result<(), ::ts_rs::ExportError> {
paste! {
$(<[<$variant Response>] as ::ts_rs::TS>::export_all_to(out_dir)?;)*
}
Ok(())
}
pub fn export_server_response_schemas(
out_dir: &::std::path::Path,
) -> ::anyhow::Result<()> {
paste! {
$(crate::export::write_json_schema::<[<$variant Response>]>(out_dir, stringify!([<$variant Response>]))?;)*
}
Ok(())
}
};
}
impl TryFrom<JSONRPCRequest> for ServerRequest {
type Error = serde_json::Error;
fn try_from(value: JSONRPCRequest) -> Result<Self, Self::Error> {
serde_json::from_value(serde_json::to_value(value)?)
}
}
server_request_definitions! {
/// Request to approve a patch.
ApplyPatchApproval,
/// Request to exec a command.
ExecCommandApproval,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct ApplyPatchApprovalParams {
pub conversation_id: ConversationId,
    /// Use this to correlate with [codex_core::protocol::PatchApplyBeginEvent]
/// and [codex_core::protocol::PatchApplyEndEvent].
pub call_id: String,
pub file_changes: HashMap<PathBuf, FileChange>,
/// Optional explanatory reason (e.g. request for extra write access).
pub reason: Option<String>,
/// When set, the agent is asking the user to allow writes under this root
/// for the remainder of the session (unclear if this is honored today).
pub grant_root: Option<PathBuf>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct ExecCommandApprovalParams {
pub conversation_id: ConversationId,
    /// Use this to correlate with [codex_core::protocol::ExecCommandBeginEvent]
/// and [codex_core::protocol::ExecCommandEndEvent].
pub call_id: String,
pub command: Vec<String>,
pub cwd: PathBuf,
pub reason: Option<String>,
pub risk: Option<SandboxCommandAssessment>,
pub parsed_cmd: Vec<ParsedCommand>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
pub struct ExecCommandApprovalResponse {
pub decision: ReviewDecision,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
pub struct ApplyPatchApprovalResponse {
pub decision: ReviewDecision,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(rename_all = "camelCase")]
pub struct FuzzyFileSearchParams {
pub query: String,
pub roots: Vec<String>,
    // If provided, cancels any previous request that used the same value.
pub cancellation_token: Option<String>,
}
/// Superset of [`codex_file_search::FileMatch`]
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
pub struct FuzzyFileSearchResult {
pub root: String,
pub path: String,
pub file_name: String,
pub score: u32,
pub indices: Option<Vec<u32>>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
pub struct FuzzyFileSearchResponse {
pub files: Vec<FuzzyFileSearchResult>,
}
/// Notification sent from the server to the client.
#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema, TS, Display)]
#[serde(tag = "method", content = "params", rename_all = "camelCase")]
#[strum(serialize_all = "camelCase")]
pub enum ServerNotification {
/// NEW NOTIFICATIONS
#[serde(rename = "account/rateLimits/updated")]
#[ts(rename = "account/rateLimits/updated")]
#[strum(serialize = "account/rateLimits/updated")]
AccountRateLimitsUpdated(RateLimitSnapshot),
/// DEPRECATED NOTIFICATIONS below
/// Authentication status changed
AuthStatusChange(v1::AuthStatusChangeNotification),
/// ChatGPT login flow completed
LoginChatGptComplete(v1::LoginChatGptCompleteNotification),
/// The special session configured event for a new or resumed conversation.
SessionConfigured(v1::SessionConfiguredNotification),
}
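// Illustrative wire shape for the renamed notification (not part of the
// original source):
//
//   {"method": "account/rateLimits/updated", "params": { /* RateLimitSnapshot fields */ }}
//
// The matching serde, ts, and strum renames keep serialization, the generated
// TypeScript, and the `Display` output in sync.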
impl ServerNotification {
pub fn to_params(self) -> Result<serde_json::Value, serde_json::Error> {
match self {
ServerNotification::AccountRateLimitsUpdated(params) => serde_json::to_value(params),
ServerNotification::AuthStatusChange(params) => serde_json::to_value(params),
ServerNotification::LoginChatGptComplete(params) => serde_json::to_value(params),
ServerNotification::SessionConfigured(params) => serde_json::to_value(params),
}
}
}
impl TryFrom<JSONRPCNotification> for ServerNotification {
type Error = serde_json::Error;
fn try_from(value: JSONRPCNotification) -> Result<Self, Self::Error> {
serde_json::from_value(serde_json::to_value(value)?)
}
}
/// Notification sent from the client to the server.
#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema, TS, Display)]
#[serde(tag = "method", content = "params", rename_all = "camelCase")]
#[strum(serialize_all = "camelCase")]
pub enum ClientNotification {
Initialized,
}
#[cfg(test)]
mod tests {
use super::*;
use anyhow::Result;
use codex_protocol::account::PlanType;
use codex_protocol::protocol::AskForApproval;
use pretty_assertions::assert_eq;
use serde_json::json;
#[test]
fn serialize_new_conversation() -> Result<()> {
let request = ClientRequest::NewConversation {
request_id: RequestId::Integer(42),
params: v1::NewConversationParams {
model: Some("gpt-5-codex".to_string()),
model_provider: None,
profile: None,
cwd: None,
approval_policy: Some(AskForApproval::OnRequest),
sandbox: None,
config: None,
base_instructions: None,
developer_instructions: None,
compact_prompt: None,
include_apply_patch_tool: None,
},
};
assert_eq!(
json!({
"method": "newConversation",
"id": 42,
"params": {
"model": "gpt-5-codex",
"modelProvider": null,
"profile": null,
"cwd": null,
"approvalPolicy": "on-request",
"sandbox": null,
"config": null,
"baseInstructions": null,
"includeApplyPatchTool": null
}
}),
serde_json::to_value(&request)?,
);
Ok(())
}
#[test]
fn conversation_id_serializes_as_plain_string() -> Result<()> {
let id = ConversationId::from_string("67e55044-10b1-426f-9247-bb680e5fe0c8")?;
assert_eq!(
json!("67e55044-10b1-426f-9247-bb680e5fe0c8"),
serde_json::to_value(id)?
);
Ok(())
}
#[test]
fn conversation_id_deserializes_from_plain_string() -> Result<()> {
let id: ConversationId =
serde_json::from_value(json!("67e55044-10b1-426f-9247-bb680e5fe0c8"))?;
assert_eq!(
ConversationId::from_string("67e55044-10b1-426f-9247-bb680e5fe0c8")?,
id,
);
Ok(())
}
#[test]
fn serialize_client_notification() -> Result<()> {
let notification = ClientNotification::Initialized;
// Note there is no "params" field for this notification.
assert_eq!(
json!({
"method": "initialized",
}),
serde_json::to_value(&notification)?,
);
Ok(())
}
#[test]
fn serialize_server_request() -> Result<()> {
let conversation_id = ConversationId::from_string("67e55044-10b1-426f-9247-bb680e5fe0c8")?;
let params = ExecCommandApprovalParams {
conversation_id,
call_id: "call-42".to_string(),
command: vec!["echo".to_string(), "hello".to_string()],
cwd: PathBuf::from("/tmp"),
reason: Some("because tests".to_string()),
risk: None,
parsed_cmd: vec![ParsedCommand::Unknown {
cmd: "echo hello".to_string(),
}],
};
let request = ServerRequest::ExecCommandApproval {
request_id: RequestId::Integer(7),
params: params.clone(),
};
assert_eq!(
json!({
"method": "execCommandApproval",
"id": 7,
"params": {
"conversationId": "67e55044-10b1-426f-9247-bb680e5fe0c8",
"callId": "call-42",
"command": ["echo", "hello"],
"cwd": "/tmp",
"reason": "because tests",
"risk": null,
"parsedCmd": [
{
"type": "unknown",
"cmd": "echo hello"
}
]
}
}),
serde_json::to_value(&request)?,
);
let payload = ServerRequestPayload::ExecCommandApproval(params);
assert_eq!(payload.request_with_id(RequestId::Integer(7)), request);
Ok(())
}
#[test]
fn serialize_get_account_rate_limits() -> Result<()> {
let request = ClientRequest::GetAccountRateLimits {
request_id: RequestId::Integer(1),
params: None,
};
assert_eq!(
json!({
"method": "account/rateLimits/read",
"id": 1,
}),
serde_json::to_value(&request)?,
);
Ok(())
}
#[test]
fn serialize_account_login_api_key() -> Result<()> {
let request = ClientRequest::LoginAccount {
request_id: RequestId::Integer(2),
params: v2::LoginAccountParams::ApiKey {
api_key: "secret".to_string(),
},
};
assert_eq!(
json!({
"method": "account/login",
"id": 2,
"params": {
"type": "apiKey",
"apiKey": "secret"
}
}),
serde_json::to_value(&request)?,
);
Ok(())
}
#[test]
fn serialize_account_login_chatgpt() -> Result<()> {
let request = ClientRequest::LoginAccount {
request_id: RequestId::Integer(3),
params: v2::LoginAccountParams::ChatGpt,
};
assert_eq!(
json!({
"method": "account/login",
"id": 3,
"params": {
"type": "chatgpt"
}
}),
serde_json::to_value(&request)?,
);
Ok(())
}
#[test]
fn serialize_account_logout() -> Result<()> {
let request = ClientRequest::LogoutAccount {
request_id: RequestId::Integer(4),
params: None,
};
assert_eq!(
json!({
"method": "account/logout",
"id": 4,
}),
serde_json::to_value(&request)?,
);
Ok(())
}
#[test]
fn serialize_get_account() -> Result<()> {
let request = ClientRequest::GetAccount {
request_id: RequestId::Integer(5),
params: None,
};
assert_eq!(
json!({
"method": "account/read",
"id": 5,
}),
serde_json::to_value(&request)?,
);
Ok(())
}
#[test]
fn account_serializes_fields_in_camel_case() -> Result<()> {
let api_key = v2::Account::ApiKey {
api_key: "secret".to_string(),
};
assert_eq!(
json!({
"type": "apiKey",
"apiKey": "secret",
}),
serde_json::to_value(&api_key)?,
);
let chatgpt = v2::Account::ChatGpt {
email: Some("user@example.com".to_string()),
plan_type: PlanType::Plus,
};
assert_eq!(
json!({
"type": "chatgpt",
"email": "user@example.com",
"planType": "plus",
}),
serde_json::to_value(&chatgpt)?,
);
Ok(())
}
#[test]
fn serialize_list_models() -> Result<()> {
let request = ClientRequest::ListModels {
request_id: RequestId::Integer(6),
params: v2::ListModelsParams::default(),
};
assert_eq!(
json!({
"method": "model/list",
"id": 6,
"params": {
"pageSize": null,
"cursor": null
}
}),
serde_json::to_value(&request)?,
);
Ok(())
}
}

View File

@@ -1,6 +0,0 @@
// Module declarations for the app-server protocol namespace.
// Exposes protocol pieces used by `lib.rs` via `pub use protocol::common::*;`.
pub mod common;
pub mod v1;
pub mod v2;

View File

@@ -1,405 +0,0 @@
use std::collections::HashMap;
use std::path::PathBuf;
use codex_protocol::ConversationId;
use codex_protocol::config_types::ForcedLoginMethod;
use codex_protocol::config_types::ReasoningEffort;
use codex_protocol::config_types::ReasoningSummary;
use codex_protocol::config_types::SandboxMode;
use codex_protocol::config_types::Verbosity;
use codex_protocol::models::ResponseItem;
use codex_protocol::protocol::AskForApproval;
use codex_protocol::protocol::EventMsg;
use codex_protocol::protocol::SandboxPolicy;
use codex_protocol::protocol::TurnAbortReason;
use schemars::JsonSchema;
use serde::Deserialize;
use serde::Serialize;
use ts_rs::TS;
use uuid::Uuid;
// Reuse shared types defined in `common.rs`.
use crate::protocol::common::AuthMode;
use crate::protocol::common::GitSha;
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct InitializeParams {
pub client_info: ClientInfo,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct ClientInfo {
pub name: String,
pub title: Option<String>,
pub version: String,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct InitializeResponse {
pub user_agent: String,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct NewConversationParams {
pub model: Option<String>,
pub model_provider: Option<String>,
pub profile: Option<String>,
pub cwd: Option<String>,
pub approval_policy: Option<AskForApproval>,
pub sandbox: Option<SandboxMode>,
pub config: Option<HashMap<String, serde_json::Value>>,
pub base_instructions: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub developer_instructions: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub compact_prompt: Option<String>,
pub include_apply_patch_tool: Option<bool>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct NewConversationResponse {
pub conversation_id: ConversationId,
pub model: String,
pub reasoning_effort: Option<ReasoningEffort>,
pub rollout_path: PathBuf,
}
#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct ResumeConversationResponse {
pub conversation_id: ConversationId,
pub model: String,
pub initial_messages: Option<Vec<EventMsg>>,
pub rollout_path: PathBuf,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(untagged)]
pub enum GetConversationSummaryParams {
RolloutPath {
#[serde(rename = "rolloutPath")]
rollout_path: PathBuf,
},
ConversationId {
#[serde(rename = "conversationId")]
conversation_id: ConversationId,
},
}
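// With `#[serde(untagged)]`, callers pass exactly one of these shapes
// (illustrative, not part of the original source):
//
//   {"rolloutPath": "/path/to/rollout.jsonl"}
//   {"conversationId": "67e55044-10b1-426f-9247-bb680e5fe0c8"}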
#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct GetConversationSummaryResponse {
pub summary: ConversationSummary,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct ListConversationsParams {
pub page_size: Option<usize>,
pub cursor: Option<String>,
pub model_providers: Option<Vec<String>>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct ConversationSummary {
pub conversation_id: ConversationId,
pub path: PathBuf,
pub preview: String,
pub timestamp: Option<String>,
pub model_provider: String,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct ListConversationsResponse {
pub items: Vec<ConversationSummary>,
pub next_cursor: Option<String>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct ResumeConversationParams {
pub path: Option<PathBuf>,
pub conversation_id: Option<ConversationId>,
pub history: Option<Vec<ResponseItem>>,
pub overrides: Option<NewConversationParams>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct AddConversationSubscriptionResponse {
#[schemars(with = "String")]
pub subscription_id: Uuid,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct ArchiveConversationParams {
pub conversation_id: ConversationId,
pub rollout_path: PathBuf,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct ArchiveConversationResponse {}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct RemoveConversationSubscriptionResponse {}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct LoginApiKeyParams {
pub api_key: String,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct LoginApiKeyResponse {}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct LoginChatGptResponse {
#[schemars(with = "String")]
pub login_id: Uuid,
pub auth_url: String,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct GitDiffToRemoteResponse {
pub sha: GitSha,
pub diff: String,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct CancelLoginChatGptParams {
#[schemars(with = "String")]
pub login_id: Uuid,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct GitDiffToRemoteParams {
pub cwd: PathBuf,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct CancelLoginChatGptResponse {}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct LogoutChatGptParams {}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct LogoutChatGptResponse {}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct GetAuthStatusParams {
pub include_token: Option<bool>,
pub refresh_token: Option<bool>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct ExecOneOffCommandParams {
pub command: Vec<String>,
pub timeout_ms: Option<u64>,
pub cwd: Option<PathBuf>,
pub sandbox_policy: Option<SandboxPolicy>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct ExecOneOffCommandResponse {
pub exit_code: i32,
pub stdout: String,
pub stderr: String,
}
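// For illustration, a one-off exec round trip might look like
//   request:  {"command": ["ls", "-la"], "timeoutMs": 5000, "cwd": "/tmp"}
//   response: {"exitCode": 0, "stdout": "...", "stderr": ""}
// (`sandboxPolicy` is optional; the exact fallback when it is omitted lives in
// the handler, which this hunk does not show.)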
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct GetAuthStatusResponse {
pub auth_method: Option<AuthMode>,
pub auth_token: Option<String>,
pub requires_openai_auth: Option<bool>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct GetUserAgentResponse {
pub user_agent: String,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct UserInfoResponse {
pub alleged_user_email: Option<String>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct GetUserSavedConfigResponse {
pub config: UserSavedConfig,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct SetDefaultModelParams {
pub model: Option<String>,
pub reasoning_effort: Option<ReasoningEffort>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct SetDefaultModelResponse {}
#[derive(Deserialize, Debug, Clone, PartialEq, Serialize, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct UserSavedConfig {
pub approval_policy: Option<AskForApproval>,
pub sandbox_mode: Option<SandboxMode>,
pub sandbox_settings: Option<SandboxSettings>,
pub forced_chatgpt_workspace_id: Option<String>,
pub forced_login_method: Option<ForcedLoginMethod>,
pub model: Option<String>,
pub model_reasoning_effort: Option<ReasoningEffort>,
pub model_reasoning_summary: Option<ReasoningSummary>,
pub model_verbosity: Option<Verbosity>,
pub tools: Option<Tools>,
pub profile: Option<String>,
pub profiles: HashMap<String, Profile>,
}
#[derive(Deserialize, Debug, Clone, PartialEq, Serialize, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct Profile {
pub model: Option<String>,
pub model_provider: Option<String>,
pub approval_policy: Option<AskForApproval>,
pub model_reasoning_effort: Option<ReasoningEffort>,
pub model_reasoning_summary: Option<ReasoningSummary>,
pub model_verbosity: Option<Verbosity>,
pub chatgpt_base_url: Option<String>,
}
#[derive(Deserialize, Debug, Clone, PartialEq, Serialize, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct Tools {
pub web_search: Option<bool>,
pub view_image: Option<bool>,
}
#[derive(Deserialize, Debug, Clone, PartialEq, Serialize, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct SandboxSettings {
#[serde(default)]
pub writable_roots: Vec<PathBuf>,
pub network_access: Option<bool>,
pub exclude_tmpdir_env_var: Option<bool>,
pub exclude_slash_tmp: Option<bool>,
}
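// For illustration: `#[serde(default)]` lets `writableRoots` be omitted and
// deserialize to an empty Vec, so both of these parse:
//   {"writableRoots": ["/tmp/project"], "networkAccess": true}
//   {"networkAccess": false}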
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct SendUserMessageParams {
pub conversation_id: ConversationId,
pub items: Vec<InputItem>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct SendUserTurnParams {
pub conversation_id: ConversationId,
pub items: Vec<InputItem>,
pub cwd: PathBuf,
pub approval_policy: AskForApproval,
pub sandbox_policy: SandboxPolicy,
pub model: String,
pub effort: Option<ReasoningEffort>,
pub summary: ReasoningSummary,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct SendUserTurnResponse {}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct InterruptConversationParams {
pub conversation_id: ConversationId,
}
#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct InterruptConversationResponse {
pub abort_reason: TurnAbortReason,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct SendUserMessageResponse {}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct AddConversationListenerParams {
pub conversation_id: ConversationId,
#[serde(default)]
pub experimental_raw_events: bool,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct RemoveConversationListenerParams {
#[schemars(with = "String")]
pub subscription_id: Uuid,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[serde(tag = "type", content = "data")]
pub enum InputItem {
Text { text: String },
Image { image_url: String },
LocalImage { path: PathBuf },
}
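// For illustration, the adjacently tagged layout (`tag = "type"`,
// `content = "data"`) produces payloads such as
//   {"type": "text", "data": {"text": "Count to 5"}}
//   {"type": "localImage", "data": {"path": "/tmp/shot.png"}}
// The container-level `rename_all` camel-cases the variant names only; fields
// inside a variant keep their Rust names (e.g. `image_url`) unless renamed
// separately.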
// Deprecated notifications (v1)
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct LoginChatGptCompleteNotification {
#[schemars(with = "String")]
pub login_id: Uuid,
pub success: bool,
pub error: Option<String>,
}
#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct SessionConfiguredNotification {
pub session_id: ConversationId,
pub model: String,
pub reasoning_effort: Option<ReasoningEffort>,
pub history_log_id: u64,
#[ts(type = "number")]
pub history_entry_count: usize,
pub initial_messages: Option<Vec<EventMsg>>,
pub rollout_path: PathBuf,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct AuthStatusChangeNotification {
pub auth_method: Option<AuthMode>,
}

View File

@@ -1,122 +0,0 @@
use codex_protocol::ConversationId;
use codex_protocol::account::PlanType;
use codex_protocol::config_types::ReasoningEffort;
use codex_protocol::protocol::RateLimitSnapshot;
use schemars::JsonSchema;
use serde::Deserialize;
use serde::Serialize;
use ts_rs::TS;
use uuid::Uuid;
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(tag = "type", rename_all = "camelCase")]
#[ts(tag = "type")]
pub enum Account {
#[serde(rename = "apiKey", rename_all = "camelCase")]
#[ts(rename = "apiKey", rename_all = "camelCase")]
ApiKey { api_key: String },
#[serde(rename = "chatgpt", rename_all = "camelCase")]
#[ts(rename = "chatgpt", rename_all = "camelCase")]
ChatGpt {
email: Option<String>,
plan_type: PlanType,
},
}
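// For illustration, this internally tagged enum serialized as
//   {"type": "apiKey", "apiKey": "sk-..."}
//   {"type": "chatgpt", "email": "user@example.com", "planType": "plus"}
// (the per-variant `rename_all = "camelCase"` camel-cases the field names;
// the "plus" plan value is assumed for the sketch).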
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(tag = "type")]
#[ts(tag = "type")]
pub enum LoginAccountParams {
#[serde(rename = "apiKey")]
#[ts(rename = "apiKey")]
ApiKey {
#[serde(rename = "apiKey")]
#[ts(rename = "apiKey")]
api_key: String,
},
#[serde(rename = "chatgpt")]
#[ts(rename = "chatgpt")]
ChatGpt,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct LoginAccountResponse {
/// Only set if the login method is ChatGPT.
#[schemars(with = "String")]
pub login_id: Option<Uuid>,
/// URL the client should open in a browser to initiate the OAuth flow.
/// Only set if the login method is ChatGPT.
pub auth_url: Option<String>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct LogoutAccountResponse {}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct GetAccountRateLimitsResponse {
pub rate_limits: RateLimitSnapshot,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct GetAccountResponse {
pub account: Account,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct ListModelsParams {
/// Optional page size; defaults to a reasonable server-side value.
pub page_size: Option<usize>,
/// Opaque pagination cursor returned by a previous call.
pub cursor: Option<String>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct Model {
pub id: String,
pub model: String,
pub display_name: String,
pub description: String,
pub supported_reasoning_efforts: Vec<ReasoningEffortOption>,
pub default_reasoning_effort: ReasoningEffort,
// Only one model should be marked as default.
pub is_default: bool,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct ReasoningEffortOption {
pub reasoning_effort: ReasoningEffort,
pub description: String,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct ListModelsResponse {
pub items: Vec<Model>,
/// Opaque cursor to pass to the next call to continue after the last item.
/// If `None`, there are no more items to return.
pub next_cursor: Option<String>,
}
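// For illustration: a client pages through `model/list` by echoing back
// `nextCursor` until it comes back null, e.g.
//   {"items": [...], "nextCursor": "2"}   -> request again with cursor "2"
//   {"items": [...], "nextCursor": null}  -> no more items
// (In the removed handler later in this diff, the cursor was simply the
// stringified index of the next item.)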
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct UploadFeedbackParams {
pub classification: String,
pub reason: Option<String>,
pub conversation_id: Option<ConversationId>,
pub include_logs: bool,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct UploadFeedbackResponse {
pub thread_id: String,
}

View File

@@ -19,14 +19,11 @@ anyhow = { workspace = true }
codex-arg0 = { workspace = true }
codex-common = { workspace = true, features = ["cli"] }
codex-core = { workspace = true }
codex-backend-client = { workspace = true }
codex-file-search = { workspace = true }
codex-login = { workspace = true }
codex-protocol = { workspace = true }
codex-app-server-protocol = { workspace = true }
codex-feedback = { workspace = true }
codex-utils-json-to-toml = { workspace = true }
chrono = { workspace = true }
serde = { workspace = true, features = ["derive"] }
serde_json = { workspace = true }
tokio = { workspace = true, features = [
@@ -38,7 +35,6 @@ tokio = { workspace = true, features = [
] }
tracing = { workspace = true, features = ["log"] }
tracing-subscriber = { workspace = true, features = ["env-filter", "fmt"] }
opentelemetry-appender-tracing = { workspace = true }
uuid = { workspace = true, features = ["serde", "v7"] }
[dev-dependencies]
@@ -48,7 +44,6 @@ base64 = { workspace = true }
core_test_support = { workspace = true }
os_info = { workspace = true }
pretty_assertions = { workspace = true }
serial_test = { workspace = true }
tempfile = { workspace = true }
toml = { workspace = true }
wiremock = { workspace = true }

View File

@@ -1,7 +1,6 @@
use crate::error_code::INTERNAL_ERROR_CODE;
use crate::error_code::INVALID_REQUEST_ERROR_CODE;
use crate::fuzzy_file_search::run_fuzzy_file_search;
use crate::models::supported_models;
use crate::outgoing_message::OutgoingMessageSender;
use crate::outgoing_message::OutgoingNotification;
use codex_app_server_protocol::AddConversationListenerParams;
@@ -10,7 +9,6 @@ use codex_app_server_protocol::ApplyPatchApprovalParams;
use codex_app_server_protocol::ApplyPatchApprovalResponse;
use codex_app_server_protocol::ArchiveConversationParams;
use codex_app_server_protocol::ArchiveConversationResponse;
use codex_app_server_protocol::AuthMode;
use codex_app_server_protocol::AuthStatusChangeNotification;
use codex_app_server_protocol::ClientRequest;
use codex_app_server_protocol::ConversationSummary;
@@ -20,9 +18,6 @@ use codex_app_server_protocol::ExecOneOffCommandParams;
use codex_app_server_protocol::ExecOneOffCommandResponse;
use codex_app_server_protocol::FuzzyFileSearchParams;
use codex_app_server_protocol::FuzzyFileSearchResponse;
use codex_app_server_protocol::GetAccountRateLimitsResponse;
use codex_app_server_protocol::GetConversationSummaryParams;
use codex_app_server_protocol::GetConversationSummaryResponse;
use codex_app_server_protocol::GetUserAgentResponse;
use codex_app_server_protocol::GetUserSavedConfigResponse;
use codex_app_server_protocol::GitDiffToRemoteResponse;
@@ -32,8 +27,6 @@ use codex_app_server_protocol::InterruptConversationResponse;
use codex_app_server_protocol::JSONRPCErrorError;
use codex_app_server_protocol::ListConversationsParams;
use codex_app_server_protocol::ListConversationsResponse;
use codex_app_server_protocol::ListModelsParams;
use codex_app_server_protocol::ListModelsResponse;
use codex_app_server_protocol::LoginApiKeyParams;
use codex_app_server_protocol::LoginApiKeyResponse;
use codex_app_server_protocol::LoginChatGptCompleteNotification;
@@ -54,57 +47,51 @@ use codex_app_server_protocol::ServerRequestPayload;
use codex_app_server_protocol::SessionConfiguredNotification;
use codex_app_server_protocol::SetDefaultModelParams;
use codex_app_server_protocol::SetDefaultModelResponse;
use codex_app_server_protocol::UploadFeedbackParams;
use codex_app_server_protocol::UploadFeedbackResponse;
use codex_app_server_protocol::UserInfoResponse;
use codex_app_server_protocol::UserSavedConfig;
use codex_backend_client::Client as BackendClient;
use codex_core::AuthManager;
use codex_core::CodexConversation;
use codex_core::ConversationManager;
use codex_core::Cursor as RolloutCursor;
use codex_core::INTERACTIVE_SESSION_SOURCES;
use codex_core::InitialHistory;
use codex_core::NewConversation;
use codex_core::RolloutRecorder;
use codex_core::SessionMeta;
use codex_core::auth::CLIENT_ID;
use codex_core::auth::get_auth_file;
use codex_core::auth::login_with_api_key;
use codex_core::auth::try_read_auth_json;
use codex_core::config::Config;
use codex_core::config::ConfigOverrides;
use codex_core::config::ConfigToml;
use codex_core::config::edit::ConfigEditsBuilder;
use codex_core::config_loader::load_config_as_toml;
use codex_core::config::load_config_as_toml;
use codex_core::config_edit::CONFIG_KEY_EFFORT;
use codex_core::config_edit::CONFIG_KEY_MODEL;
use codex_core::config_edit::persist_overrides_and_clear_if_none;
use codex_core::default_client::get_codex_user_agent;
use codex_core::exec::ExecParams;
use codex_core::exec_env::create_env;
use codex_core::find_conversation_path_by_id_str;
use codex_core::get_platform_sandbox;
use codex_core::git_info::git_diff_to_remote;
use codex_core::protocol::ApplyPatchApprovalRequestEvent;
use codex_core::protocol::Event;
use codex_core::protocol::EventMsg;
use codex_core::protocol::ExecApprovalRequestEvent;
use codex_core::protocol::InputItem as CoreInputItem;
use codex_core::protocol::Op;
use codex_core::protocol::ReviewDecision;
use codex_core::read_head_for_summary;
use codex_feedback::CodexFeedback;
use codex_login::ServerOptions as LoginServerOptions;
use codex_login::ShutdownHandle;
use codex_login::run_login_server;
use codex_protocol::ConversationId;
use codex_protocol::config_types::ForcedLoginMethod;
use codex_protocol::items::TurnItem;
use codex_protocol::models::ContentItem;
use codex_protocol::models::ResponseItem;
use codex_protocol::protocol::RateLimitSnapshot;
use codex_protocol::protocol::RolloutItem;
use codex_protocol::protocol::InputMessageKind;
use codex_protocol::protocol::USER_MESSAGE_BEGIN;
use codex_protocol::user_input::UserInput as CoreInputItem;
use codex_utils_json_to_toml::json_to_toml;
use std::collections::HashMap;
use std::ffi::OsStr;
use std::io::Error as IoError;
use std::path::Path;
use std::path::PathBuf;
use std::sync::Arc;
use std::sync::atomic::AtomicBool;
@@ -120,6 +107,7 @@ use uuid::Uuid;
// Duration before a ChatGPT login attempt is abandoned.
const LOGIN_CHATGPT_TIMEOUT: Duration = Duration::from_secs(10 * 60);
struct ActiveLogin {
shutdown_handle: ShutdownHandle,
login_id: Uuid,
@@ -143,7 +131,6 @@ pub(crate) struct CodexMessageProcessor {
// Queue of pending interrupt requests per conversation. We reply when TurnAborted arrives.
pending_interrupts: Arc<Mutex<HashMap<ConversationId, Vec<RequestId>>>>,
pending_fuzzy_searches: Arc<Mutex<HashMap<String, Arc<AtomicBool>>>>,
feedback: CodexFeedback,
}
impl CodexMessageProcessor {
@@ -153,7 +140,6 @@ impl CodexMessageProcessor {
outgoing: Arc<OutgoingMessageSender>,
codex_linux_sandbox_exe: Option<PathBuf>,
config: Arc<Config>,
feedback: CodexFeedback,
) -> Self {
Self {
auth_manager,
@@ -165,7 +151,6 @@ impl CodexMessageProcessor {
active_login: Arc::new(Mutex::new(None)),
pending_interrupts: Arc::new(Mutex::new(HashMap::new())),
pending_fuzzy_searches: Arc::new(Mutex::new(HashMap::new())),
feedback,
}
}
@@ -180,36 +165,9 @@ impl CodexMessageProcessor {
// created before processing any subsequent messages.
self.process_new_conversation(request_id, params).await;
}
ClientRequest::GetConversationSummary { request_id, params } => {
self.get_conversation_summary(request_id, params).await;
}
ClientRequest::ListConversations { request_id, params } => {
self.handle_list_conversations(request_id, params).await;
}
ClientRequest::ListModels { request_id, params } => {
self.list_models(request_id, params).await;
}
ClientRequest::LoginAccount {
request_id,
params: _,
} => {
self.send_unimplemented_error(request_id, "account/login")
.await;
}
ClientRequest::LogoutAccount {
request_id,
params: _,
} => {
self.send_unimplemented_error(request_id, "account/logout")
.await;
}
ClientRequest::GetAccount {
request_id,
params: _,
} => {
self.send_unimplemented_error(request_id, "account/read")
.await;
}
ClientRequest::ResumeConversation { request_id, params } => {
self.handle_resume_conversation(request_id, params).await;
}
@@ -282,27 +240,9 @@ impl CodexMessageProcessor {
ClientRequest::ExecOneOffCommand { request_id, params } => {
self.exec_one_off_command(request_id, params).await;
}
ClientRequest::GetAccountRateLimits {
request_id,
params: _,
} => {
self.get_account_rate_limits(request_id).await;
}
ClientRequest::UploadFeedback { request_id, params } => {
self.upload_feedback(request_id, params).await;
}
}
}
async fn send_unimplemented_error(&self, request_id: RequestId, method: &str) {
let error = JSONRPCErrorError {
code: INTERNAL_ERROR_CODE,
message: format!("{method} is not implemented yet"),
data: None,
};
self.outgoing.send_error(request_id, error).await;
}
async fn login_api_key(&mut self, request_id: RequestId, params: LoginApiKeyParams) {
if matches!(
self.config.forced_login_method,
@@ -324,11 +264,7 @@ impl CodexMessageProcessor {
}
}
match login_with_api_key(
&self.config.codex_home,
&params.api_key,
self.config.cli_auth_credentials_store_mode,
) {
match login_with_api_key(&self.config.codex_home, &params.api_key) {
Ok(()) => {
self.auth_manager.reload();
self.outgoing
@@ -372,7 +308,6 @@ impl CodexMessageProcessor {
config.codex_home.clone(),
CLIENT_ID.to_string(),
config.forced_chatgpt_workspace_id.clone(),
config.cli_auth_credentials_store_mode,
)
};
@@ -592,53 +527,6 @@ impl CodexMessageProcessor {
self.outgoing.send_response(request_id, response).await;
}
async fn get_account_rate_limits(&self, request_id: RequestId) {
match self.fetch_account_rate_limits().await {
Ok(rate_limits) => {
let response = GetAccountRateLimitsResponse { rate_limits };
self.outgoing.send_response(request_id, response).await;
}
Err(error) => {
self.outgoing.send_error(request_id, error).await;
}
}
}
async fn fetch_account_rate_limits(&self) -> Result<RateLimitSnapshot, JSONRPCErrorError> {
let Some(auth) = self.auth_manager.auth() else {
return Err(JSONRPCErrorError {
code: INVALID_REQUEST_ERROR_CODE,
message: "codex account authentication required to read rate limits".to_string(),
data: None,
});
};
if auth.mode != AuthMode::ChatGPT {
return Err(JSONRPCErrorError {
code: INVALID_REQUEST_ERROR_CODE,
message: "chatgpt authentication required to read rate limits".to_string(),
data: None,
});
}
let client = BackendClient::from_auth(self.config.chatgpt_base_url.clone(), &auth)
.await
.map_err(|err| JSONRPCErrorError {
code: INTERNAL_ERROR_CODE,
message: format!("failed to construct backend client: {err}"),
data: None,
})?;
client
.get_rate_limits()
.await
.map_err(|err| JSONRPCErrorError {
code: INTERNAL_ERROR_CODE,
message: format!("failed to fetch codex rate limits: {err}"),
data: None,
})
}
async fn get_user_saved_config(&self, request_id: RequestId) {
let toml_value = match load_config_as_toml(&self.config.codex_home).await {
Ok(val) => val,
@@ -675,8 +563,12 @@ impl CodexMessageProcessor {
}
async fn get_user_info(&self, request_id: RequestId) {
// Read alleged user email from cached auth (best-effort; not verified).
let alleged_user_email = self.auth_manager.auth().and_then(|a| a.get_account_email());
// Read alleged user email from auth.json (best-effort; not verified).
let auth_path = get_auth_file(&self.config.codex_home);
let alleged_user_email = match try_read_auth_json(&auth_path) {
Ok(auth) => auth.tokens.and_then(|t| t.id_token.email),
Err(_) => None,
};
let response = UserInfoResponse { alleged_user_email };
self.outgoing.send_response(request_id, response).await;
@@ -687,12 +579,19 @@ impl CodexMessageProcessor {
model,
reasoning_effort,
} = params;
let effort_str = reasoning_effort.map(|effort| effort.to_string());
match ConfigEditsBuilder::new(&self.config.codex_home)
.with_profile(self.config.active_profile.as_deref())
.set_model(model.as_deref(), reasoning_effort)
.apply()
.await
let overrides: [(&[&str], Option<&str>); 2] = [
(&[CONFIG_KEY_MODEL], model.as_deref()),
(&[CONFIG_KEY_EFFORT], effort_str.as_deref()),
];
match persist_overrides_and_clear_if_none(
&self.config.codex_home,
self.config.active_profile.as_deref(),
&overrides,
)
.await
{
Ok(()) => {
let response = SetDefaultModelResponse {};
@@ -701,7 +600,7 @@ impl CodexMessageProcessor {
Err(err) => {
let error = JSONRPCErrorError {
code: INTERNAL_ERROR_CODE,
message: format!("failed to persist model selection: {err}"),
message: format!("failed to persist overrides: {err}"),
data: None,
};
self.outgoing.send_error(request_id, error).await;
@@ -823,101 +722,24 @@ impl CodexMessageProcessor {
}
}
async fn get_conversation_summary(
&self,
request_id: RequestId,
params: GetConversationSummaryParams,
) {
let path = match params {
GetConversationSummaryParams::RolloutPath { rollout_path } => {
if rollout_path.is_relative() {
self.config.codex_home.join(&rollout_path)
} else {
rollout_path
}
}
GetConversationSummaryParams::ConversationId { conversation_id } => {
match codex_core::find_conversation_path_by_id_str(
&self.config.codex_home,
&conversation_id.to_string(),
)
.await
{
Ok(Some(p)) => p,
_ => {
let error = JSONRPCErrorError {
code: INVALID_REQUEST_ERROR_CODE,
message: format!(
"no rollout found for conversation id {conversation_id}"
),
data: None,
};
self.outgoing.send_error(request_id, error).await;
return;
}
}
}
};
let fallback_provider = self.config.model_provider_id.as_str();
match read_summary_from_rollout(&path, fallback_provider).await {
Ok(summary) => {
let response = GetConversationSummaryResponse { summary };
self.outgoing.send_response(request_id, response).await;
}
Err(err) => {
let error = JSONRPCErrorError {
code: INTERNAL_ERROR_CODE,
message: format!(
"failed to load conversation summary from {}: {}",
path.display(),
err
),
data: None,
};
self.outgoing.send_error(request_id, error).await;
}
}
}
async fn handle_list_conversations(
&self,
request_id: RequestId,
params: ListConversationsParams,
) {
let ListConversationsParams {
page_size,
cursor,
model_providers: model_provider,
} = params;
let page_size = page_size.unwrap_or(25);
let page_size = params.page_size.unwrap_or(25);
// Decode the optional cursor string to a Cursor via serde (Cursor implements Deserialize from string)
let cursor_obj: Option<RolloutCursor> = match cursor {
let cursor_obj: Option<RolloutCursor> = match params.cursor {
Some(s) => serde_json::from_str::<RolloutCursor>(&format!("\"{s}\"")).ok(),
None => None,
};
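// For illustration: a cursor of Some("abc") is wrapped to the JSON string
// "\"abc\"" before deserializing, because `Cursor` implements `Deserialize`
// from a JSON string value rather than from a bare &str.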
let cursor_ref = cursor_obj.as_ref();
let model_provider_filter = match model_provider {
Some(providers) => {
if providers.is_empty() {
None
} else {
Some(providers)
}
}
None => Some(vec![self.config.model_provider_id.clone()]),
};
let model_provider_slice = model_provider_filter.as_deref();
let fallback_provider = self.config.model_provider_id.clone();
let page = match RolloutRecorder::list_conversations(
&self.config.codex_home,
page_size,
cursor_ref,
INTERACTIVE_SESSION_SOURCES,
model_provider_slice,
fallback_provider.as_str(),
)
.await
{
@@ -936,7 +758,7 @@ impl CodexMessageProcessor {
let items = page
.items
.into_iter()
.filter_map(|it| extract_conversation_summary(it.path, &it.head, &fallback_provider))
.filter_map(|it| extract_conversation_summary(it.path, &it.head))
.collect();
// Encode next_cursor as a plain string
@@ -952,72 +774,13 @@ impl CodexMessageProcessor {
self.outgoing.send_response(request_id, response).await;
}
async fn list_models(&self, request_id: RequestId, params: ListModelsParams) {
let ListModelsParams { page_size, cursor } = params;
let models = supported_models();
let total = models.len();
if total == 0 {
let response = ListModelsResponse {
items: Vec::new(),
next_cursor: None,
};
self.outgoing.send_response(request_id, response).await;
return;
}
let effective_page_size = page_size.unwrap_or(total).max(1).min(total);
let start = match cursor {
Some(cursor) => match cursor.parse::<usize>() {
Ok(idx) => idx,
Err(_) => {
let error = JSONRPCErrorError {
code: INVALID_REQUEST_ERROR_CODE,
message: format!("invalid cursor: {cursor}"),
data: None,
};
self.outgoing.send_error(request_id, error).await;
return;
}
},
None => 0,
};
if start > total {
let error = JSONRPCErrorError {
code: INVALID_REQUEST_ERROR_CODE,
message: format!("cursor {start} exceeds total models {total}"),
data: None,
};
self.outgoing.send_error(request_id, error).await;
return;
}
let end = start.saturating_add(effective_page_size).min(total);
let items = models[start..end].to_vec();
let next_cursor = if end < total {
Some(end.to_string())
} else {
None
};
let response = ListModelsResponse { items, next_cursor };
self.outgoing.send_response(request_id, response).await;
}
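// Worked example of the removed pagination arithmetic: with total = 5,
// page_size = Some(2), cursor = Some("4"):
//   effective_page_size = 2.max(1).min(5) = 2
//   start = 4, end = (4 + 2).min(5) = 5
//   items = models[4..5] (a single item), next_cursor = None since end == total.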
async fn handle_resume_conversation(
&self,
request_id: RequestId,
params: ResumeConversationParams,
) {
let ResumeConversationParams {
path,
conversation_id,
history,
overrides,
} = params;
// Derive a Config using the same logic as new conversation, honoring overrides if provided.
let config = match overrides {
let config = match params.overrides {
Some(overrides) => {
derive_config_from_params(overrides, self.codex_linux_sandbox_exe.clone()).await
}
@@ -1026,88 +789,21 @@ impl CodexMessageProcessor {
let config = match config {
Ok(cfg) => cfg,
Err(err) => {
self.send_invalid_request_error(
request_id,
format!("error deriving config: {err}"),
)
.await;
let error = JSONRPCErrorError {
code: INVALID_REQUEST_ERROR_CODE,
message: format!("error deriving config: {err}"),
data: None,
};
self.outgoing.send_error(request_id, error).await;
return;
}
};
let conversation_history = if let Some(path) = path {
match RolloutRecorder::get_rollout_history(&path).await {
Ok(initial_history) => initial_history,
Err(err) => {
self.send_invalid_request_error(
request_id,
format!("failed to load rollout `{}`: {err}", path.display()),
)
.await;
return;
}
}
} else if let Some(conversation_id) = conversation_id {
match find_conversation_path_by_id_str(
&self.config.codex_home,
&conversation_id.to_string(),
)
.await
{
Ok(Some(found_path)) => {
match RolloutRecorder::get_rollout_history(&found_path).await {
Ok(initial_history) => initial_history,
Err(err) => {
self.send_invalid_request_error(
request_id,
format!(
"failed to load rollout `{}` for conversation {conversation_id}: {err}",
found_path.display()
),
).await;
return;
}
}
}
Ok(None) => {
self.send_invalid_request_error(
request_id,
format!("no rollout found for conversation id {conversation_id}"),
)
.await;
return;
}
Err(err) => {
self.send_invalid_request_error(
request_id,
format!("failed to locate conversation id {conversation_id}: {err}"),
)
.await;
return;
}
}
} else {
match history {
Some(history) if !history.is_empty() => InitialHistory::Forked(
history.into_iter().map(RolloutItem::ResponseItem).collect(),
),
Some(_) | None => {
self.send_invalid_request_error(
request_id,
"either path, conversation id or non empty history must be provided"
.to_string(),
)
.await;
return;
}
}
};
match self
.conversation_manager
.resume_conversation_with_history(
.resume_conversation_from_rollout(
config,
conversation_history,
params.path.clone(),
self.auth_manager.clone(),
)
.await
@@ -1130,16 +826,24 @@ impl CodexMessageProcessor {
},
))
.await;
let initial_messages = session_configured
.initial_messages
.map(|msgs| msgs.into_iter().collect());
let initial_messages = session_configured.initial_messages.map(|msgs| {
msgs.into_iter()
.filter(|event| {
// Don't send non-plain user messages (like user instructions
// or environment context) back so they don't get rendered.
if let EventMsg::UserMessage(user_message) = event {
return matches!(user_message.kind, Some(InputMessageKind::Plain));
}
true
})
.collect()
});
// Reply with conversation id + model and initial messages (when present)
let response = codex_app_server_protocol::ResumeConversationResponse {
conversation_id,
model: session_configured.model.clone(),
initial_messages,
rollout_path: session_configured.rollout_path.clone(),
};
self.outgoing.send_response(request_id, response).await;
}
@@ -1154,15 +858,6 @@ impl CodexMessageProcessor {
}
}
async fn send_invalid_request_error(&self, request_id: RequestId, message: String) {
let error = JSONRPCErrorError {
code: INVALID_REQUEST_ERROR_CODE,
message,
data: None,
};
self.outgoing.send_error(request_id, error).await;
}
async fn archive_conversation(&self, request_id: RequestId, params: ArchiveConversationParams) {
let ArchiveConversationParams {
conversation_id,
@@ -1427,10 +1122,7 @@ impl CodexMessageProcessor {
request_id: RequestId,
params: AddConversationListenerParams,
) {
let AddConversationListenerParams {
conversation_id,
experimental_raw_events,
} = params;
let AddConversationListenerParams { conversation_id } = params;
let Ok(conversation) = self
.conversation_manager
.get_conversation(conversation_id)
@@ -1467,11 +1159,6 @@ impl CodexMessageProcessor {
}
};
if let EventMsg::RawResponseItem(_) = &event.msg
&& !experimental_raw_events {
continue;
}
// For now, we send a notification for every event,
// JSON-serializing the `Event` as-is, but these should
// be migrated to be variants of `ServerNotification`
@@ -1589,77 +1276,6 @@ impl CodexMessageProcessor {
let response = FuzzyFileSearchResponse { files: results };
self.outgoing.send_response(request_id, response).await;
}
async fn upload_feedback(&self, request_id: RequestId, params: UploadFeedbackParams) {
let UploadFeedbackParams {
classification,
reason,
conversation_id,
include_logs,
} = params;
let snapshot = self.feedback.snapshot(conversation_id);
let thread_id = snapshot.thread_id.clone();
let validated_rollout_path = if include_logs {
match conversation_id {
Some(conv_id) => self.resolve_rollout_path(conv_id).await,
None => None,
}
} else {
None
};
let upload_result = tokio::task::spawn_blocking(move || {
let rollout_path_ref = validated_rollout_path.as_deref();
snapshot.upload_feedback(
&classification,
reason.as_deref(),
include_logs,
rollout_path_ref,
)
})
.await;
let upload_result = match upload_result {
Ok(result) => result,
Err(join_err) => {
let error = JSONRPCErrorError {
code: INTERNAL_ERROR_CODE,
message: format!("failed to upload feedback: {join_err}"),
data: None,
};
self.outgoing.send_error(request_id, error).await;
return;
}
};
match upload_result {
Ok(()) => {
let response = UploadFeedbackResponse { thread_id };
self.outgoing.send_response(request_id, response).await;
}
Err(err) => {
let error = JSONRPCErrorError {
code: INTERNAL_ERROR_CODE,
message: format!("failed to upload feedback: {err}"),
data: None,
};
self.outgoing.send_error(request_id, error).await;
}
}
}
async fn resolve_rollout_path(&self, conversation_id: ConversationId) -> Option<PathBuf> {
match self
.conversation_manager
.get_conversation(conversation_id)
.await
{
Ok(conv) => Some(conv.rollout_path()),
Err(_) => None,
}
}
}
async fn apply_bespoke_event_handling(
@@ -1697,7 +1313,6 @@ async fn apply_bespoke_event_handling(
command,
cwd,
reason,
risk,
parsed_cmd,
}) => {
let params = ExecCommandApprovalParams {
@@ -1706,7 +1321,6 @@ async fn apply_bespoke_event_handling(
command,
cwd,
reason,
risk,
parsed_cmd,
};
let rx = outgoing
@@ -1718,15 +1332,6 @@ async fn apply_bespoke_event_handling(
on_exec_approval_response(event_id, rx, conversation).await;
});
}
EventMsg::TokenCount(token_count_event) => {
if let Some(rate_limits) = token_count_event.rate_limits {
outgoing
.send_server_notification(ServerNotification::AccountRateLimitsUpdated(
rate_limits,
))
.await;
}
}
// If this is a TurnAborted, reply to any pending interrupt requests.
EventMsg::TurnAborted(turn_aborted_event) => {
let pending = {
@@ -1753,15 +1358,13 @@ async fn derive_config_from_params(
) -> std::io::Result<Config> {
let NewConversationParams {
model,
model_provider,
profile,
cwd,
approval_policy,
sandbox: sandbox_mode,
config: cli_overrides,
base_instructions,
developer_instructions,
compact_prompt,
include_plan_tool,
include_apply_patch_tool,
} = params;
let overrides = ConfigOverrides {
@@ -1771,15 +1374,14 @@ async fn derive_config_from_params(
cwd: cwd.map(PathBuf::from),
approval_policy,
sandbox_mode,
model_provider,
model_provider: None,
codex_linux_sandbox_exe,
base_instructions,
developer_instructions,
compact_prompt,
include_plan_tool,
include_apply_patch_tool,
include_view_image_tool: None,
show_raw_agent_reasoning: None,
tools_web_search_request: None,
experimental_sandbox_command_assessment: None,
additional_writable_roots: Vec::new(),
};
@@ -1870,54 +1472,9 @@ async fn on_exec_approval_response(
}
}
async fn read_summary_from_rollout(
path: &Path,
fallback_provider: &str,
) -> std::io::Result<ConversationSummary> {
let head = read_head_for_summary(path).await?;
let Some(first) = head.first() else {
return Err(IoError::other(format!(
"rollout at {} is empty",
path.display()
)));
};
let session_meta = serde_json::from_value::<SessionMeta>(first.clone()).map_err(|_| {
IoError::other(format!(
"rollout at {} does not start with session metadata",
path.display()
))
})?;
if let Some(summary) =
extract_conversation_summary(path.to_path_buf(), &head, fallback_provider)
{
return Ok(summary);
}
let timestamp = if session_meta.timestamp.is_empty() {
None
} else {
Some(session_meta.timestamp.clone())
};
let model_provider = session_meta
.model_provider
.unwrap_or_else(|| fallback_provider.to_string());
Ok(ConversationSummary {
conversation_id: session_meta.id,
timestamp,
path: path.to_path_buf(),
preview: String::new(),
model_provider,
})
}
fn extract_conversation_summary(
path: PathBuf,
head: &[serde_json::Value],
fallback_provider: &str,
) -> Option<ConversationSummary> {
let session_meta = match head.first() {
Some(first_line) => serde_json::from_value::<SessionMeta>(first_line.clone()).ok()?,
@@ -1927,8 +1484,18 @@ fn extract_conversation_summary(
let preview = head
.iter()
.filter_map(|value| serde_json::from_value::<ResponseItem>(value.clone()).ok())
.find_map(|item| match codex_core::parse_turn_item(&item) {
Some(TurnItem::UserMessage(user)) => Some(user.message()),
.find_map(|item| match item {
ResponseItem::Message { content, .. } => {
content.into_iter().find_map(|content| match content {
ContentItem::InputText { text } => {
match InputMessageKind::from(("user", &text)) {
InputMessageKind::Plain => Some(text),
_ => None,
}
}
_ => None,
})
}
_ => None,
})?;
@@ -1942,17 +1509,12 @@ fn extract_conversation_summary(
} else {
Some(session_meta.timestamp.clone())
};
let conversation_id = session_meta.id;
let model_provider = session_meta
.model_provider
.unwrap_or_else(|| fallback_provider.to_string());
Some(ConversationSummary {
conversation_id,
conversation_id: session_meta.id,
timestamp,
path,
preview: preview.to_string(),
model_provider,
})
}
@@ -1962,7 +1524,6 @@ mod tests {
use anyhow::Result;
use pretty_assertions::assert_eq;
use serde_json::json;
use tempfile::TempDir;
#[test]
fn extract_conversation_summary_prefers_plain_user_messages() -> Result<()> {
@@ -1977,8 +1538,7 @@ mod tests {
"cwd": "/",
"originator": "codex",
"cli_version": "0.0.0",
"instructions": null,
"model_provider": "test-provider"
"instructions": null
}),
json!({
"type": "message",
@@ -1998,62 +1558,15 @@ mod tests {
}),
];
let summary =
extract_conversation_summary(path.clone(), &head, "test-provider").expect("summary");
let summary = extract_conversation_summary(path.clone(), &head).expect("summary");
let expected = ConversationSummary {
conversation_id,
timestamp,
path,
preview: "Count to 5".to_string(),
model_provider: "test-provider".to_string(),
};
assert_eq!(summary, expected);
Ok(())
}
#[tokio::test]
async fn read_summary_from_rollout_returns_empty_preview_when_no_user_message() -> Result<()> {
use codex_protocol::protocol::RolloutItem;
use codex_protocol::protocol::RolloutLine;
use codex_protocol::protocol::SessionMetaLine;
use std::fs;
let temp_dir = TempDir::new()?;
let path = temp_dir.path().join("rollout.jsonl");
let conversation_id = ConversationId::from_string("bfd12a78-5900-467b-9bc5-d3d35df08191")?;
let timestamp = "2025-09-05T16:53:11.850Z".to_string();
let session_meta = SessionMeta {
id: conversation_id,
timestamp: timestamp.clone(),
model_provider: None,
..SessionMeta::default()
};
let line = RolloutLine {
timestamp: timestamp.clone(),
item: RolloutItem::SessionMeta(SessionMetaLine {
meta: session_meta.clone(),
git: None,
}),
};
fs::write(&path, format!("{}\n", serde_json::to_string(&line)?))?;
let summary = read_summary_from_rollout(path.as_path(), "fallback").await?;
let expected = ConversationSummary {
conversation_id,
timestamp: Some(timestamp),
path: path.clone(),
preview: String::new(),
model_provider: "fallback".to_string(),
};
assert_eq!(summary, expected);
assert_eq!(summary.conversation_id, conversation_id);
assert_eq!(
summary.timestamp,
Some("2025-09-05T16:53:11.850Z".to_string())
);
assert_eq!(summary.path, path);
assert_eq!(summary.preview, "Count to 5");
Ok(())
}
}

View File

@@ -46,7 +46,6 @@ pub(crate) async fn run_fuzzy_file_search(
threads,
cancel_flag,
COMPUTE_INDICES,
true,
) {
Ok(res) => Ok((root, res)),
Err(err) => Err((root, err)),

View File

@@ -1,38 +1,32 @@
#![deny(clippy::print_stdout, clippy::print_stderr)]
use codex_common::CliConfigOverrides;
use codex_core::config::Config;
use codex_core::config::ConfigOverrides;
use opentelemetry_appender_tracing::layer::OpenTelemetryTracingBridge;
use std::io::ErrorKind;
use std::io::Result as IoResult;
use std::path::PathBuf;
use crate::message_processor::MessageProcessor;
use crate::outgoing_message::OutgoingMessage;
use crate::outgoing_message::OutgoingMessageSender;
use codex_common::CliConfigOverrides;
use codex_core::config::Config;
use codex_core::config::ConfigOverrides;
use codex_app_server_protocol::JSONRPCMessage;
use codex_feedback::CodexFeedback;
use tokio::io::AsyncBufReadExt;
use tokio::io::AsyncWriteExt;
use tokio::io::BufReader;
use tokio::io::{self};
use tokio::sync::mpsc;
use tracing::Level;
use tracing::debug;
use tracing::error;
use tracing::info;
use tracing_subscriber::EnvFilter;
use tracing_subscriber::Layer;
use tracing_subscriber::filter::Targets;
use tracing_subscriber::layer::SubscriberExt;
use tracing_subscriber::util::SubscriberInitExt;
use crate::message_processor::MessageProcessor;
use crate::outgoing_message::OutgoingMessage;
use crate::outgoing_message::OutgoingMessageSender;
mod codex_message_processor;
mod error_code;
mod fuzzy_file_search;
mod message_processor;
mod models;
mod outgoing_message;
/// Size of the bounded channels used to communicate between tasks. The value
@@ -44,6 +38,13 @@ pub async fn run_main(
codex_linux_sandbox_exe: Option<PathBuf>,
cli_config_overrides: CliConfigOverrides,
) -> IoResult<()> {
// Install a simple subscriber so `tracing` output is visible. Users can
// control the log level with `RUST_LOG`.
tracing_subscriber::fmt()
.with_writer(std::io::stderr)
.with_env_filter(EnvFilter::from_default_env())
.init();
// Set up channels.
let (incoming_tx, mut incoming_rx) = mpsc::channel::<JSONRPCMessage>(CHANNEL_CAPACITY);
let (outgoing_tx, mut outgoing_rx) = mpsc::unbounded_channel::<OutgoingMessage>();
@@ -85,38 +86,6 @@ pub async fn run_main(
std::io::Error::new(ErrorKind::InvalidData, format!("error loading config: {e}"))
})?;
let feedback = CodexFeedback::new();
let otel =
codex_core::otel_init::build_provider(&config, env!("CARGO_PKG_VERSION")).map_err(|e| {
std::io::Error::new(
ErrorKind::InvalidData,
format!("error loading otel config: {e}"),
)
})?;
// Install a simple subscriber so `tracing` output is visible. Users can
// control the log level with `RUST_LOG`.
let stderr_fmt = tracing_subscriber::fmt::layer()
.with_writer(std::io::stderr)
.with_filter(EnvFilter::from_default_env());
let feedback_layer = tracing_subscriber::fmt::layer()
.with_writer(feedback.make_writer())
.with_ansi(false)
.with_target(false)
.with_filter(Targets::new().with_default(Level::TRACE));
let _ = tracing_subscriber::registry()
.with(stderr_fmt)
.with(feedback_layer)
.with(otel.as_ref().map(|provider| {
OpenTelemetryTracingBridge::new(&provider.logger).with_filter(
tracing_subscriber::filter::filter_fn(codex_core::otel_init::codex_export_filter),
)
}))
.try_init();
// Task: process incoming messages.
let processor_handle = tokio::spawn({
let outgoing_message_sender = OutgoingMessageSender::new(outgoing_tx);
@@ -124,7 +93,6 @@ pub async fn run_main(
outgoing_message_sender,
codex_linux_sandbox_exe,
std::sync::Arc::new(config),
feedback.clone(),
);
async move {
while let Some(msg) = incoming_rx.recv().await {

View File

@@ -17,7 +17,6 @@ use codex_core::ConversationManager;
use codex_core::config::Config;
use codex_core::default_client::USER_AGENT_SUFFIX;
use codex_core::default_client::get_codex_user_agent;
use codex_feedback::CodexFeedback;
use codex_protocol::protocol::SessionSource;
use std::sync::Arc;
@@ -34,14 +33,9 @@ impl MessageProcessor {
outgoing: OutgoingMessageSender,
codex_linux_sandbox_exe: Option<PathBuf>,
config: Arc<Config>,
feedback: CodexFeedback,
) -> Self {
let outgoing = Arc::new(outgoing);
let auth_manager = AuthManager::shared(
config.codex_home.clone(),
false,
config.cli_auth_credentials_store_mode,
);
let auth_manager = AuthManager::shared(config.codex_home.clone(), false);
let conversation_manager = Arc::new(ConversationManager::new(
auth_manager.clone(),
SessionSource::VSCode,
@@ -52,7 +46,6 @@ impl MessageProcessor {
outgoing.clone(),
codex_linux_sandbox_exe,
config,
feedback,
);
Self {

View File

@@ -1,38 +0,0 @@
use codex_app_server_protocol::Model;
use codex_app_server_protocol::ReasoningEffortOption;
use codex_common::model_presets::ModelPreset;
use codex_common::model_presets::ReasoningEffortPreset;
use codex_common::model_presets::builtin_model_presets;
pub fn supported_models() -> Vec<Model> {
builtin_model_presets(None)
.into_iter()
.map(model_from_preset)
.collect()
}
fn model_from_preset(preset: ModelPreset) -> Model {
Model {
id: preset.id.to_string(),
model: preset.model.to_string(),
display_name: preset.display_name.to_string(),
description: preset.description.to_string(),
supported_reasoning_efforts: reasoning_efforts_from_preset(
preset.supported_reasoning_efforts,
),
default_reasoning_effort: preset.default_reasoning_effort,
is_default: preset.is_default,
}
}
fn reasoning_efforts_from_preset(
efforts: &'static [ReasoningEffortPreset],
) -> Vec<ReasoningEffortOption> {
efforts
.iter()
.map(|preset| ReasoningEffortOption {
reasoning_effort: preset.effort,
description: preset.description.to_string(),
})
.collect()
}

View File

@@ -142,8 +142,6 @@ pub(crate) struct OutgoingError {
#[cfg(test)]
mod tests {
use codex_app_server_protocol::LoginChatGptCompleteNotification;
use codex_protocol::protocol::RateLimitSnapshot;
use codex_protocol::protocol::RateLimitWindow;
use pretty_assertions::assert_eq;
use serde_json::json;
use uuid::Uuid;
@@ -166,7 +164,6 @@ mod tests {
"params": {
"loginId": Uuid::nil(),
"success": true,
"error": null,
},
}),
serde_json::to_value(jsonrpc_notification)
@@ -174,34 +171,4 @@ mod tests {
"ensure the strum macros serialize the method field correctly"
);
}
#[test]
fn verify_account_rate_limits_notification_serialization() {
let notification = ServerNotification::AccountRateLimitsUpdated(RateLimitSnapshot {
primary: Some(RateLimitWindow {
used_percent: 25.0,
window_minutes: Some(15),
resets_at: Some(123),
}),
secondary: None,
});
let jsonrpc_notification = OutgoingMessage::AppServerNotification(notification);
assert_eq!(
json!({
"method": "account/rateLimits/updated",
"params": {
"primary": {
"used_percent": 25.0,
"window_minutes": 15,
"resets_at": 123,
},
"secondary": null,
},
}),
serde_json::to_value(jsonrpc_notification)
.expect("ensure the notification serializes correctly"),
"ensure the notification serializes correctly"
);
}
}

View File

@@ -9,10 +9,7 @@ path = "lib.rs"
[dependencies]
anyhow = { workspace = true }
assert_cmd = { workspace = true }
base64 = { workspace = true }
chrono = { workspace = true }
codex-app-server-protocol = { workspace = true }
codex-core = { workspace = true }
serde = { workspace = true }
serde_json = { workspace = true }
tokio = { workspace = true, features = [

View File

@@ -1,135 +0,0 @@
use std::path::Path;
use anyhow::Context;
use anyhow::Result;
use base64::Engine;
use base64::engine::general_purpose::URL_SAFE_NO_PAD;
use chrono::DateTime;
use chrono::Utc;
use codex_core::auth::AuthCredentialsStoreMode;
use codex_core::auth::AuthDotJson;
use codex_core::auth::save_auth;
use codex_core::token_data::TokenData;
use codex_core::token_data::parse_id_token;
use serde_json::json;
/// Builder for writing a fake ChatGPT auth.json in tests.
#[derive(Debug, Clone)]
pub struct ChatGptAuthFixture {
access_token: String,
refresh_token: String,
account_id: Option<String>,
claims: ChatGptIdTokenClaims,
last_refresh: Option<Option<DateTime<Utc>>>,
}
impl ChatGptAuthFixture {
pub fn new(access_token: impl Into<String>) -> Self {
Self {
access_token: access_token.into(),
refresh_token: "refresh-token".to_string(),
account_id: None,
claims: ChatGptIdTokenClaims::default(),
last_refresh: None,
}
}
pub fn refresh_token(mut self, refresh_token: impl Into<String>) -> Self {
self.refresh_token = refresh_token.into();
self
}
pub fn account_id(mut self, account_id: impl Into<String>) -> Self {
self.account_id = Some(account_id.into());
self
}
pub fn plan_type(mut self, plan_type: impl Into<String>) -> Self {
self.claims.plan_type = Some(plan_type.into());
self
}
pub fn email(mut self, email: impl Into<String>) -> Self {
self.claims.email = Some(email.into());
self
}
pub fn last_refresh(mut self, last_refresh: Option<DateTime<Utc>>) -> Self {
self.last_refresh = Some(last_refresh);
self
}
pub fn claims(mut self, claims: ChatGptIdTokenClaims) -> Self {
self.claims = claims;
self
}
}
#[derive(Debug, Clone, Default)]
pub struct ChatGptIdTokenClaims {
pub email: Option<String>,
pub plan_type: Option<String>,
}
impl ChatGptIdTokenClaims {
pub fn new() -> Self {
Self::default()
}
pub fn email(mut self, email: impl Into<String>) -> Self {
self.email = Some(email.into());
self
}
pub fn plan_type(mut self, plan_type: impl Into<String>) -> Self {
self.plan_type = Some(plan_type.into());
self
}
}
pub fn encode_id_token(claims: &ChatGptIdTokenClaims) -> Result<String> {
let header = json!({ "alg": "none", "typ": "JWT" });
let mut payload = serde_json::Map::new();
if let Some(email) = &claims.email {
payload.insert("email".to_string(), json!(email));
}
if let Some(plan_type) = &claims.plan_type {
payload.insert(
"https://api.openai.com/auth".to_string(),
json!({ "chatgpt_plan_type": plan_type }),
);
}
let payload = serde_json::Value::Object(payload);
let header_b64 =
URL_SAFE_NO_PAD.encode(serde_json::to_vec(&header).context("serialize jwt header")?);
let payload_b64 =
URL_SAFE_NO_PAD.encode(serde_json::to_vec(&payload).context("serialize jwt payload")?);
let signature_b64 = URL_SAFE_NO_PAD.encode(b"signature");
Ok(format!("{header_b64}.{payload_b64}.{signature_b64}"))
}
pub fn write_chatgpt_auth(
codex_home: &Path,
fixture: ChatGptAuthFixture,
cli_auth_credentials_store_mode: AuthCredentialsStoreMode,
) -> Result<()> {
let id_token_raw = encode_id_token(&fixture.claims)?;
let id_token = parse_id_token(&id_token_raw).context("parse id token")?;
let tokens = TokenData {
id_token,
access_token: fixture.access_token,
refresh_token: fixture.refresh_token,
account_id: fixture.account_id,
};
let last_refresh = fixture.last_refresh.unwrap_or_else(|| Some(Utc::now()));
let auth = AuthDotJson {
openai_api_key: None,
tokens: Some(tokens),
last_refresh,
};
save_auth(codex_home, &auth, cli_auth_credentials_store_mode).context("write auth.json")
}
}

View File

@@ -1,12 +1,7 @@
mod auth_fixtures;
mod mcp_process;
mod mock_model_server;
mod responses;
pub use auth_fixtures::ChatGptAuthFixture;
pub use auth_fixtures::ChatGptIdTokenClaims;
pub use auth_fixtures::encode_id_token;
pub use auth_fixtures::write_chatgpt_auth;
use codex_app_server_protocol::JSONRPCResponse;
pub use mcp_process::McpProcess;
pub use mock_model_server::create_mock_chat_completions_server;

View File

@@ -21,7 +21,6 @@ use codex_app_server_protocol::GetAuthStatusParams;
use codex_app_server_protocol::InitializeParams;
use codex_app_server_protocol::InterruptConversationParams;
use codex_app_server_protocol::ListConversationsParams;
use codex_app_server_protocol::ListModelsParams;
use codex_app_server_protocol::LoginApiKeyParams;
use codex_app_server_protocol::NewConversationParams;
use codex_app_server_protocol::RemoveConversationListenerParams;
@@ -30,7 +29,6 @@ use codex_app_server_protocol::SendUserMessageParams;
use codex_app_server_protocol::SendUserTurnParams;
use codex_app_server_protocol::ServerRequest;
use codex_app_server_protocol::SetDefaultModelParams;
use codex_app_server_protocol::UploadFeedbackParams;
use codex_app_server_protocol::JSONRPCError;
use codex_app_server_protocol::JSONRPCMessage;
@@ -238,20 +236,6 @@ impl McpProcess {
self.send_request("getUserAgent", None).await
}
/// Send an `account/rateLimits/read` JSON-RPC request.
pub async fn send_get_account_rate_limits_request(&mut self) -> anyhow::Result<i64> {
self.send_request("account/rateLimits/read", None).await
}
/// Send a `feedback/upload` JSON-RPC request.
pub async fn send_upload_feedback_request(
&mut self,
params: UploadFeedbackParams,
) -> anyhow::Result<i64> {
let params = Some(serde_json::to_value(params)?);
self.send_request("feedback/upload", params).await
}
/// Send a `userInfo` JSON-RPC request.
pub async fn send_user_info_request(&mut self) -> anyhow::Result<i64> {
self.send_request("userInfo", None).await
@@ -275,15 +259,6 @@ impl McpProcess {
self.send_request("listConversations", params).await
}
/// Send a `model/list` JSON-RPC request.
pub async fn send_list_models_request(
&mut self,
params: ListModelsParams,
) -> anyhow::Result<i64> {
let params = Some(serde_json::to_value(params)?);
self.send_request("model/list", params).await
}
/// Send a `resumeConversation` JSON-RPC request.
pub async fn send_resume_conversation_request(
&mut self,

View File

@@ -1,4 +1,5 @@
use anyhow::Result;
use std::path::Path;
use app_test_support::McpProcess;
use app_test_support::to_response;
use codex_app_server_protocol::ArchiveConversationParams;
@@ -8,37 +9,45 @@ use codex_app_server_protocol::NewConversationParams;
use codex_app_server_protocol::NewConversationResponse;
use codex_app_server_protocol::RequestId;
use codex_core::ARCHIVED_SESSIONS_SUBDIR;
use std::path::Path;
use tempfile::TempDir;
use tokio::time::timeout;
const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10);
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn archive_conversation_moves_rollout_into_archived_directory() -> Result<()> {
let codex_home = TempDir::new()?;
create_config_toml(codex_home.path())?;
async fn archive_conversation_moves_rollout_into_archived_directory() {
let codex_home = TempDir::new().expect("create temp dir");
create_config_toml(codex_home.path()).expect("write config.toml");
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let mut mcp = McpProcess::new(codex_home.path())
.await
.expect("spawn mcp process");
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize())
.await
.expect("initialize timeout")
.expect("initialize request");
let new_request_id = mcp
.send_new_conversation_request(NewConversationParams {
model: Some("mock-model".to_string()),
..Default::default()
})
.await?;
.await
.expect("send newConversation");
let new_response: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(new_request_id)),
)
.await??;
.await
.expect("newConversation timeout")
.expect("newConversation response");
let NewConversationResponse {
conversation_id,
rollout_path,
..
} = to_response::<NewConversationResponse>(new_response)?;
} = to_response::<NewConversationResponse>(new_response)
.expect("deserialize newConversation response");
assert!(
rollout_path.exists(),
@@ -51,15 +60,19 @@ async fn archive_conversation_moves_rollout_into_archived_directory() -> Result<
conversation_id,
rollout_path: rollout_path.clone(),
})
.await?;
.await
.expect("send archiveConversation");
let archive_response: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(archive_request_id)),
)
.await??;
.await
.expect("archiveConversation timeout")
.expect("archiveConversation response");
let _: ArchiveConversationResponse =
to_response::<ArchiveConversationResponse>(archive_response)?;
to_response::<ArchiveConversationResponse>(archive_response)
.expect("deserialize archiveConversation response");
let archived_directory = codex_home.path().join(ARCHIVED_SESSIONS_SUBDIR);
let archived_rollout_path =
@@ -77,8 +90,6 @@ async fn archive_conversation_moves_rollout_into_archived_directory() -> Result<
"expected archived rollout path {} to exist",
archived_rollout_path.display()
);
Ok(())
}
fn create_config_toml(codex_home: &Path) -> std::io::Result<()> {

View File

@@ -1,4 +1,5 @@
use anyhow::Result;
use std::path::Path;
use app_test_support::McpProcess;
use app_test_support::to_response;
use codex_app_server_protocol::AuthMode;
@@ -10,7 +11,6 @@ use codex_app_server_protocol::LoginApiKeyParams;
use codex_app_server_protocol::LoginApiKeyResponse;
use codex_app_server_protocol::RequestId;
use pretty_assertions::assert_eq;
use std::path::Path;
use tempfile::TempDir;
use tokio::time::timeout;
@@ -71,99 +71,125 @@ forced_login_method = "{forced_method}"
std::fs::write(config_toml, contents)
}
async fn login_with_api_key_via_request(mcp: &mut McpProcess, api_key: &str) -> Result<()> {
async fn login_with_api_key_via_request(mcp: &mut McpProcess, api_key: &str) {
let request_id = mcp
.send_login_api_key_request(LoginApiKeyParams {
api_key: api_key.to_string(),
})
.await?;
.await
.unwrap_or_else(|e| panic!("send loginApiKey: {e}"));
let resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(request_id)),
)
.await??;
let _: LoginApiKeyResponse = to_response(resp)?;
Ok(())
.await
.unwrap_or_else(|e| panic!("loginApiKey timeout: {e}"))
.unwrap_or_else(|e| panic!("loginApiKey response: {e}"));
let _: LoginApiKeyResponse =
to_response(resp).unwrap_or_else(|e| panic!("deserialize login response: {e}"));
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn get_auth_status_no_auth() -> Result<()> {
let codex_home = TempDir::new()?;
create_config_toml(codex_home.path())?;
async fn get_auth_status_no_auth() {
let codex_home = TempDir::new().unwrap_or_else(|e| panic!("create tempdir: {e}"));
create_config_toml(codex_home.path()).unwrap_or_else(|err| panic!("write config.toml: {err}"));
let mut mcp = McpProcess::new_with_env(codex_home.path(), &[("OPENAI_API_KEY", None)]).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let mut mcp = McpProcess::new_with_env(codex_home.path(), &[("OPENAI_API_KEY", None)])
.await
.expect("spawn mcp process");
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize())
.await
.expect("init timeout")
.expect("init failed");
let request_id = mcp
.send_get_auth_status_request(GetAuthStatusParams {
include_token: Some(true),
refresh_token: Some(false),
})
.await?;
.await
.expect("send getAuthStatus");
let resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(request_id)),
)
.await??;
let status: GetAuthStatusResponse = to_response(resp)?;
.await
.expect("getAuthStatus timeout")
.expect("getAuthStatus response");
let status: GetAuthStatusResponse = to_response(resp).expect("deserialize status");
assert_eq!(status.auth_method, None, "expected no auth method");
assert_eq!(status.auth_token, None, "expected no token");
Ok(())
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn get_auth_status_with_api_key() -> Result<()> {
let codex_home = TempDir::new()?;
create_config_toml(codex_home.path())?;
async fn get_auth_status_with_api_key() {
let codex_home = TempDir::new().unwrap_or_else(|e| panic!("create tempdir: {e}"));
create_config_toml(codex_home.path()).unwrap_or_else(|err| panic!("write config.toml: {err}"));
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let mut mcp = McpProcess::new(codex_home.path())
.await
.expect("spawn mcp process");
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize())
.await
.expect("init timeout")
.expect("init failed");
login_with_api_key_via_request(&mut mcp, "sk-test-key").await?;
login_with_api_key_via_request(&mut mcp, "sk-test-key").await;
let request_id = mcp
.send_get_auth_status_request(GetAuthStatusParams {
include_token: Some(true),
refresh_token: Some(false),
})
.await?;
.await
.expect("send getAuthStatus");
let resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(request_id)),
)
.await??;
let status: GetAuthStatusResponse = to_response(resp)?;
.await
.expect("getAuthStatus timeout")
.expect("getAuthStatus response");
let status: GetAuthStatusResponse = to_response(resp).expect("deserialize status");
assert_eq!(status.auth_method, Some(AuthMode::ApiKey));
assert_eq!(status.auth_token, Some("sk-test-key".to_string()));
Ok(())
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn get_auth_status_with_api_key_when_auth_not_required() -> Result<()> {
let codex_home = TempDir::new()?;
create_config_toml_custom_provider(codex_home.path(), false)?;
async fn get_auth_status_with_api_key_when_auth_not_required() {
let codex_home = TempDir::new().unwrap_or_else(|e| panic!("create tempdir: {e}"));
create_config_toml_custom_provider(codex_home.path(), false)
.unwrap_or_else(|err| panic!("write config.toml: {err}"));
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let mut mcp = McpProcess::new(codex_home.path())
.await
.expect("spawn mcp process");
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize())
.await
.expect("init timeout")
.expect("init failed");
login_with_api_key_via_request(&mut mcp, "sk-test-key").await?;
login_with_api_key_via_request(&mut mcp, "sk-test-key").await;
let request_id = mcp
.send_get_auth_status_request(GetAuthStatusParams {
include_token: Some(true),
refresh_token: Some(false),
})
.await?;
.await
.expect("send getAuthStatus");
let resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(request_id)),
)
.await??;
let status: GetAuthStatusResponse = to_response(resp)?;
.await
.expect("getAuthStatus timeout")
.expect("getAuthStatus response");
let status: GetAuthStatusResponse = to_response(resp).expect("deserialize status");
assert_eq!(status.auth_method, None, "expected no auth method");
assert_eq!(status.auth_token, None, "expected no token");
assert_eq!(
@@ -171,60 +197,76 @@ async fn get_auth_status_with_api_key_when_auth_not_required() -> Result<()> {
Some(false),
"requires_openai_auth should be false",
);
Ok(())
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn get_auth_status_with_api_key_no_include_token() -> Result<()> {
let codex_home = TempDir::new()?;
create_config_toml(codex_home.path())?;
async fn get_auth_status_with_api_key_no_include_token() {
let codex_home = TempDir::new().unwrap_or_else(|e| panic!("create tempdir: {e}"));
create_config_toml(codex_home.path()).unwrap_or_else(|err| panic!("write config.toml: {err}"));
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let mut mcp = McpProcess::new(codex_home.path())
.await
.expect("spawn mcp process");
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize())
.await
.expect("init timeout")
.expect("init failed");
login_with_api_key_via_request(&mut mcp, "sk-test-key").await?;
login_with_api_key_via_request(&mut mcp, "sk-test-key").await;
// Build params via a struct so the None field is omitted from the wire JSON.
let params = GetAuthStatusParams {
include_token: None,
refresh_token: Some(false),
};
let request_id = mcp.send_get_auth_status_request(params).await?;
let request_id = mcp
.send_get_auth_status_request(params)
.await
.expect("send getAuthStatus");
let resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(request_id)),
)
.await??;
let status: GetAuthStatusResponse = to_response(resp)?;
.await
.expect("getAuthStatus timeout")
.expect("getAuthStatus response");
let status: GetAuthStatusResponse = to_response(resp).expect("deserialize status");
assert_eq!(status.auth_method, Some(AuthMode::ApiKey));
assert!(status.auth_token.is_none(), "token must be omitted");
Ok(())
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn login_api_key_rejected_when_forced_chatgpt() -> Result<()> {
let codex_home = TempDir::new()?;
create_config_toml_forced_login(codex_home.path(), "chatgpt")?;
async fn login_api_key_rejected_when_forced_chatgpt() {
let codex_home = TempDir::new().unwrap_or_else(|e| panic!("create tempdir: {e}"));
create_config_toml_forced_login(codex_home.path(), "chatgpt")
.unwrap_or_else(|err| panic!("write config.toml: {err}"));
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let mut mcp = McpProcess::new(codex_home.path())
.await
.expect("spawn mcp process");
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize())
.await
.expect("init timeout")
.expect("init failed");
let request_id = mcp
.send_login_api_key_request(LoginApiKeyParams {
api_key: "sk-test-key".to_string(),
})
.await?;
.await
.expect("send loginApiKey");
let err: JSONRPCError = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_error_message(RequestId::Integer(request_id)),
)
.await??;
.await
.expect("loginApiKey error timeout")
.expect("loginApiKey error");
assert_eq!(
err.error.message,
"API key login is disabled. Use ChatGPT login instead."
);
Ok(())
}

View File

@@ -1,4 +1,5 @@
use anyhow::Result;
use std::path::Path;
use app_test_support::McpProcess;
use app_test_support::create_final_assistant_message_sse_response;
use app_test_support::create_mock_chat_completions_server;
@@ -29,29 +30,29 @@ use codex_protocol::config_types::SandboxMode;
use codex_protocol::parse_command::ParsedCommand;
use codex_protocol::protocol::Event;
use codex_protocol::protocol::EventMsg;
use codex_protocol::protocol::InputMessageKind;
use pretty_assertions::assert_eq;
use std::env;
use std::path::Path;
use tempfile::TempDir;
use tokio::time::timeout;
const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10);
#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
async fn test_codex_jsonrpc_conversation_flow() -> Result<()> {
async fn test_codex_jsonrpc_conversation_flow() {
if env::var(CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR).is_ok() {
println!(
"Skipping test because it cannot execute when network is disabled in a Codex sandbox."
);
return Ok(());
return;
}
let tmp = TempDir::new()?;
let tmp = TempDir::new().expect("tmp dir");
// Temporary Codex home with config pointing at the mock server.
let codex_home = tmp.path().join("codex_home");
std::fs::create_dir(&codex_home)?;
std::fs::create_dir(&codex_home).expect("create codex home dir");
let working_directory = tmp.path().join("workdir");
std::fs::create_dir(&working_directory)?;
std::fs::create_dir(&working_directory).expect("create working directory");
// Create a mock model server that immediately ends each turn.
// Two turns are expected: initial session configure + one user message.
@@ -61,15 +62,20 @@ async fn test_codex_jsonrpc_conversation_flow() -> Result<()> {
Some(&working_directory),
Some(5000),
"call1234",
)?,
create_final_assistant_message_sse_response("Enjoy your new git repo!")?,
)
.expect("create shell sse response"),
create_final_assistant_message_sse_response("Enjoy your new git repo!")
.expect("create final assistant message"),
];
let server = create_mock_chat_completions_server(responses).await;
create_config_toml(&codex_home, &server.uri())?;
create_config_toml(&codex_home, &server.uri()).expect("write config");
// Start MCP server and initialize.
let mut mcp = McpProcess::new(&codex_home).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let mut mcp = McpProcess::new(&codex_home).await.expect("spawn mcp");
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize())
.await
.expect("init timeout")
.expect("init error");
// 1) newConversation
let new_conv_id = mcp
@@ -77,13 +83,17 @@ async fn test_codex_jsonrpc_conversation_flow() -> Result<()> {
cwd: Some(working_directory.to_string_lossy().into_owned()),
..Default::default()
})
.await?;
.await
.expect("send newConversation");
let new_conv_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(new_conv_id)),
)
.await??;
let new_conv_resp = to_response::<NewConversationResponse>(new_conv_resp)?;
.await
.expect("newConversation timeout")
.expect("newConversation resp");
let new_conv_resp = to_response::<NewConversationResponse>(new_conv_resp)
.expect("deserialize newConversation response");
let NewConversationResponse {
conversation_id,
model,
@@ -94,18 +104,19 @@ async fn test_codex_jsonrpc_conversation_flow() -> Result<()> {
// 2) addConversationListener
let add_listener_id = mcp
.send_add_conversation_listener_request(AddConversationListenerParams {
conversation_id,
experimental_raw_events: false,
})
.await?;
.send_add_conversation_listener_request(AddConversationListenerParams { conversation_id })
.await
.expect("send addConversationListener");
let add_listener_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(add_listener_id)),
)
.await??;
.await
.expect("addConversationListener timeout")
.expect("addConversationListener resp");
let AddConversationSubscriptionResponse { subscription_id } =
to_response::<AddConversationSubscriptionResponse>(add_listener_resp)?;
to_response::<AddConversationSubscriptionResponse>(add_listener_resp)
.expect("deserialize addConversationListener response");
// 3) sendUserMessage (should trigger notifications; we only validate an OK response)
let send_user_id = mcp
@@ -115,13 +126,17 @@ async fn test_codex_jsonrpc_conversation_flow() -> Result<()> {
text: "text".to_string(),
}],
})
.await?;
.await
.expect("send sendUserMessage");
let send_user_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(send_user_id)),
)
.await??;
let SendUserMessageResponse {} = to_response::<SendUserMessageResponse>(send_user_resp)?;
.await
.expect("sendUserMessage timeout")
.expect("sendUserMessage resp");
let SendUserMessageResponse {} = to_response::<SendUserMessageResponse>(send_user_resp)
.expect("deserialize sendUserMessage response");
// Verify the task_finished notification is received.
// Note this also ensures that the final request to the server was made.
@@ -129,7 +144,9 @@ async fn test_codex_jsonrpc_conversation_flow() -> Result<()> {
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("codex/event/task_complete"),
)
.await??;
.await
.expect("task_finished_notification timeout")
.expect("task_finished_notification resp");
let serde_json::Value::Object(map) = task_finished_notification
.params
.expect("notification should have params")
@@ -147,31 +164,33 @@ async fn test_codex_jsonrpc_conversation_flow() -> Result<()> {
.send_remove_conversation_listener_request(RemoveConversationListenerParams {
subscription_id,
})
.await?;
.await
.expect("send removeConversationListener");
let remove_listener_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(remove_listener_id)),
)
.await??;
let RemoveConversationSubscriptionResponse {} = to_response(remove_listener_resp)?;
Ok(())
.await
.expect("removeConversationListener timeout")
.expect("removeConversationListener resp");
let RemoveConversationSubscriptionResponse {} =
to_response(remove_listener_resp).expect("deserialize removeConversationListener response");
}
#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
async fn test_send_user_turn_changes_approval_policy_behavior() -> Result<()> {
async fn test_send_user_turn_changes_approval_policy_behavior() {
if env::var(CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR).is_ok() {
println!(
"Skipping test because it cannot execute when network is disabled in a Codex sandbox."
);
return Ok(());
return;
}
let tmp = TempDir::new()?;
let tmp = TempDir::new().expect("tmp dir");
let codex_home = tmp.path().join("codex_home");
std::fs::create_dir(&codex_home)?;
std::fs::create_dir(&codex_home).expect("create codex home dir");
let working_directory = tmp.path().join("workdir");
std::fs::create_dir(&working_directory)?;
std::fs::create_dir(&working_directory).expect("create working directory");
// The mock server requests a python shell call for the first and second turns, then finishes.
let responses = vec![
@@ -184,8 +203,10 @@ async fn test_send_user_turn_changes_approval_policy_behavior() -> Result<()> {
Some(&working_directory),
Some(5000),
"call1",
)?,
create_final_assistant_message_sse_response("done 1")?,
)
.expect("create first shell sse response"),
create_final_assistant_message_sse_response("done 1")
.expect("create final assistant message 1"),
create_shell_sse_response(
vec![
"python3".to_string(),
@@ -195,15 +216,20 @@ async fn test_send_user_turn_changes_approval_policy_behavior() -> Result<()> {
Some(&working_directory),
Some(5000),
"call2",
)?,
create_final_assistant_message_sse_response("done 2")?,
)
.expect("create second shell sse response"),
create_final_assistant_message_sse_response("done 2")
.expect("create final assistant message 2"),
];
let server = create_mock_chat_completions_server(responses).await;
create_config_toml(&codex_home, &server.uri())?;
create_config_toml(&codex_home, &server.uri()).expect("write config");
// Start MCP server and initialize.
let mut mcp = McpProcess::new(&codex_home).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let mut mcp = McpProcess::new(&codex_home).await.expect("spawn mcp");
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize())
.await
.expect("init timeout")
.expect("init error");
// 1) Start conversation with approval_policy=untrusted
let new_conv_id = mcp
@@ -211,30 +237,36 @@ async fn test_send_user_turn_changes_approval_policy_behavior() -> Result<()> {
cwd: Some(working_directory.to_string_lossy().into_owned()),
..Default::default()
})
.await?;
.await
.expect("send newConversation");
let new_conv_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(new_conv_id)),
)
.await??;
.await
.expect("newConversation timeout")
.expect("newConversation resp");
let NewConversationResponse {
conversation_id, ..
} = to_response::<NewConversationResponse>(new_conv_resp)?;
} = to_response::<NewConversationResponse>(new_conv_resp)
.expect("deserialize newConversation response");
// 2) addConversationListener
let add_listener_id = mcp
.send_add_conversation_listener_request(AddConversationListenerParams {
conversation_id,
experimental_raw_events: false,
})
.await?;
let _: AddConversationSubscriptionResponse = to_response::<AddConversationSubscriptionResponse>(
timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(add_listener_id)),
.send_add_conversation_listener_request(AddConversationListenerParams { conversation_id })
.await
.expect("send addConversationListener");
let _: AddConversationSubscriptionResponse =
to_response::<AddConversationSubscriptionResponse>(
timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(add_listener_id)),
)
.await
.expect("addConversationListener timeout")
.expect("addConversationListener resp"),
)
.await??,
)?;
.expect("deserialize addConversationListener response");
// 3) sendUserMessage triggers a shell call; approval policy is Untrusted so we should get an elicitation
let send_user_id = mcp
@@ -244,21 +276,27 @@ async fn test_send_user_turn_changes_approval_policy_behavior() -> Result<()> {
text: "run python".to_string(),
}],
})
.await?;
.await
.expect("send sendUserMessage");
let _send_user_resp: SendUserMessageResponse = to_response::<SendUserMessageResponse>(
timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(send_user_id)),
)
.await??,
)?;
.await
.expect("sendUserMessage timeout")
.expect("sendUserMessage resp"),
)
.expect("deserialize sendUserMessage response");
// Expect an ExecCommandApproval request (elicitation)
let request = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_request_message(),
)
.await??;
.await
.expect("waiting for exec approval request timeout")
.expect("exec approval request");
let ServerRequest::ExecCommandApproval { request_id, params } = request else {
panic!("expected ExecCommandApproval request, got: {request:?}");
};
@@ -274,7 +312,6 @@ async fn test_send_user_turn_changes_approval_policy_behavior() -> Result<()> {
],
cwd: working_directory.clone(),
reason: None,
risk: None,
parsed_cmd: vec![ParsedCommand::Unknown {
cmd: "python3 -c 'print(42)'".to_string()
}],
@@ -287,14 +324,17 @@ async fn test_send_user_turn_changes_approval_policy_behavior() -> Result<()> {
request_id,
serde_json::json!({ "decision": codex_core::protocol::ReviewDecision::Approved }),
)
.await?;
.await
.expect("send approval response");
// Wait for first TaskComplete
let _ = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("codex/event/task_complete"),
)
.await??;
.await
.expect("task_complete 1 timeout")
.expect("task_complete 1 notification");
// 4) sendUserTurn with approval_policy=never should run without elicitation
let send_turn_id = mcp
@@ -310,15 +350,19 @@ async fn test_send_user_turn_changes_approval_policy_behavior() -> Result<()> {
effort: Some(ReasoningEffort::Medium),
summary: ReasoningSummary::Auto,
})
.await?;
.await
.expect("send sendUserTurn");
// Acknowledge sendUserTurn
let _send_turn_resp: SendUserTurnResponse = to_response::<SendUserTurnResponse>(
timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(send_turn_id)),
)
.await??,
)?;
.await
.expect("sendUserTurn timeout")
.expect("sendUserTurn resp"),
)
.expect("deserialize sendUserTurn response");
// Ensure we do NOT receive an ExecCommandApproval request before the task completes.
// If any Request is seen while waiting for task_complete, the helper will error and the test fails.
@@ -326,31 +370,31 @@ async fn test_send_user_turn_changes_approval_policy_behavior() -> Result<()> {
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("codex/event/task_complete"),
)
.await??;
Ok(())
.await
.expect("task_complete 2 timeout")
.expect("task_complete 2 notification");
}
// Helper: minimal config.toml pointing at mock provider.
#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
async fn test_send_user_turn_updates_sandbox_and_cwd_between_turns() -> Result<()> {
async fn test_send_user_turn_updates_sandbox_and_cwd_between_turns() {
if env::var(CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR).is_ok() {
println!(
"Skipping test because it cannot execute when network is disabled in a Codex sandbox."
);
return Ok(());
return;
}
let tmp = TempDir::new()?;
let tmp = TempDir::new().expect("tmp dir");
let codex_home = tmp.path().join("codex_home");
std::fs::create_dir(&codex_home)?;
std::fs::create_dir(&codex_home).expect("create codex home dir");
let workspace_root = tmp.path().join("workspace");
std::fs::create_dir(&workspace_root)?;
std::fs::create_dir(&workspace_root).expect("create workspace root");
let first_cwd = workspace_root.join("turn1");
let second_cwd = workspace_root.join("turn2");
std::fs::create_dir(&first_cwd)?;
std::fs::create_dir(&second_cwd)?;
std::fs::create_dir(&first_cwd).expect("create first cwd");
std::fs::create_dir(&second_cwd).expect("create second cwd");
let responses = vec![
create_shell_sse_response(
@@ -362,8 +406,10 @@ async fn test_send_user_turn_updates_sandbox_and_cwd_between_turns() -> Result<(
None,
Some(5000),
"call-first",
)?,
create_final_assistant_message_sse_response("done first")?,
)
.expect("create first shell response"),
create_final_assistant_message_sse_response("done first")
.expect("create first final assistant message"),
create_shell_sse_response(
vec![
"bash".to_string(),
@@ -373,14 +419,21 @@ async fn test_send_user_turn_updates_sandbox_and_cwd_between_turns() -> Result<(
None,
Some(5000),
"call-second",
)?,
create_final_assistant_message_sse_response("done second")?,
)
.expect("create second shell response"),
create_final_assistant_message_sse_response("done second")
.expect("create second final assistant message"),
];
let server = create_mock_chat_completions_server(responses).await;
create_config_toml(&codex_home, &server.uri())?;
create_config_toml(&codex_home, &server.uri()).expect("write config");
let mut mcp = McpProcess::new(&codex_home).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let mut mcp = McpProcess::new(&codex_home)
.await
.expect("spawn mcp process");
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize())
.await
.expect("init timeout")
.expect("init failed");
let new_conv_id = mcp
.send_new_conversation_request(NewConversationParams {
@@ -389,29 +442,33 @@ async fn test_send_user_turn_updates_sandbox_and_cwd_between_turns() -> Result<(
sandbox: Some(SandboxMode::WorkspaceWrite),
..Default::default()
})
.await?;
.await
.expect("send newConversation");
let new_conv_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(new_conv_id)),
)
.await??;
.await
.expect("newConversation timeout")
.expect("newConversation resp");
let NewConversationResponse {
conversation_id,
model,
..
} = to_response::<NewConversationResponse>(new_conv_resp)?;
} = to_response::<NewConversationResponse>(new_conv_resp)
.expect("deserialize newConversation response");
let add_listener_id = mcp
.send_add_conversation_listener_request(AddConversationListenerParams {
conversation_id,
experimental_raw_events: false,
})
.await?;
.send_add_conversation_listener_request(AddConversationListenerParams { conversation_id })
.await
.expect("send addConversationListener");
timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(add_listener_id)),
)
.await??;
.await
.expect("addConversationListener timeout")
.expect("addConversationListener resp");
let first_turn_id = mcp
.send_send_user_turn_request(SendUserTurnParams {
@@ -431,17 +488,22 @@ async fn test_send_user_turn_updates_sandbox_and_cwd_between_turns() -> Result<(
effort: Some(ReasoningEffort::Medium),
summary: ReasoningSummary::Auto,
})
.await?;
.await
.expect("send first sendUserTurn");
timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(first_turn_id)),
)
.await??;
.await
.expect("sendUserTurn 1 timeout")
.expect("sendUserTurn 1 resp");
timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("codex/event/task_complete"),
)
.await??;
.await
.expect("task_complete 1 timeout")
.expect("task_complete 1 notification");
let second_turn_id = mcp
.send_send_user_turn_request(SendUserTurnParams {
@@ -456,18 +518,60 @@ async fn test_send_user_turn_updates_sandbox_and_cwd_between_turns() -> Result<(
effort: Some(ReasoningEffort::Medium),
summary: ReasoningSummary::Auto,
})
.await?;
.await
.expect("send second sendUserTurn");
timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(second_turn_id)),
)
.await??;
.await
.expect("sendUserTurn 2 timeout")
.expect("sendUserTurn 2 resp");
let mut env_message: Option<String> = None;
let second_cwd_str = second_cwd.to_string_lossy().into_owned();
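// Poll up to 10 user_message notifications for the environment-context update that mentions the new cwd.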
for _ in 0..10 {
let notification = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("codex/event/user_message"),
)
.await
.expect("user_message timeout")
.expect("user_message notification");
let params = notification
.params
.clone()
.expect("user_message should include params");
let event: Event = serde_json::from_value(params).expect("deserialize user_message event");
if let EventMsg::UserMessage(user) = event.msg
&& matches!(user.kind, Some(InputMessageKind::EnvironmentContext))
&& user.message.contains(&second_cwd_str)
{
env_message = Some(user.message);
break;
}
}
let env_message = env_message.expect("expected environment context update");
assert!(
env_message.contains("<sandbox_mode>danger-full-access</sandbox_mode>"),
"env context should reflect new sandbox mode: {env_message}"
);
assert!(
env_message.contains("<network_access>enabled</network_access>"),
"env context should enable network access for danger-full-access policy: {env_message}"
);
assert!(
env_message.contains(&second_cwd_str),
"env context should include updated cwd: {env_message}"
);
let exec_begin_notification = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("codex/event/exec_command_begin"),
)
.await??;
.await
.expect("exec_command_begin timeout")
.expect("exec_command_begin notification");
let params = exec_begin_notification
.params
.clone()
@@ -495,9 +599,9 @@ async fn test_send_user_turn_updates_sandbox_and_cwd_between_turns() -> Result<(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("codex/event/task_complete"),
)
.await??;
Ok(())
.await
.expect("task_complete 2 timeout")
.expect("task_complete 2 notification");
}
fn create_config_toml(codex_home: &Path, server_uri: &str) -> std::io::Result<()> {

View File

@@ -1,4 +1,6 @@
use anyhow::Result;
use std::collections::HashMap;
use std::path::Path;
use app_test_support::McpProcess;
use app_test_support::to_response;
use codex_app_server_protocol::GetUserSavedConfigResponse;
@@ -15,8 +17,6 @@ use codex_protocol::config_types::ReasoningSummary;
use codex_protocol::config_types::SandboxMode;
use codex_protocol::config_types::Verbosity;
use pretty_assertions::assert_eq;
use std::collections::HashMap;
use std::path::Path;
use tempfile::TempDir;
use tokio::time::timeout;
@@ -60,21 +60,31 @@ chatgpt_base_url = "https://api.chatgpt.com"
}
#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
async fn get_config_toml_parses_all_fields() -> Result<()> {
let codex_home = TempDir::new()?;
create_config_toml(codex_home.path())?;
async fn get_config_toml_parses_all_fields() {
let codex_home = TempDir::new().unwrap_or_else(|e| panic!("create tempdir: {e}"));
create_config_toml(codex_home.path()).expect("write config.toml");
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let mut mcp = McpProcess::new(codex_home.path())
.await
.expect("spawn mcp process");
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize())
.await
.expect("init timeout")
.expect("init failed");
let request_id = mcp.send_get_user_saved_config_request().await?;
let request_id = mcp
.send_get_user_saved_config_request()
.await
.expect("send getUserSavedConfig");
let resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(request_id)),
)
.await??;
.await
.expect("getUserSavedConfig timeout")
.expect("getUserSavedConfig response");
let config: GetUserSavedConfigResponse = to_response(resp)?;
let config: GetUserSavedConfigResponse = to_response(resp).expect("deserialize config");
let expected = GetUserSavedConfigResponse {
config: UserSavedConfig {
approval_policy: Some(AskForApproval::OnRequest),
@@ -112,24 +122,33 @@ async fn get_config_toml_parses_all_fields() -> Result<()> {
};
assert_eq!(config, expected);
Ok(())
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn get_config_toml_empty() -> Result<()> {
let codex_home = TempDir::new()?;
async fn get_config_toml_empty() {
let codex_home = TempDir::new().unwrap_or_else(|e| panic!("create tempdir: {e}"));
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let mut mcp = McpProcess::new(codex_home.path())
.await
.expect("spawn mcp process");
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize())
.await
.expect("init timeout")
.expect("init failed");
let request_id = mcp.send_get_user_saved_config_request().await?;
let request_id = mcp
.send_get_user_saved_config_request()
.await
.expect("send getUserSavedConfig");
let resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(request_id)),
)
.await??;
.await
.expect("getUserSavedConfig timeout")
.expect("getUserSavedConfig response");
let config: GetUserSavedConfigResponse = to_response(resp)?;
let config: GetUserSavedConfigResponse = to_response(resp).expect("deserialize config");
let expected = GetUserSavedConfigResponse {
config: UserSavedConfig {
approval_policy: None,
@@ -148,5 +167,4 @@ async fn get_config_toml_empty() -> Result<()> {
};
assert_eq!(config, expected);
Ok(())
}

View File

@@ -1,4 +1,5 @@
use anyhow::Result;
use std::path::Path;
use app_test_support::McpProcess;
use app_test_support::create_final_assistant_message_sse_response;
use app_test_support::create_mock_chat_completions_server;
@@ -14,25 +15,31 @@ use codex_app_server_protocol::SendUserMessageParams;
use codex_app_server_protocol::SendUserMessageResponse;
use pretty_assertions::assert_eq;
use serde_json::json;
use std::path::Path;
use tempfile::TempDir;
use tokio::time::timeout;
const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10);
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn test_conversation_create_and_send_message_ok() -> Result<()> {
async fn test_conversation_create_and_send_message_ok() {
// Mock server: we won't strictly rely on it, but provide one to satisfy any model wiring.
let responses = vec![create_final_assistant_message_sse_response("Done")?];
let responses = vec![
create_final_assistant_message_sse_response("Done").expect("build mock assistant message"),
];
let server = create_mock_chat_completions_server(responses).await;
// Temporary Codex home with config pointing at the mock server.
let codex_home = TempDir::new()?;
create_config_toml(codex_home.path(), &server.uri())?;
let codex_home = TempDir::new().expect("create temp dir");
create_config_toml(codex_home.path(), &server.uri()).expect("write config.toml");
// Start MCP server process and initialize.
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let mut mcp = McpProcess::new(codex_home.path())
.await
.expect("spawn mcp process");
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize())
.await
.expect("init timeout")
.expect("init failed");
// Create a conversation via the new JSON-RPC API.
let new_conv_id = mcp
@@ -40,35 +47,40 @@ async fn test_conversation_create_and_send_message_ok() -> Result<()> {
model: Some("o3".to_string()),
..Default::default()
})
.await?;
.await
.expect("send newConversation");
let new_conv_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(new_conv_id)),
)
.await??;
.await
.expect("newConversation timeout")
.expect("newConversation resp");
let NewConversationResponse {
conversation_id,
model,
reasoning_effort: _,
rollout_path: _,
} = to_response::<NewConversationResponse>(new_conv_resp)?;
} = to_response::<NewConversationResponse>(new_conv_resp)
.expect("deserialize newConversation response");
assert_eq!(model, "o3");
// Add a listener so we receive notifications for this conversation (not strictly required for this test).
let add_listener_id = mcp
.send_add_conversation_listener_request(AddConversationListenerParams {
conversation_id,
experimental_raw_events: false,
})
.await?;
.send_add_conversation_listener_request(AddConversationListenerParams { conversation_id })
.await
.expect("send addConversationListener");
let _sub: AddConversationSubscriptionResponse =
to_response::<AddConversationSubscriptionResponse>(
timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(add_listener_id)),
)
.await??,
)?;
.await
.expect("addConversationListener timeout")
.expect("addConversationListener resp"),
)
.expect("deserialize addConversationListener response");
// Now send a user message via the wire API and expect an OK (empty object) result.
let send_id = mcp
@@ -78,32 +90,36 @@ async fn test_conversation_create_and_send_message_ok() -> Result<()> {
text: "Hello".to_string(),
}],
})
.await?;
.await
.expect("send sendUserMessage");
let send_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(send_id)),
)
.await??;
let _ok: SendUserMessageResponse = to_response::<SendUserMessageResponse>(send_resp)?;
.await
.expect("sendUserMessage timeout")
.expect("sendUserMessage resp");
let _ok: SendUserMessageResponse = to_response::<SendUserMessageResponse>(send_resp)
.expect("deserialize sendUserMessage response");
// Avoid a race condition by waiting for the mock server to receive the chat.completions request.
let deadline = std::time::Instant::now() + DEFAULT_READ_TIMEOUT;
let requests = loop {
loop {
let requests = server.received_requests().await.unwrap_or_default();
if !requests.is_empty() {
break requests;
break;
}
if std::time::Instant::now() >= deadline {
panic!("mock server did not receive the chat.completions request in time");
}
tokio::time::sleep(std::time::Duration::from_millis(10)).await;
};
}
// Verify the outbound request body matches expectations for Chat Completions.
let request = requests
.first()
.expect("mock server should have received at least one request");
let body = request.body_json::<serde_json::Value>()?;
let request = &server.received_requests().await.unwrap()[0];
let body = request
.body_json::<serde_json::Value>()
.expect("parse request body as JSON");
assert_eq!(body["model"], json!("o3"));
assert!(body["stream"].as_bool().unwrap_or(false));
let messages = body["messages"]
@@ -114,7 +130,6 @@ async fn test_conversation_create_and_send_message_ok() -> Result<()> {
assert_eq!(last["content"], json!("Hello"));
drop(server);
Ok(())
}
// Helper to create a config.toml pointing at the mock model server.

View File

@@ -1,5 +1,5 @@
use anyhow::Context;
use anyhow::Result;
use anyhow::anyhow;
use app_test_support::McpProcess;
use codex_app_server_protocol::JSONRPCResponse;
use codex_app_server_protocol::RequestId;
@@ -13,39 +13,48 @@ const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn test_fuzzy_file_search_sorts_and_includes_indices() -> Result<()> {
// Prepare a temporary Codex home and a separate root with test files.
let codex_home = TempDir::new()?;
let root = TempDir::new()?;
let codex_home = TempDir::new().context("create temp codex home")?;
let root = TempDir::new().context("create temp search root")?;
// Create files designed to have deterministic ordering for query "abe".
std::fs::write(root.path().join("abc"), "x")?;
std::fs::write(root.path().join("abcde"), "x")?;
std::fs::write(root.path().join("abexy"), "x")?;
std::fs::write(root.path().join("zzz.txt"), "x")?;
std::fs::write(root.path().join("abc"), "x").context("write file abc")?;
std::fs::write(root.path().join("abcde"), "x").context("write file abcde")?;
std::fs::write(root.path().join("abexy"), "x").context("write file abexy")?;
std::fs::write(root.path().join("zzz.txt"), "x").context("write file zzz")?;
let sub_dir = root.path().join("sub");
std::fs::create_dir_all(&sub_dir)?;
std::fs::create_dir_all(&sub_dir).context("create sub dir")?;
let sub_abce_path = sub_dir.join("abce");
std::fs::write(&sub_abce_path, "x")?;
std::fs::write(&sub_abce_path, "x").context("write file sub/abce")?;
let sub_abce_rel = sub_abce_path
.strip_prefix(root.path())?
.strip_prefix(root.path())
.context("strip root prefix from sub/abce")?
.to_string_lossy()
.to_string();
// Start MCP server and initialize.
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let mut mcp = McpProcess::new(codex_home.path())
.await
.context("spawn mcp")?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize())
.await
.context("init timeout")?
.context("init failed")?;
let root_path = root.path().to_string_lossy().to_string();
// Send fuzzyFileSearch request.
let request_id = mcp
.send_fuzzy_file_search_request("abe", vec![root_path.clone()], None)
.await?;
.await
.context("send fuzzyFileSearch")?;
// Read response and verify shape and ordering.
let resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(request_id)),
)
.await??;
.await
.context("fuzzyFileSearch timeout")?
.context("fuzzyFileSearch resp")?;
let value = resp.result;
// The path separator on Windows affects the score.
@@ -85,18 +94,24 @@ async fn test_fuzzy_file_search_sorts_and_includes_indices() -> Result<()> {
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn test_fuzzy_file_search_accepts_cancellation_token() -> Result<()> {
let codex_home = TempDir::new()?;
let root = TempDir::new()?;
let codex_home = TempDir::new().context("create temp codex home")?;
let root = TempDir::new().context("create temp search root")?;
std::fs::write(root.path().join("alpha.txt"), "contents")?;
std::fs::write(root.path().join("alpha.txt"), "contents").context("write alpha")?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let mut mcp = McpProcess::new(codex_home.path())
.await
.context("spawn mcp")?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize())
.await
.context("init timeout")?
.context("init failed")?;
let root_path = root.path().to_string_lossy().to_string();
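// The second request passes the first request's id as a cancellation token; only the second response is awaited.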
let request_id = mcp
.send_fuzzy_file_search_request("alp", vec![root_path.clone()], None)
.await?;
.await
.context("send fuzzyFileSearch")?;
let request_id_2 = mcp
.send_fuzzy_file_search_request(
@@ -104,20 +119,23 @@ async fn test_fuzzy_file_search_accepts_cancellation_token() -> Result<()> {
vec![root_path.clone()],
Some(request_id.to_string()),
)
.await?;
.await
.context("send fuzzyFileSearch")?;
let resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(request_id_2)),
)
.await??;
.await
.context("fuzzyFileSearch timeout")?
.context("fuzzyFileSearch resp")?;
let files = resp
.result
.get("files")
.ok_or_else(|| anyhow!("files key missing"))?
.context("files key missing")?
.as_array()
.ok_or_else(|| anyhow!("files not array"))?
.context("files not array")?
.clone();
assert_eq!(files.len(), 1);

View File

@@ -88,10 +88,7 @@ async fn shell_command_interruption() -> anyhow::Result<()> {
// 2) addConversationListener
let add_listener_id = mcp
.send_add_conversation_listener_request(AddConversationListenerParams {
conversation_id,
experimental_raw_events: false,
})
.send_add_conversation_listener_request(AddConversationListenerParams { conversation_id })
.await?;
let _add_listener_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,

View File

@@ -1,4 +1,6 @@
use anyhow::Result;
use std::fs;
use std::path::Path;
use app_test_support::McpProcess;
use app_test_support::to_response;
use codex_app_server_protocol::JSONRPCNotification;
@@ -11,13 +13,8 @@ use codex_app_server_protocol::ResumeConversationParams;
use codex_app_server_protocol::ResumeConversationResponse;
use codex_app_server_protocol::ServerNotification;
use codex_app_server_protocol::SessionConfiguredNotification;
use codex_core::protocol::EventMsg;
use codex_protocol::models::ContentItem;
use codex_protocol::models::ResponseItem;
use pretty_assertions::assert_eq;
use serde_json::json;
use std::fs;
use std::path::Path;
use tempfile::TempDir;
use tokio::time::timeout;
use uuid::Uuid;
@@ -25,56 +22,58 @@ use uuid::Uuid;
const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10);
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn test_list_and_resume_conversations() -> Result<()> {
async fn test_list_and_resume_conversations() {
// Prepare a temporary CODEX_HOME with a few fake rollout files.
let codex_home = TempDir::new()?;
let codex_home = TempDir::new().expect("create temp dir");
create_fake_rollout(
codex_home.path(),
"2025-01-02T12-00-00",
"2025-01-02T12:00:00Z",
"Hello A",
Some("openai"),
)?;
);
create_fake_rollout(
codex_home.path(),
"2025-01-01T13-00-00",
"2025-01-01T13:00:00Z",
"Hello B",
Some("openai"),
)?;
);
create_fake_rollout(
codex_home.path(),
"2025-01-01T12-00-00",
"2025-01-01T12:00:00Z",
"Hello C",
None,
)?;
);
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let mut mcp = McpProcess::new(codex_home.path())
.await
.expect("spawn mcp process");
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize())
.await
.expect("init timeout")
.expect("init failed");
// Request first page with size 2
let req_id = mcp
.send_list_conversations_request(ListConversationsParams {
page_size: Some(2),
cursor: None,
model_providers: None,
})
.await?;
.await
.expect("send listConversations");
let resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(req_id)),
)
.await??;
.await
.expect("listConversations timeout")
.expect("listConversations resp");
let ListConversationsResponse { items, next_cursor } =
to_response::<ListConversationsResponse>(resp)?;
to_response::<ListConversationsResponse>(resp).expect("deserialize response");
assert_eq!(items.len(), 2);
// Newest first; preview text should match
assert_eq!(items[0].preview, "Hello A");
assert_eq!(items[1].preview, "Hello B");
assert_eq!(items[0].model_provider, "openai");
assert_eq!(items[1].model_provider, "openai");
assert!(items[0].path.is_absolute());
assert!(next_cursor.is_some());
@@ -83,315 +82,100 @@ async fn test_list_and_resume_conversations() -> Result<()> {
.send_list_conversations_request(ListConversationsParams {
page_size: Some(2),
cursor: next_cursor,
model_providers: None,
})
.await?;
.await
.expect("send listConversations page 2");
let resp2: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(req_id2)),
)
.await??;
.await
.expect("listConversations page 2 timeout")
.expect("listConversations page 2 resp");
let ListConversationsResponse {
items: items2,
next_cursor: next2,
..
} = to_response::<ListConversationsResponse>(resp2)?;
} = to_response::<ListConversationsResponse>(resp2).expect("deserialize response");
assert_eq!(items2.len(), 1);
assert_eq!(items2[0].preview, "Hello C");
assert_eq!(items2[0].model_provider, "openai");
assert_eq!(next2, None);
assert!(next2.is_some());
// Add a conversation with an explicit non-OpenAI provider for filter tests.
create_fake_rollout(
codex_home.path(),
"2025-01-01T11-30-00",
"2025-01-01T11:30:00Z",
"Hello TP",
Some("test-provider"),
)?;
// Filtering by model provider should return only matching sessions.
let filter_req_id = mcp
.send_list_conversations_request(ListConversationsParams {
page_size: Some(10),
cursor: None,
model_providers: Some(vec!["test-provider".to_string()]),
})
.await?;
let filter_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(filter_req_id)),
)
.await??;
let ListConversationsResponse {
items: filtered_items,
next_cursor: filtered_next,
} = to_response::<ListConversationsResponse>(filter_resp)?;
assert_eq!(filtered_items.len(), 1);
assert_eq!(filtered_next, None);
assert_eq!(filtered_items[0].preview, "Hello TP");
assert_eq!(filtered_items[0].model_provider, "test-provider");
// Empty filter should include every session regardless of provider metadata.
let unfiltered_req_id = mcp
.send_list_conversations_request(ListConversationsParams {
page_size: Some(10),
cursor: None,
model_providers: Some(Vec::new()),
})
.await?;
let unfiltered_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(unfiltered_req_id)),
)
.await??;
let ListConversationsResponse {
items: unfiltered_items,
next_cursor: unfiltered_next,
} = to_response::<ListConversationsResponse>(unfiltered_resp)?;
assert_eq!(unfiltered_items.len(), 4);
assert!(unfiltered_next.is_none());
let empty_req_id = mcp
.send_list_conversations_request(ListConversationsParams {
page_size: Some(10),
cursor: None,
model_providers: Some(vec!["other".to_string()]),
})
.await?;
let empty_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(empty_req_id)),
)
.await??;
let ListConversationsResponse {
items: empty_items,
next_cursor: empty_next,
} = to_response::<ListConversationsResponse>(empty_resp)?;
assert!(empty_items.is_empty());
assert!(empty_next.is_none());
let first_item = &items[0];
// Now resume one of the sessions from an explicit rollout path.
// Now resume one of the sessions and expect a SessionConfigured notification and response.
let resume_req_id = mcp
.send_resume_conversation_request(ResumeConversationParams {
path: Some(first_item.path.clone()),
conversation_id: None,
history: None,
path: items[0].path.clone(),
overrides: Some(NewConversationParams {
model: Some("o3".to_string()),
..Default::default()
}),
})
.await?;
.await
.expect("send resumeConversation");
// Expect a codex/event notification with msg.type == sessionConfigured
let notification: JSONRPCNotification = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("sessionConfigured"),
)
.await??;
let session_configured: ServerNotification = notification.try_into()?;
.await
.expect("sessionConfigured notification timeout")
.expect("sessionConfigured notification");
let session_configured: ServerNotification = notification
.try_into()
.expect("deserialize sessionConfigured notification");
// Basic shape assertion: ensure event type is sessionConfigured
let ServerNotification::SessionConfigured(SessionConfiguredNotification {
model,
rollout_path,
initial_messages: session_initial_messages,
..
}) = session_configured
else {
unreachable!("expected sessionConfigured notification");
};
assert_eq!(model, "o3");
assert_eq!(rollout_path, first_item.path.clone());
let session_initial_messages = session_initial_messages
.expect("expected initial messages when resuming from rollout path");
match session_initial_messages.as_slice() {
[EventMsg::UserMessage(message)] => {
assert_eq!(message.message, first_item.preview.clone());
}
other => panic!("unexpected initial messages from rollout resume: {other:#?}"),
}
assert_eq!(items[0].path.clone(), rollout_path);
// Then the response for resumeConversation
let resume_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(resume_req_id)),
)
.await??;
.await
.expect("resumeConversation timeout")
.expect("resumeConversation resp");
let ResumeConversationResponse {
conversation_id,
model: resume_model,
initial_messages: response_initial_messages,
..
} = to_response::<ResumeConversationResponse>(resume_resp)?;
conversation_id, ..
} = to_response::<ResumeConversationResponse>(resume_resp)
.expect("deserialize resumeConversation response");
// conversation id should be a valid UUID
assert!(!conversation_id.to_string().is_empty());
assert_eq!(resume_model, "o3");
let response_initial_messages =
response_initial_messages.expect("expected initial messages in resume response");
match response_initial_messages.as_slice() {
[EventMsg::UserMessage(message)] => {
assert_eq!(message.message, first_item.preview.clone());
}
other => panic!("unexpected initial messages in resume response: {other:#?}"),
}
// Resuming with only a conversation id should locate the rollout automatically.
let resume_by_id_req_id = mcp
.send_resume_conversation_request(ResumeConversationParams {
path: None,
conversation_id: Some(first_item.conversation_id),
history: None,
overrides: Some(NewConversationParams {
model: Some("o3".to_string()),
..Default::default()
}),
})
.await?;
let notification: JSONRPCNotification = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("sessionConfigured"),
)
.await??;
let session_configured: ServerNotification = notification.try_into()?;
let ServerNotification::SessionConfigured(SessionConfiguredNotification {
model,
rollout_path,
initial_messages: session_initial_messages,
..
}) = session_configured
else {
unreachable!("expected sessionConfigured notification");
};
assert_eq!(model, "o3");
assert_eq!(rollout_path, first_item.path.clone());
let session_initial_messages = session_initial_messages
.expect("expected initial messages when resuming from conversation id");
match session_initial_messages.as_slice() {
[EventMsg::UserMessage(message)] => {
assert_eq!(message.message, first_item.preview.clone());
}
other => panic!("unexpected initial messages from conversation id resume: {other:#?}"),
}
let resume_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(resume_by_id_req_id)),
)
.await??;
let ResumeConversationResponse {
conversation_id: by_id_conversation_id,
model: by_id_model,
initial_messages: by_id_initial_messages,
..
} = to_response::<ResumeConversationResponse>(resume_resp)?;
assert!(!by_id_conversation_id.to_string().is_empty());
assert_eq!(by_id_model, "o3");
let by_id_initial_messages = by_id_initial_messages
.expect("expected initial messages when resuming from conversation id response");
match by_id_initial_messages.as_slice() {
[EventMsg::UserMessage(message)] => {
assert_eq!(message.message, first_item.preview.clone());
}
other => {
panic!("unexpected initial messages in conversation id resume response: {other:#?}")
}
}
// Resuming with explicit history should succeed even without a stored rollout.
let fork_history_text = "Hello from history";
let history = vec![ResponseItem::Message {
id: None,
role: "user".to_string(),
content: vec![ContentItem::InputText {
text: fork_history_text.to_string(),
}],
}];
let resume_with_history_req_id = mcp
.send_resume_conversation_request(ResumeConversationParams {
path: None,
conversation_id: None,
history: Some(history),
overrides: Some(NewConversationParams {
model: Some("o3".to_string()),
..Default::default()
}),
})
.await?;
let notification: JSONRPCNotification = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("sessionConfigured"),
)
.await??;
let session_configured: ServerNotification = notification.try_into()?;
let ServerNotification::SessionConfigured(SessionConfiguredNotification {
model,
initial_messages: session_initial_messages,
..
}) = session_configured
else {
unreachable!("expected sessionConfigured notification");
};
assert_eq!(model, "o3");
assert!(
session_initial_messages.as_ref().is_none_or(Vec::is_empty),
"expected no initial messages when resuming from explicit history but got {session_initial_messages:#?}"
);
let resume_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(resume_with_history_req_id)),
)
.await??;
let ResumeConversationResponse {
conversation_id: history_conversation_id,
model: history_model,
initial_messages: history_initial_messages,
..
} = to_response::<ResumeConversationResponse>(resume_resp)?;
assert!(!history_conversation_id.to_string().is_empty());
assert_eq!(history_model, "o3");
assert!(
history_initial_messages.as_ref().is_none_or(Vec::is_empty),
"expected no initial messages in resume response when history is provided but got {history_initial_messages:#?}"
);
Ok(())
}
fn create_fake_rollout(
codex_home: &Path,
filename_ts: &str,
meta_rfc3339: &str,
preview: &str,
model_provider: Option<&str>,
) -> Result<()> {
fn create_fake_rollout(codex_home: &Path, filename_ts: &str, meta_rfc3339: &str, preview: &str) {
let uuid = Uuid::new_v4();
// sessions/YYYY/MM/DD/ derived from filename_ts (YYYY-MM-DDThh-mm-ss)
let year = &filename_ts[0..4];
let month = &filename_ts[5..7];
let day = &filename_ts[8..10];
let dir = codex_home.join("sessions").join(year).join(month).join(day);
fs::create_dir_all(&dir)?;
fs::create_dir_all(&dir).unwrap_or_else(|e| panic!("create sessions dir: {e}"));
let file_path = dir.join(format!("rollout-{filename_ts}-{uuid}.jsonl"));
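// Each rollout is a JSONL file: one session_meta line followed by recorded message lines.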
let mut lines = Vec::new();
// Meta line with timestamp (flattened meta in payload for new schema)
let mut payload = json!({
"id": uuid,
"timestamp": meta_rfc3339,
"cwd": "/",
"originator": "codex",
"cli_version": "0.0.0",
"instructions": null,
});
if let Some(provider) = model_provider {
payload["model_provider"] = json!(provider);
}
lines.push(
json!({
"timestamp": meta_rfc3339,
"type": "session_meta",
"payload": payload
"payload": {
"id": uuid,
"timestamp": meta_rfc3339,
"cwd": "/",
"originator": "codex",
"cli_version": "0.0.0",
"instructions": null
}
})
.to_string(),
);
@@ -421,6 +205,6 @@ fn create_fake_rollout(
})
.to_string(),
);
fs::write(file_path, lines.join("\n") + "\n")?;
Ok(())
fs::write(file_path, lines.join("\n") + "\n")
.unwrap_or_else(|e| panic!("write rollout file: {e}"));
}

View File

@@ -1,4 +1,6 @@
use anyhow::Result;
use std::path::Path;
use std::time::Duration;
use app_test_support::McpProcess;
use app_test_support::to_response;
use codex_app_server_protocol::CancelLoginChatGptParams;
@@ -10,11 +12,7 @@ use codex_app_server_protocol::JSONRPCResponse;
use codex_app_server_protocol::LoginChatGptResponse;
use codex_app_server_protocol::LogoutChatGptResponse;
use codex_app_server_protocol::RequestId;
use codex_core::auth::AuthCredentialsStoreMode;
use codex_login::login_with_api_key;
use serial_test::serial;
use std::path::Path;
use std::time::Duration;
use tempfile::TempDir;
use tokio::time::timeout;
@@ -43,26 +41,32 @@ stream_max_retries = 0
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn logout_chatgpt_removes_auth() -> Result<()> {
let codex_home = TempDir::new()?;
create_config_toml(codex_home.path())?;
login_with_api_key(
codex_home.path(),
"sk-test-key",
AuthCredentialsStoreMode::File,
)?;
async fn logout_chatgpt_removes_auth() {
let codex_home = TempDir::new().unwrap_or_else(|e| panic!("create tempdir: {e}"));
create_config_toml(codex_home.path()).expect("write config.toml");
login_with_api_key(codex_home.path(), "sk-test-key").expect("seed api key");
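// Seeding the API key must produce auth.json so logout has something to remove.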
assert!(codex_home.path().join("auth.json").exists());
let mut mcp = McpProcess::new_with_env(codex_home.path(), &[("OPENAI_API_KEY", None)]).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let mut mcp = McpProcess::new_with_env(codex_home.path(), &[("OPENAI_API_KEY", None)])
.await
.expect("spawn mcp process");
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize())
.await
.expect("init timeout")
.expect("init failed");
let id = mcp.send_logout_chat_gpt_request().await?;
let id = mcp
.send_logout_chat_gpt_request()
.await
.expect("send logoutChatGpt");
let resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(id)),
)
.await??;
let _ok: LogoutChatGptResponse = to_response(resp)?;
.await
.expect("logoutChatGpt timeout")
.expect("logoutChatGpt response");
let _ok: LogoutChatGptResponse = to_response(resp).expect("deserialize logout response");
assert!(
!codex_home.path().join("auth.json").exists(),
@@ -75,47 +79,61 @@ async fn logout_chatgpt_removes_auth() -> Result<()> {
include_token: Some(true),
refresh_token: Some(false),
})
.await?;
.await
.expect("send getAuthStatus");
let status_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(status_id)),
)
.await??;
let status: GetAuthStatusResponse = to_response(status_resp)?;
.await
.expect("getAuthStatus timeout")
.expect("getAuthStatus response");
let status: GetAuthStatusResponse = to_response(status_resp).expect("deserialize status");
assert_eq!(status.auth_method, None);
assert_eq!(status.auth_token, None);
Ok(())
}
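The pattern applied throughout this file (and much of this diff): tests that returned anyhow::Result<()> and propagated failures with `?`/`??` now return `()` and panic with a labeled message at each fallible layer. A distilled sketch of the two styles, with a hypothetical do_work standing in for an MCP request:

use std::time::Duration;
use tokio::time::timeout;

// Hypothetical fallible step standing in for an MCP call.
async fn do_work() -> anyhow::Result<()> {
    Ok(())
}

// Before: `??` unwraps the timeout layer, then the inner result.
async fn old_style() -> anyhow::Result<()> {
    timeout(Duration::from_secs(10), do_work()).await??;
    Ok(())
}

// After: no Result return; each layer panics with a descriptive message.
async fn new_style() {
    timeout(Duration::from_secs(10), do_work())
        .await
        .expect("do_work timeout")
        .expect("do_work failed");
}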
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
// Serialize tests that launch the login server since it binds to a fixed port.
#[serial(login_port)]
async fn login_and_cancel_chatgpt() -> Result<()> {
let codex_home = TempDir::new()?;
create_config_toml(codex_home.path())?;
async fn login_and_cancel_chatgpt() {
let codex_home = TempDir::new().unwrap_or_else(|e| panic!("create tempdir: {e}"));
create_config_toml(codex_home.path()).unwrap_or_else(|err| panic!("write config.toml: {err}"));
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let mut mcp = McpProcess::new(codex_home.path())
.await
.expect("spawn mcp process");
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize())
.await
.expect("init timeout")
.expect("init failed");
let login_id = mcp.send_login_chat_gpt_request().await?;
let login_id = mcp
.send_login_chat_gpt_request()
.await
.expect("send loginChatGpt");
let login_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(login_id)),
)
.await??;
let login: LoginChatGptResponse = to_response(login_resp)?;
.await
.expect("loginChatGpt timeout")
.expect("loginChatGpt response");
let login: LoginChatGptResponse = to_response(login_resp).expect("deserialize login resp");
let cancel_id = mcp
.send_cancel_login_chat_gpt_request(CancelLoginChatGptParams {
login_id: login.login_id,
})
.await?;
.await
.expect("send cancelLoginChatGpt");
let cancel_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(cancel_id)),
)
.await??;
let _ok: CancelLoginChatGptResponse = to_response(cancel_resp)?;
.await
.expect("cancelLoginChatGpt timeout")
.expect("cancelLoginChatGpt response");
let _ok: CancelLoginChatGptResponse =
to_response(cancel_resp).expect("deserialize cancel response");
// Optionally observe the completion notification; do not fail if it races.
let maybe_note = timeout(
@@ -126,7 +144,6 @@ async fn login_and_cancel_chatgpt() -> Result<()> {
if maybe_note.is_err() {
eprintln!("warning: did not observe login_chat_gpt_complete notification after cancel");
}
Ok(())
}
fn create_config_toml_forced_login(codex_home: &Path, forced_method: &str) -> std::io::Result<()> {
@@ -159,48 +176,66 @@ forced_chatgpt_workspace_id = "{workspace_id}"
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn login_chatgpt_rejected_when_forced_api() -> Result<()> {
let codex_home = TempDir::new()?;
create_config_toml_forced_login(codex_home.path(), "api")?;
async fn login_chatgpt_rejected_when_forced_api() {
let codex_home = TempDir::new().unwrap_or_else(|e| panic!("create tempdir: {e}"));
create_config_toml_forced_login(codex_home.path(), "api")
.unwrap_or_else(|err| panic!("write config.toml: {err}"));
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let mut mcp = McpProcess::new(codex_home.path())
.await
.expect("spawn mcp process");
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize())
.await
.expect("init timeout")
.expect("init failed");
let request_id = mcp.send_login_chat_gpt_request().await?;
let request_id = mcp
.send_login_chat_gpt_request()
.await
.expect("send loginChatGpt");
let err: JSONRPCError = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_error_message(RequestId::Integer(request_id)),
)
.await??;
.await
.expect("loginChatGpt error timeout")
.expect("loginChatGpt error");
assert_eq!(
err.error.message,
"ChatGPT login is disabled. Use API key login instead."
);
Ok(())
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
// Serialize tests that launch the login server since it binds to a fixed port.
#[serial(login_port)]
async fn login_chatgpt_includes_forced_workspace_query_param() -> Result<()> {
let codex_home = TempDir::new()?;
create_config_toml_forced_workspace(codex_home.path(), "ws-forced")?;
async fn login_chatgpt_includes_forced_workspace_query_param() {
let codex_home = TempDir::new().unwrap_or_else(|e| panic!("create tempdir: {e}"));
create_config_toml_forced_workspace(codex_home.path(), "ws-forced")
.unwrap_or_else(|err| panic!("write config.toml: {err}"));
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let mut mcp = McpProcess::new(codex_home.path())
.await
.expect("spawn mcp process");
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize())
.await
.expect("init timeout")
.expect("init failed");
let request_id = mcp.send_login_chat_gpt_request().await?;
let request_id = mcp
.send_login_chat_gpt_request()
.await
.expect("send loginChatGpt");
let resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(request_id)),
)
.await??;
.await
.expect("loginChatGpt timeout")
.expect("loginChatGpt response");
let login: LoginChatGptResponse = to_response(resp)?;
let login: LoginChatGptResponse = to_response(resp).expect("deserialize login resp");
assert!(
login.auth_url.contains("allowed_workspace_id=ws-forced"),
"auth URL should include forced workspace"
);
Ok(())
}

View File

@@ -7,8 +7,6 @@ mod fuzzy_file_search;
mod interrupt;
mod list_resume;
mod login;
mod model_list;
mod rate_limits;
mod send_message;
mod set_default_model;
mod user_agent;

View File

@@ -1,183 +0,0 @@
use std::time::Duration;
use anyhow::Result;
use anyhow::anyhow;
use app_test_support::McpProcess;
use app_test_support::to_response;
use codex_app_server_protocol::JSONRPCError;
use codex_app_server_protocol::JSONRPCResponse;
use codex_app_server_protocol::ListModelsParams;
use codex_app_server_protocol::ListModelsResponse;
use codex_app_server_protocol::Model;
use codex_app_server_protocol::ReasoningEffortOption;
use codex_app_server_protocol::RequestId;
use codex_protocol::config_types::ReasoningEffort;
use pretty_assertions::assert_eq;
use tempfile::TempDir;
use tokio::time::timeout;
const DEFAULT_TIMEOUT: Duration = Duration::from_secs(10);
const INVALID_REQUEST_ERROR_CODE: i64 = -32600;
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn list_models_returns_all_models_with_large_limit() -> Result<()> {
let codex_home = TempDir::new()?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_TIMEOUT, mcp.initialize()).await??;
let request_id = mcp
.send_list_models_request(ListModelsParams {
page_size: Some(100),
cursor: None,
})
.await?;
let response: JSONRPCResponse = timeout(
DEFAULT_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(request_id)),
)
.await??;
let ListModelsResponse { items, next_cursor } = to_response::<ListModelsResponse>(response)?;
let expected_models = vec![
Model {
id: "gpt-5-codex".to_string(),
model: "gpt-5-codex".to_string(),
display_name: "gpt-5-codex".to_string(),
description: "Optimized for coding tasks with many tools.".to_string(),
supported_reasoning_efforts: vec![
ReasoningEffortOption {
reasoning_effort: ReasoningEffort::Low,
description: "Fastest responses with limited reasoning".to_string(),
},
ReasoningEffortOption {
reasoning_effort: ReasoningEffort::Medium,
description: "Dynamically adjusts reasoning based on the task".to_string(),
},
ReasoningEffortOption {
reasoning_effort: ReasoningEffort::High,
description: "Maximizes reasoning depth for complex or ambiguous problems"
.to_string(),
},
],
default_reasoning_effort: ReasoningEffort::Medium,
is_default: true,
},
Model {
id: "gpt-5".to_string(),
model: "gpt-5".to_string(),
display_name: "gpt-5".to_string(),
description: "Broad world knowledge with strong general reasoning.".to_string(),
supported_reasoning_efforts: vec![
ReasoningEffortOption {
reasoning_effort: ReasoningEffort::Minimal,
description: "Fastest responses with little reasoning".to_string(),
},
ReasoningEffortOption {
reasoning_effort: ReasoningEffort::Low,
description: "Balances speed with some reasoning; useful for straightforward \
queries and short explanations"
.to_string(),
},
ReasoningEffortOption {
reasoning_effort: ReasoningEffort::Medium,
description: "Provides a solid balance of reasoning depth and latency for \
general-purpose tasks"
.to_string(),
},
ReasoningEffortOption {
reasoning_effort: ReasoningEffort::High,
description: "Maximizes reasoning depth for complex or ambiguous problems"
.to_string(),
},
],
default_reasoning_effort: ReasoningEffort::Medium,
is_default: false,
},
];
assert_eq!(items, expected_models);
assert!(next_cursor.is_none());
Ok(())
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn list_models_pagination_works() -> Result<()> {
let codex_home = TempDir::new()?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_TIMEOUT, mcp.initialize()).await??;
let first_request = mcp
.send_list_models_request(ListModelsParams {
page_size: Some(1),
cursor: None,
})
.await?;
let first_response: JSONRPCResponse = timeout(
DEFAULT_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(first_request)),
)
.await??;
let ListModelsResponse {
items: first_items,
next_cursor: first_cursor,
} = to_response::<ListModelsResponse>(first_response)?;
assert_eq!(first_items.len(), 1);
assert_eq!(first_items[0].id, "gpt-5-codex");
let next_cursor = first_cursor.ok_or_else(|| anyhow!("cursor for second page"))?;
let second_request = mcp
.send_list_models_request(ListModelsParams {
page_size: Some(1),
cursor: Some(next_cursor.clone()),
})
.await?;
let second_response: JSONRPCResponse = timeout(
DEFAULT_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(second_request)),
)
.await??;
let ListModelsResponse {
items: second_items,
next_cursor: second_cursor,
} = to_response::<ListModelsResponse>(second_response)?;
assert_eq!(second_items.len(), 1);
assert_eq!(second_items[0].id, "gpt-5");
assert!(second_cursor.is_none());
Ok(())
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn list_models_rejects_invalid_cursor() -> Result<()> {
let codex_home = TempDir::new()?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_TIMEOUT, mcp.initialize()).await??;
let request_id = mcp
.send_list_models_request(ListModelsParams {
page_size: None,
cursor: Some("invalid".to_string()),
})
.await?;
let error: JSONRPCError = timeout(
DEFAULT_TIMEOUT,
mcp.read_stream_until_error_message(RequestId::Integer(request_id)),
)
.await??;
assert_eq!(error.id, RequestId::Integer(request_id));
assert_eq!(error.error.code, INVALID_REQUEST_ERROR_CODE);
assert_eq!(error.error.message, "invalid cursor: invalid");
Ok(())
}
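The pagination contract these deleted tests encode: pass page_size plus an optional cursor, and stop when next_cursor comes back None. A client-side sketch under that assumption; fetch_page is a hypothetical shim over the request/response cycle shown above:

use codex_app_server_protocol::ListModelsResponse;
use codex_app_server_protocol::Model;

// Hypothetical transport shim; the tests do this with McpProcess +
// send_list_models_request + read_stream_until_response_message.
async fn fetch_page(
    page_size: usize,
    cursor: Option<String>,
) -> anyhow::Result<ListModelsResponse> {
    let _ = (page_size, cursor);
    unimplemented!("wire this to the app server")
}

async fn list_all_models() -> anyhow::Result<Vec<Model>> {
    let mut items = Vec::new();
    let mut cursor = None;
    loop {
        let page = fetch_page(1, cursor.take()).await?;
        items.extend(page.items);
        match page.next_cursor {
            Some(next) => cursor = Some(next), // another page remains
            None => break,                     // final page
        }
    }
    Ok(items)
}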

View File

@@ -1,181 +0,0 @@
use anyhow::Result;
use app_test_support::ChatGptAuthFixture;
use app_test_support::McpProcess;
use app_test_support::to_response;
use app_test_support::write_chatgpt_auth;
use codex_app_server_protocol::GetAccountRateLimitsResponse;
use codex_app_server_protocol::JSONRPCError;
use codex_app_server_protocol::JSONRPCResponse;
use codex_app_server_protocol::LoginApiKeyParams;
use codex_app_server_protocol::RequestId;
use codex_core::auth::AuthCredentialsStoreMode;
use codex_protocol::protocol::RateLimitSnapshot;
use codex_protocol::protocol::RateLimitWindow;
use pretty_assertions::assert_eq;
use serde_json::json;
use std::path::Path;
use tempfile::TempDir;
use tokio::time::timeout;
use wiremock::Mock;
use wiremock::MockServer;
use wiremock::ResponseTemplate;
use wiremock::matchers::header;
use wiremock::matchers::method;
use wiremock::matchers::path;
const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10);
const INVALID_REQUEST_ERROR_CODE: i64 = -32600;
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn get_account_rate_limits_requires_auth() -> Result<()> {
let codex_home = TempDir::new()?;
let mut mcp = McpProcess::new_with_env(codex_home.path(), &[("OPENAI_API_KEY", None)]).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let request_id = mcp.send_get_account_rate_limits_request().await?;
let error: JSONRPCError = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_error_message(RequestId::Integer(request_id)),
)
.await??;
assert_eq!(error.id, RequestId::Integer(request_id));
assert_eq!(error.error.code, INVALID_REQUEST_ERROR_CODE);
assert_eq!(
error.error.message,
"codex account authentication required to read rate limits"
);
Ok(())
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn get_account_rate_limits_requires_chatgpt_auth() -> Result<()> {
let codex_home = TempDir::new()?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
login_with_api_key(&mut mcp, "sk-test-key").await?;
let request_id = mcp.send_get_account_rate_limits_request().await?;
let error: JSONRPCError = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_error_message(RequestId::Integer(request_id)),
)
.await??;
assert_eq!(error.id, RequestId::Integer(request_id));
assert_eq!(error.error.code, INVALID_REQUEST_ERROR_CODE);
assert_eq!(
error.error.message,
"chatgpt authentication required to read rate limits"
);
Ok(())
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn get_account_rate_limits_returns_snapshot() -> Result<()> {
let codex_home = TempDir::new()?;
write_chatgpt_auth(
codex_home.path(),
ChatGptAuthFixture::new("chatgpt-token")
.account_id("account-123")
.plan_type("pro"),
AuthCredentialsStoreMode::File,
)?;
let server = MockServer::start().await;
let server_url = server.uri();
write_chatgpt_base_url(codex_home.path(), &server_url)?;
let primary_reset_timestamp = chrono::DateTime::parse_from_rfc3339("2025-01-01T00:02:00Z")
.expect("parse primary reset timestamp")
.timestamp();
let secondary_reset_timestamp = chrono::DateTime::parse_from_rfc3339("2025-01-01T01:00:00Z")
.expect("parse secondary reset timestamp")
.timestamp();
let response_body = json!({
"plan_type": "pro",
"rate_limit": {
"allowed": true,
"limit_reached": false,
"primary_window": {
"used_percent": 42,
"limit_window_seconds": 3600,
"reset_after_seconds": 120,
"reset_at": primary_reset_timestamp,
},
"secondary_window": {
"used_percent": 5,
"limit_window_seconds": 86400,
"reset_after_seconds": 43200,
"reset_at": secondary_reset_timestamp,
}
}
});
Mock::given(method("GET"))
.and(path("/api/codex/usage"))
.and(header("authorization", "Bearer chatgpt-token"))
.and(header("chatgpt-account-id", "account-123"))
.respond_with(ResponseTemplate::new(200).set_body_json(response_body))
.mount(&server)
.await;
let mut mcp = McpProcess::new_with_env(codex_home.path(), &[("OPENAI_API_KEY", None)]).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let request_id = mcp.send_get_account_rate_limits_request().await?;
let response: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(request_id)),
)
.await??;
let received: GetAccountRateLimitsResponse = to_response(response)?;
let expected = GetAccountRateLimitsResponse {
rate_limits: RateLimitSnapshot {
primary: Some(RateLimitWindow {
used_percent: 42.0,
window_minutes: Some(60),
resets_at: Some(primary_reset_timestamp),
}),
secondary: Some(RateLimitWindow {
used_percent: 5.0,
window_minutes: Some(1440),
resets_at: Some(secondary_reset_timestamp),
}),
},
};
assert_eq!(received, expected);
Ok(())
}
async fn login_with_api_key(mcp: &mut McpProcess, api_key: &str) -> Result<()> {
let request_id = mcp
.send_login_api_key_request(LoginApiKeyParams {
api_key: api_key.to_string(),
})
.await?;
timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(request_id)),
)
.await??;
Ok(())
}
fn write_chatgpt_base_url(codex_home: &Path, base_url: &str) -> std::io::Result<()> {
let config_toml = codex_home.join("config.toml");
std::fs::write(config_toml, format!("chatgpt_base_url = \"{base_url}\"\n"))
}

View File

@@ -1,4 +1,5 @@
use anyhow::Result;
use std::path::Path;
use app_test_support::McpProcess;
use app_test_support::create_final_assistant_message_sse_response;
use app_test_support::create_mock_chat_completions_server;
@@ -14,76 +15,73 @@ use codex_app_server_protocol::RequestId;
use codex_app_server_protocol::SendUserMessageParams;
use codex_app_server_protocol::SendUserMessageResponse;
use codex_protocol::ConversationId;
use codex_protocol::models::ContentItem;
use codex_protocol::models::ResponseItem;
use codex_protocol::protocol::RawResponseItemEvent;
use pretty_assertions::assert_eq;
use std::path::Path;
use tempfile::TempDir;
use tokio::time::timeout;
const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10);
#[tokio::test]
async fn test_send_message_success() -> Result<()> {
async fn test_send_message_success() {
// Spin up a mock completions server that immediately ends the Codex turn.
// Two Codex turns hit the mock model (session start + send-user-message). Provide two SSE responses.
let responses = vec![
create_final_assistant_message_sse_response("Done")?,
create_final_assistant_message_sse_response("Done")?,
create_final_assistant_message_sse_response("Done").expect("build mock assistant message"),
create_final_assistant_message_sse_response("Done").expect("build mock assistant message"),
];
let server = create_mock_chat_completions_server(responses).await;
// Create a temporary Codex home with config pointing at the mock server.
let codex_home = TempDir::new()?;
create_config_toml(codex_home.path(), &server.uri())?;
let codex_home = TempDir::new().expect("create temp dir");
create_config_toml(codex_home.path(), &server.uri()).expect("write config.toml");
// Start MCP server process and initialize.
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let mut mcp = McpProcess::new(codex_home.path())
.await
.expect("spawn mcp process");
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize())
.await
.expect("init timed out")
.expect("init failed");
// Start a conversation using the new wire API.
let new_conv_id = mcp
.send_new_conversation_request(NewConversationParams {
..Default::default()
})
.await?;
.send_new_conversation_request(NewConversationParams::default())
.await
.expect("send newConversation");
let new_conv_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(new_conv_id)),
)
.await??;
.await
.expect("newConversation timeout")
.expect("newConversation resp");
let NewConversationResponse {
conversation_id, ..
} = to_response::<_>(new_conv_resp)?;
} = to_response::<_>(new_conv_resp).expect("deserialize newConversation response");
// 2) addConversationListener
let add_listener_id = mcp
.send_add_conversation_listener_request(AddConversationListenerParams {
conversation_id,
experimental_raw_events: false,
})
.await?;
.send_add_conversation_listener_request(AddConversationListenerParams { conversation_id })
.await
.expect("send addConversationListener");
let add_listener_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(add_listener_id)),
)
.await??;
.await
.expect("addConversationListener timeout")
.expect("addConversationListener resp");
let AddConversationSubscriptionResponse { subscription_id: _ } =
to_response::<_>(add_listener_resp)?;
to_response::<_>(add_listener_resp).expect("deserialize addConversationListener response");
// Now exercise sendUserMessage twice.
send_message("Hello", conversation_id, &mut mcp).await?;
send_message("Hello again", conversation_id, &mut mcp).await?;
Ok(())
send_message("Hello", conversation_id, &mut mcp).await;
send_message("Hello again", conversation_id, &mut mcp).await;
}
#[expect(clippy::expect_used)]
async fn send_message(
message: &str,
conversation_id: ConversationId,
mcp: &mut McpProcess,
) -> Result<()> {
async fn send_message(message: &str, conversation_id: ConversationId, mcp: &mut McpProcess) {
// Now exercise sendUserMessage.
let send_id = mcp
.send_send_user_message_request(SendUserMessageParams {
@@ -92,15 +90,19 @@ async fn send_message(
text: message.to_string(),
}],
})
.await?;
.await
.expect("send sendUserMessage");
let response: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(send_id)),
)
.await??;
.await
.expect("sendUserMessage response timeout")
.expect("sendUserMessage response error");
let _ok: SendUserMessageResponse = to_response::<SendUserMessageResponse>(response)?;
let _ok: SendUserMessageResponse = to_response::<SendUserMessageResponse>(response)
.expect("deserialize sendUserMessage response");
// Verify the task_complete notification is received.
// Note that this also ensures the final request to the server was made.
@@ -108,7 +110,9 @@ async fn send_message(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("codex/event/task_complete"),
)
.await??;
.await
.expect("task_finished_notification timeout")
.expect("task_finished_notification resp");
let serde_json::Value::Object(map) = task_finished_notification
.params
.expect("notification should have params")
@@ -120,105 +124,17 @@ async fn send_message(
.expect("should have conversationId"),
&serde_json::Value::String(conversation_id.to_string())
);
let raw_attempt = tokio::time::timeout(
std::time::Duration::from_millis(200),
mcp.read_stream_until_notification_message("codex/event/raw_response_item"),
)
.await;
assert!(
raw_attempt.is_err(),
"unexpected raw item notification when not opted in"
);
Ok(())
}
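The raw_attempt check above is the standard absence assertion: wait a short interval for the unwanted notification and require the timeout to fire. Distilled (the future type is generic; the 200 ms window matches the test):

use std::time::Duration;

// Passing case: `fut` does NOT complete within the window, so
// tokio::time::timeout returns Err(Elapsed).
async fn assert_no_event<F: std::future::Future>(fut: F) {
    let attempt = tokio::time::timeout(Duration::from_millis(200), fut).await;
    assert!(attempt.is_err(), "unexpected event arrived");
}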
#[tokio::test]
async fn test_send_message_raw_notifications_opt_in() -> Result<()> {
let responses = vec![create_final_assistant_message_sse_response("Done")?];
let server = create_mock_chat_completions_server(responses).await;
let codex_home = TempDir::new()?;
create_config_toml(codex_home.path(), &server.uri())?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let new_conv_id = mcp
.send_new_conversation_request(NewConversationParams {
developer_instructions: Some("Use the test harness tools.".to_string()),
..Default::default()
})
.await?;
let new_conv_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(new_conv_id)),
)
.await??;
let NewConversationResponse {
conversation_id, ..
} = to_response::<_>(new_conv_resp)?;
let add_listener_id = mcp
.send_add_conversation_listener_request(AddConversationListenerParams {
conversation_id,
experimental_raw_events: true,
})
.await?;
let add_listener_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(add_listener_id)),
)
.await??;
let AddConversationSubscriptionResponse { subscription_id: _ } =
to_response::<_>(add_listener_resp)?;
let send_id = mcp
.send_send_user_message_request(SendUserMessageParams {
conversation_id,
items: vec![InputItem::Text {
text: "Hello".to_string(),
}],
})
.await?;
let developer = read_raw_response_item(&mut mcp, conversation_id).await;
assert_developer_message(&developer, "Use the test harness tools.");
let instructions = read_raw_response_item(&mut mcp, conversation_id).await;
assert_instructions_message(&instructions);
let environment = read_raw_response_item(&mut mcp, conversation_id).await;
assert_environment_message(&environment);
let response: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(send_id)),
)
.await??;
let _ok: SendUserMessageResponse = to_response::<SendUserMessageResponse>(response)?;
let user_message = read_raw_response_item(&mut mcp, conversation_id).await;
assert_user_message(&user_message, "Hello");
let assistant_message = read_raw_response_item(&mut mcp, conversation_id).await;
assert_assistant_message(&assistant_message, "Done");
let _ = tokio::time::timeout(
std::time::Duration::from_millis(250),
mcp.read_stream_until_notification_message("codex/event/task_complete"),
)
.await;
Ok(())
}
#[tokio::test]
async fn test_send_message_session_not_found() -> Result<()> {
async fn test_send_message_session_not_found() {
// Start MCP without creating a Codex session
let codex_home = TempDir::new()?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let codex_home = TempDir::new().expect("tempdir");
let mut mcp = McpProcess::new(codex_home.path()).await.expect("spawn");
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize())
.await
.expect("timeout")
.expect("init");
let unknown = ConversationId::new();
let req_id = mcp
@@ -228,16 +144,18 @@ async fn test_send_message_session_not_found() -> Result<()> {
text: "ping".to_string(),
}],
})
.await?;
.await
.expect("send sendUserMessage");
// Expect an error response for the unknown conversation.
let err = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_error_message(RequestId::Integer(req_id)),
)
.await??;
.await
.expect("timeout")
.expect("error");
assert_eq!(err.id, RequestId::Integer(req_id));
Ok(())
}
// ---------------------------------------------------------------------------
@@ -266,126 +184,3 @@ stream_max_retries = 0
),
)
}
#[expect(clippy::expect_used)]
async fn read_raw_response_item(
mcp: &mut McpProcess,
conversation_id: ConversationId,
) -> ResponseItem {
let raw_notification: JSONRPCNotification = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("codex/event/raw_response_item"),
)
.await
.expect("codex/event/raw_response_item notification timeout")
.expect("codex/event/raw_response_item notification resp");
let serde_json::Value::Object(params) = raw_notification
.params
.expect("codex/event/raw_response_item should have params")
else {
panic!("codex/event/raw_response_item should have params");
};
let conversation_id_value = params
.get("conversationId")
.and_then(|value| value.as_str())
.expect("raw response item should include conversationId");
assert_eq!(
conversation_id_value,
conversation_id.to_string(),
"raw response item conversation mismatch"
);
let msg_value = params
.get("msg")
.cloned()
.expect("raw response item should include msg payload");
let event: RawResponseItemEvent =
serde_json::from_value(msg_value).expect("deserialize raw response item");
event.item
}
fn assert_instructions_message(item: &ResponseItem) {
match item {
ResponseItem::Message { role, content, .. } => {
assert_eq!(role, "user");
let texts = content_texts(content);
let is_instructions = texts
.iter()
.any(|text| text.starts_with("# AGENTS.md instructions for "));
assert!(
is_instructions,
"expected instructions message, got {texts:?}"
);
}
other => panic!("expected instructions message, got {other:?}"),
}
}
fn assert_developer_message(item: &ResponseItem, expected_text: &str) {
match item {
ResponseItem::Message { role, content, .. } => {
assert_eq!(role, "developer");
let texts = content_texts(content);
assert_eq!(
texts,
vec![expected_text],
"expected developer instructions message, got {texts:?}"
);
}
other => panic!("expected developer instructions message, got {other:?}"),
}
}
fn assert_environment_message(item: &ResponseItem) {
match item {
ResponseItem::Message { role, content, .. } => {
assert_eq!(role, "user");
let texts = content_texts(content);
assert!(
texts
.iter()
.any(|text| text.contains("<environment_context>")),
"expected environment context message, got {texts:?}"
);
}
other => panic!("expected environment message, got {other:?}"),
}
}
fn assert_user_message(item: &ResponseItem, expected_text: &str) {
match item {
ResponseItem::Message { role, content, .. } => {
assert_eq!(role, "user");
let texts = content_texts(content);
assert_eq!(texts, vec![expected_text]);
}
other => panic!("expected user message, got {other:?}"),
}
}
fn assert_assistant_message(item: &ResponseItem, expected_text: &str) {
match item {
ResponseItem::Message { role, content, .. } => {
assert_eq!(role, "assistant");
let texts = content_texts(content);
assert_eq!(texts, vec![expected_text]);
}
other => panic!("expected assistant message, got {other:?}"),
}
}
fn content_texts(content: &[ContentItem]) -> Vec<&str> {
content
.iter()
.filter_map(|item| match item {
ContentItem::InputText { text } | ContentItem::OutputText { text } => {
Some(text.as_str())
}
_ => None,
})
.collect()
}

View File

@@ -1,4 +1,5 @@
use anyhow::Result;
use std::path::Path;
use app_test_support::McpProcess;
use app_test_support::to_response;
use codex_app_server_protocol::JSONRPCResponse;
@@ -7,38 +8,50 @@ use codex_app_server_protocol::SetDefaultModelParams;
use codex_app_server_protocol::SetDefaultModelResponse;
use codex_core::config::ConfigToml;
use pretty_assertions::assert_eq;
use std::path::Path;
use tempfile::TempDir;
use tokio::time::timeout;
const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10);
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn set_default_model_persists_overrides() -> Result<()> {
let codex_home = TempDir::new()?;
create_config_toml(codex_home.path())?;
async fn set_default_model_persists_overrides() {
let codex_home = TempDir::new().expect("create tempdir");
create_config_toml(codex_home.path()).expect("write config.toml");
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let mut mcp = McpProcess::new(codex_home.path())
.await
.expect("spawn mcp process");
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize())
.await
.expect("init timeout")
.expect("init failed");
let params = SetDefaultModelParams {
model: Some("gpt-4.1".to_string()),
reasoning_effort: None,
};
let request_id = mcp.send_set_default_model_request(params).await?;
let request_id = mcp
.send_set_default_model_request(params)
.await
.expect("send setDefaultModel");
let resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(request_id)),
)
.await??;
.await
.expect("setDefaultModel timeout")
.expect("setDefaultModel response");
let _: SetDefaultModelResponse = to_response(resp)?;
let _: SetDefaultModelResponse =
to_response(resp).expect("deserialize setDefaultModel response");
let config_path = codex_home.path().join("config.toml");
let config_contents = tokio::fs::read_to_string(&config_path).await?;
let config_toml: ConfigToml = toml::from_str(&config_contents)?;
let config_contents = tokio::fs::read_to_string(&config_path)
.await
.expect("read config.toml");
let config_toml: ConfigToml = toml::from_str(&config_contents).expect("parse config.toml");
assert_eq!(
ConfigToml {
@@ -48,7 +61,6 @@ async fn set_default_model_persists_overrides() -> Result<()> {
},
config_toml,
);
Ok(())
}
// Helper to create a config.toml; mirrors create_conversation.rs

View File

@@ -1,4 +1,3 @@
use anyhow::Result;
use app_test_support::McpProcess;
use app_test_support::to_response;
use codex_app_server_protocol::GetUserAgentResponse;
@@ -11,18 +10,28 @@ use tokio::time::timeout;
const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10);
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn get_user_agent_returns_current_codex_user_agent() -> Result<()> {
let codex_home = TempDir::new()?;
async fn get_user_agent_returns_current_codex_user_agent() {
let codex_home = TempDir::new().unwrap_or_else(|err| panic!("create tempdir: {err}"));
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let mut mcp = McpProcess::new(codex_home.path())
.await
.expect("spawn mcp process");
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize())
.await
.expect("initialize timeout")
.expect("initialize request");
let request_id = mcp.send_get_user_agent_request().await?;
let request_id = mcp
.send_get_user_agent_request()
.await
.expect("send getUserAgent");
let response: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(request_id)),
)
.await??;
.await
.expect("getUserAgent timeout")
.expect("getUserAgent response");
let os_info = os_info::get();
let user_agent = format!(
@@ -33,9 +42,9 @@ async fn get_user_agent_returns_current_codex_user_agent() -> Result<()> {
codex_core::terminal::user_agent()
);
let received: GetUserAgentResponse = to_response(response)?;
let received: GetUserAgentResponse =
to_response(response).expect("deserialize getUserAgent response");
let expected = GetUserAgentResponse { user_agent };
assert_eq!(received, expected);
Ok(())
}

View File

@@ -1,46 +1,78 @@
use anyhow::Result;
use app_test_support::ChatGptAuthFixture;
use std::time::Duration;
use anyhow::Context;
use app_test_support::McpProcess;
use app_test_support::to_response;
use app_test_support::write_chatgpt_auth;
use base64::Engine;
use base64::engine::general_purpose::URL_SAFE_NO_PAD;
use codex_app_server_protocol::JSONRPCResponse;
use codex_app_server_protocol::RequestId;
use codex_app_server_protocol::UserInfoResponse;
use codex_core::auth::AuthCredentialsStoreMode;
use codex_core::auth::AuthDotJson;
use codex_core::auth::get_auth_file;
use codex_core::auth::write_auth_json;
use codex_core::token_data::IdTokenInfo;
use codex_core::token_data::TokenData;
use pretty_assertions::assert_eq;
use std::time::Duration;
use serde_json::json;
use tempfile::TempDir;
use tokio::time::timeout;
const DEFAULT_READ_TIMEOUT: Duration = Duration::from_secs(10);
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn user_info_returns_email_from_auth_json() -> Result<()> {
let codex_home = TempDir::new()?;
async fn user_info_returns_email_from_auth_json() {
let codex_home = TempDir::new().expect("create tempdir");
write_chatgpt_auth(
codex_home.path(),
ChatGptAuthFixture::new("access")
.refresh_token("refresh")
.email("user@example.com"),
AuthCredentialsStoreMode::File,
)?;
let auth_path = get_auth_file(codex_home.path());
let mut id_token = IdTokenInfo::default();
id_token.email = Some("user@example.com".to_string());
id_token.raw_jwt = encode_id_token_with_email("user@example.com").expect("encode id token");
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let auth = AuthDotJson {
openai_api_key: None,
tokens: Some(TokenData {
id_token,
access_token: "access".to_string(),
refresh_token: "refresh".to_string(),
account_id: None,
}),
last_refresh: None,
};
write_auth_json(&auth_path, &auth).expect("write auth.json");
let request_id = mcp.send_user_info_request().await?;
let mut mcp = McpProcess::new(codex_home.path())
.await
.expect("spawn mcp process");
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize())
.await
.expect("initialize timeout")
.expect("initialize request");
let request_id = mcp.send_user_info_request().await.expect("send userInfo");
let response: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(request_id)),
)
.await??;
.await
.expect("userInfo timeout")
.expect("userInfo response");
let received: UserInfoResponse = to_response(response)?;
let received: UserInfoResponse = to_response(response).expect("deserialize userInfo response");
let expected = UserInfoResponse {
alleged_user_email: Some("user@example.com".to_string()),
};
assert_eq!(received, expected);
Ok(())
}
fn encode_id_token_with_email(email: &str) -> anyhow::Result<String> {
let header_b64 = URL_SAFE_NO_PAD.encode(
serde_json::to_vec(&json!({ "alg": "none", "typ": "JWT" }))
.context("serialize jwt header")?,
);
let payload =
serde_json::to_vec(&json!({ "email": email })).context("serialize jwt payload")?;
let payload_b64 = URL_SAFE_NO_PAD.encode(payload);
Ok(format!("{header_b64}.{payload_b64}.signature"))
}
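A quick sanity check of what encode_id_token_with_email produces: an unsigned three-segment JWT, so the email can be recovered by base64url-decoding the middle segment. A sketch of the read side (decode_email is hypothetical, and the parser tolerating the fake "signature" segment is an assumption):

use base64::Engine;
use base64::engine::general_purpose::URL_SAFE_NO_PAD;

fn decode_email(raw_jwt: &str) -> Option<String> {
    // header.payload.signature -> take the payload segment.
    let payload_b64 = raw_jwt.split('.').nth(1)?;
    let bytes = URL_SAFE_NO_PAD.decode(payload_b64).ok()?;
    let value: serde_json::Value = serde_json::from_slice(&bytes).ok()?;
    value.get("email")?.as_str().map(str::to_owned)
}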

View File

@@ -1,3 +1 @@
mod cli;
#[cfg(not(target_os = "windows"))]
mod tool;

View File

@@ -1,257 +0,0 @@
use assert_cmd::Command;
use pretty_assertions::assert_eq;
use std::fs;
use std::path::Path;
use tempfile::tempdir;
fn run_apply_patch_in_dir(dir: &Path, patch: &str) -> anyhow::Result<assert_cmd::assert::Assert> {
let mut cmd = Command::cargo_bin("apply_patch")?;
cmd.current_dir(dir);
Ok(cmd.arg(patch).assert())
}
fn apply_patch_command(dir: &Path) -> anyhow::Result<Command> {
let mut cmd = Command::cargo_bin("apply_patch")?;
cmd.current_dir(dir);
Ok(cmd)
}
#[test]
fn test_apply_patch_cli_applies_multiple_operations() -> anyhow::Result<()> {
let tmp = tempdir()?;
let modify_path = tmp.path().join("modify.txt");
let delete_path = tmp.path().join("delete.txt");
fs::write(&modify_path, "line1\nline2\n")?;
fs::write(&delete_path, "obsolete\n")?;
let patch = "*** Begin Patch\n*** Add File: nested/new.txt\n+created\n*** Delete File: delete.txt\n*** Update File: modify.txt\n@@\n-line2\n+changed\n*** End Patch";
run_apply_patch_in_dir(tmp.path(), patch)?.success().stdout(
"Success. Updated the following files:\nA nested/new.txt\nM modify.txt\nD delete.txt\n",
);
assert_eq!(
fs::read_to_string(tmp.path().join("nested/new.txt"))?,
"created\n"
);
assert_eq!(fs::read_to_string(&modify_path)?, "line1\nchanged\n");
assert!(!delete_path.exists());
Ok(())
}
#[test]
fn test_apply_patch_cli_applies_multiple_chunks() -> anyhow::Result<()> {
let tmp = tempdir()?;
let target_path = tmp.path().join("multi.txt");
fs::write(&target_path, "line1\nline2\nline3\nline4\n")?;
let patch = "*** Begin Patch\n*** Update File: multi.txt\n@@\n-line2\n+changed2\n@@\n-line4\n+changed4\n*** End Patch";
run_apply_patch_in_dir(tmp.path(), patch)?
.success()
.stdout("Success. Updated the following files:\nM multi.txt\n");
assert_eq!(
fs::read_to_string(&target_path)?,
"line1\nchanged2\nline3\nchanged4\n"
);
Ok(())
}
#[test]
fn test_apply_patch_cli_moves_file_to_new_directory() -> anyhow::Result<()> {
let tmp = tempdir()?;
let original_path = tmp.path().join("old/name.txt");
let new_path = tmp.path().join("renamed/dir/name.txt");
fs::create_dir_all(original_path.parent().expect("parent should exist"))?;
fs::write(&original_path, "old content\n")?;
let patch = "*** Begin Patch\n*** Update File: old/name.txt\n*** Move to: renamed/dir/name.txt\n@@\n-old content\n+new content\n*** End Patch";
run_apply_patch_in_dir(tmp.path(), patch)?
.success()
.stdout("Success. Updated the following files:\nM renamed/dir/name.txt\n");
assert!(!original_path.exists());
assert_eq!(fs::read_to_string(&new_path)?, "new content\n");
Ok(())
}
#[test]
fn test_apply_patch_cli_rejects_empty_patch() -> anyhow::Result<()> {
let tmp = tempdir()?;
apply_patch_command(tmp.path())?
.arg("*** Begin Patch\n*** End Patch")
.assert()
.failure()
.stderr("No files were modified.\n");
Ok(())
}
#[test]
fn test_apply_patch_cli_reports_missing_context() -> anyhow::Result<()> {
let tmp = tempdir()?;
let target_path = tmp.path().join("modify.txt");
fs::write(&target_path, "line1\nline2\n")?;
apply_patch_command(tmp.path())?
.arg("*** Begin Patch\n*** Update File: modify.txt\n@@\n-missing\n+changed\n*** End Patch")
.assert()
.failure()
.stderr("Failed to find expected lines in modify.txt:\nmissing\n");
assert_eq!(fs::read_to_string(&target_path)?, "line1\nline2\n");
Ok(())
}
#[test]
fn test_apply_patch_cli_rejects_missing_file_delete() -> anyhow::Result<()> {
let tmp = tempdir()?;
apply_patch_command(tmp.path())?
.arg("*** Begin Patch\n*** Delete File: missing.txt\n*** End Patch")
.assert()
.failure()
.stderr("Failed to delete file missing.txt\n");
Ok(())
}
#[test]
fn test_apply_patch_cli_rejects_empty_update_hunk() -> anyhow::Result<()> {
let tmp = tempdir()?;
apply_patch_command(tmp.path())?
.arg("*** Begin Patch\n*** Update File: foo.txt\n*** End Patch")
.assert()
.failure()
.stderr("Invalid patch hunk on line 2: Update file hunk for path 'foo.txt' is empty\n");
Ok(())
}
#[test]
fn test_apply_patch_cli_requires_existing_file_for_update() -> anyhow::Result<()> {
let tmp = tempdir()?;
apply_patch_command(tmp.path())?
.arg("*** Begin Patch\n*** Update File: missing.txt\n@@\n-old\n+new\n*** End Patch")
.assert()
.failure()
.stderr(
"Failed to read file to update missing.txt: No such file or directory (os error 2)\n",
);
Ok(())
}
#[test]
fn test_apply_patch_cli_move_overwrites_existing_destination() -> anyhow::Result<()> {
let tmp = tempdir()?;
let original_path = tmp.path().join("old/name.txt");
let destination = tmp.path().join("renamed/dir/name.txt");
fs::create_dir_all(original_path.parent().expect("parent should exist"))?;
fs::create_dir_all(destination.parent().expect("parent should exist"))?;
fs::write(&original_path, "from\n")?;
fs::write(&destination, "existing\n")?;
run_apply_patch_in_dir(
tmp.path(),
"*** Begin Patch\n*** Update File: old/name.txt\n*** Move to: renamed/dir/name.txt\n@@\n-from\n+new\n*** End Patch",
)?
.success()
.stdout("Success. Updated the following files:\nM renamed/dir/name.txt\n");
assert!(!original_path.exists());
assert_eq!(fs::read_to_string(&destination)?, "new\n");
Ok(())
}
#[test]
fn test_apply_patch_cli_add_overwrites_existing_file() -> anyhow::Result<()> {
let tmp = tempdir()?;
let path = tmp.path().join("duplicate.txt");
fs::write(&path, "old content\n")?;
run_apply_patch_in_dir(
tmp.path(),
"*** Begin Patch\n*** Add File: duplicate.txt\n+new content\n*** End Patch",
)?
.success()
.stdout("Success. Updated the following files:\nA duplicate.txt\n");
assert_eq!(fs::read_to_string(&path)?, "new content\n");
Ok(())
}
#[test]
fn test_apply_patch_cli_delete_directory_fails() -> anyhow::Result<()> {
let tmp = tempdir()?;
fs::create_dir(tmp.path().join("dir"))?;
apply_patch_command(tmp.path())?
.arg("*** Begin Patch\n*** Delete File: dir\n*** End Patch")
.assert()
.failure()
.stderr("Failed to delete file dir\n");
Ok(())
}
#[test]
fn test_apply_patch_cli_rejects_invalid_hunk_header() -> anyhow::Result<()> {
let tmp = tempdir()?;
apply_patch_command(tmp.path())?
.arg("*** Begin Patch\n*** Frobnicate File: foo\n*** End Patch")
.assert()
.failure()
.stderr("Invalid patch hunk on line 2: '*** Frobnicate File: foo' is not a valid hunk header. Valid hunk headers: '*** Add File: {path}', '*** Delete File: {path}', '*** Update File: {path}'\n");
Ok(())
}
#[test]
fn test_apply_patch_cli_updates_file_appends_trailing_newline() -> anyhow::Result<()> {
let tmp = tempdir()?;
let target_path = tmp.path().join("no_newline.txt");
fs::write(&target_path, "no newline at end")?;
run_apply_patch_in_dir(
tmp.path(),
"*** Begin Patch\n*** Update File: no_newline.txt\n@@\n-no newline at end\n+first line\n+second line\n*** End Patch",
)?
.success()
.stdout("Success. Updated the following files:\nM no_newline.txt\n");
let contents = fs::read_to_string(&target_path)?;
assert!(contents.ends_with('\n'));
assert_eq!(contents, "first line\nsecond line\n");
Ok(())
}
#[test]
fn test_apply_patch_cli_failure_after_partial_success_leaves_changes() -> anyhow::Result<()> {
let tmp = tempdir()?;
let new_file = tmp.path().join("created.txt");
apply_patch_command(tmp.path())?
.arg("*** Begin Patch\n*** Add File: created.txt\n+hello\n*** Update File: missing.txt\n@@\n-old\n+new\n*** End Patch")
.assert()
.failure()
.stdout("")
.stderr("Failed to read file to update missing.txt: No such file or directory (os error 2)\n");
assert_eq!(fs::read_to_string(&new_file)?, "hello\n");
Ok(())
}
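Read together, these deleted tests document the full patch grammar: Add File, Update File (with optional Move to and multiple @@ chunks), and Delete File hunks between Begin/End markers. The first test's patch, unescaped for readability:

*** Begin Patch
*** Add File: nested/new.txt
+created
*** Delete File: delete.txt
*** Update File: modify.txt
@@
-line2
+changed
*** End Patch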

View File

@@ -13,8 +13,6 @@ serde = { version = "1", features = ["derive"] }
serde_json = "1"
reqwest = { version = "0.12", default-features = false, features = ["json", "rustls-tls"] }
codex-backend-openapi-models = { path = "../codex-backend-openapi-models" }
codex-protocol = { workspace = true }
codex-core = { workspace = true }
[dev-dependencies]
pretty_assertions = "1"

View File

@@ -1,13 +1,7 @@
use crate::types::CodeTaskDetailsResponse;
use crate::types::PaginatedListTaskListItem;
use crate::types::RateLimitStatusPayload;
use crate::types::RateLimitWindowSnapshot;
use crate::types::TurnAttemptsSiblingTurnsResponse;
use anyhow::Result;
use codex_core::auth::CodexAuth;
use codex_core::default_client::get_codex_user_agent;
use codex_protocol::protocol::RateLimitSnapshot;
use codex_protocol::protocol::RateLimitWindow;
use reqwest::header::AUTHORIZATION;
use reqwest::header::CONTENT_TYPE;
use reqwest::header::HeaderMap;
@@ -70,17 +64,6 @@ impl Client {
})
}
pub async fn from_auth(base_url: impl Into<String>, auth: &CodexAuth) -> Result<Self> {
let token = auth.get_token().await.map_err(anyhow::Error::from)?;
let mut client = Self::new(base_url)?
.with_user_agent(get_codex_user_agent())
.with_bearer_token(token);
if let Some(account_id) = auth.get_account_id() {
client = client.with_chatgpt_account_id(account_id);
}
Ok(client)
}
pub fn with_bearer_token(mut self, token: impl Into<String>) -> Self {
self.bearer_token = Some(token.into());
self
@@ -155,17 +138,6 @@ impl Client {
}
}
pub async fn get_rate_limits(&self) -> Result<RateLimitSnapshot> {
let url = match self.path_style {
PathStyle::CodexApi => format!("{}/api/codex/usage", self.base_url),
PathStyle::ChatGptApi => format!("{}/wham/usage", self.base_url),
};
let req = self.http.get(&url).headers(self.headers());
let (body, ct) = self.exec_request(req, "GET", &url).await?;
let payload: RateLimitStatusPayload = self.decode_json(&url, &ct, &body)?;
Ok(Self::rate_limit_snapshot_from_payload(payload))
}
pub async fn list_tasks(
&self,
limit: Option<i32>,
@@ -269,49 +241,4 @@ impl Client {
Err(e) => anyhow::bail!("Decode error for {url}: {e}; content-type={ct}; body={body}"),
}
}
// rate limit helpers
fn rate_limit_snapshot_from_payload(payload: RateLimitStatusPayload) -> RateLimitSnapshot {
let Some(details) = payload
.rate_limit
.and_then(|inner| inner.map(|boxed| *boxed))
else {
return RateLimitSnapshot {
primary: None,
secondary: None,
};
};
RateLimitSnapshot {
primary: Self::map_rate_limit_window(details.primary_window),
secondary: Self::map_rate_limit_window(details.secondary_window),
}
}
fn map_rate_limit_window(
window: Option<Option<Box<RateLimitWindowSnapshot>>>,
) -> Option<RateLimitWindow> {
let snapshot = match window {
Some(Some(snapshot)) => *snapshot,
_ => return None,
};
let used_percent = f64::from(snapshot.used_percent);
let window_minutes = Self::window_minutes_from_seconds(snapshot.limit_window_seconds);
let resets_at = Some(i64::from(snapshot.reset_at));
Some(RateLimitWindow {
used_percent,
window_minutes,
resets_at,
})
}
fn window_minutes_from_seconds(seconds: i32) -> Option<i64> {
if seconds <= 0 {
return None;
}
let seconds_i64 = i64::from(seconds);
Some((seconds_i64 + 59) / 60)
}
}
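A worked check of the removed mapping: window_minutes_from_seconds is a ceiling division, so the test values earlier in this diff (3600 s and 86400 s) map to 60 and 1440 minutes, and non-positive windows map to None:

fn window_minutes_from_seconds(seconds: i32) -> Option<i64> {
    if seconds <= 0 {
        return None;
    }
    let seconds_i64 = i64::from(seconds);
    Some((seconds_i64 + 59) / 60) // round up to the next whole minute
}

fn main() {
    assert_eq!(window_minutes_from_seconds(3600), Some(60)); // primary window
    assert_eq!(window_minutes_from_seconds(86400), Some(1440)); // secondary window
    assert_eq!(window_minutes_from_seconds(90), Some(2)); // partial minutes round up
    assert_eq!(window_minutes_from_seconds(0), None); // no window reported
}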

View File

@@ -1,8 +1,4 @@
pub use codex_backend_openapi_models::models::PaginatedListTaskListItem;
pub use codex_backend_openapi_models::models::PlanType;
pub use codex_backend_openapi_models::models::RateLimitStatusDetails;
pub use codex_backend_openapi_models::models::RateLimitStatusPayload;
pub use codex_backend_openapi_models::models::RateLimitWindowSnapshot;
pub use codex_backend_openapi_models::models::TaskListItem;
use serde::Deserialize;

View File

@@ -14,7 +14,7 @@ codex-core = { workspace = true }
serde = { workspace = true, features = ["derive"] }
serde_json = { workspace = true }
tokio = { workspace = true, features = ["full"] }
codex-git = { workspace = true }
codex-git-apply = { path = "../git-apply" }
[dev-dependencies]
tempfile = { workspace = true }

View File

@@ -32,8 +32,7 @@ pub async fn run_apply_command(
)
.await?;
init_chatgpt_token_from_auth(&config.codex_home, config.cli_auth_credentials_store_mode)
.await?;
init_chatgpt_token_from_auth(&config.codex_home).await?;
let task_response = get_task(&config, apply_cli.task_id).await?;
apply_diff_from_task(task_response, cwd).await
@@ -59,13 +58,13 @@ pub async fn apply_diff_from_task(
async fn apply_diff(diff: &str, cwd: Option<PathBuf>) -> anyhow::Result<()> {
let cwd = cwd.unwrap_or(std::env::current_dir().unwrap_or_else(|_| std::env::temp_dir()));
let req = codex_git::ApplyGitRequest {
let req = codex_git_apply::ApplyGitRequest {
cwd,
diff: diff.to_string(),
revert: false,
preflight: false,
};
let res = codex_git::apply_git_patch(&req)?;
let res = codex_git_apply::apply_git_patch(&req)?;
if res.exit_code != 0 {
anyhow::bail!(
"Git apply failed (applied={}, skipped={}, conflicts={})\nstdout:\n{}\nstderr:\n{}",

View File

@@ -13,8 +13,7 @@ pub(crate) async fn chatgpt_get_request<T: DeserializeOwned>(
path: String,
) -> anyhow::Result<T> {
let chatgpt_base_url = &config.chatgpt_base_url;
init_chatgpt_token_from_auth(&config.codex_home, config.cli_auth_credentials_store_mode)
.await?;
init_chatgpt_token_from_auth(&config.codex_home).await?;
// Make a direct HTTP request to the ChatGPT backend API with the token
let client = create_client();

View File

@@ -3,7 +3,6 @@ use std::path::Path;
use std::sync::LazyLock;
use std::sync::RwLock;
use codex_core::auth::AuthCredentialsStoreMode;
use codex_core::token_data::TokenData;
static CHATGPT_TOKEN: LazyLock<RwLock<Option<TokenData>>> = LazyLock::new(|| RwLock::new(None));
@@ -19,11 +18,8 @@ pub fn set_chatgpt_token_data(value: TokenData) {
}
/// Initialize the ChatGPT token from the auth.json file
pub async fn init_chatgpt_token_from_auth(
codex_home: &Path,
auth_credentials_store_mode: AuthCredentialsStoreMode,
) -> std::io::Result<()> {
let auth = CodexAuth::from_auth_storage(codex_home, auth_credentials_store_mode)?;
pub async fn init_chatgpt_token_from_auth(codex_home: &Path) -> std::io::Result<()> {
let auth = CodexAuth::from_codex_home(codex_home)?;
if let Some(auth) = auth {
let token_data = auth.get_token_data().await?;
set_chatgpt_token_data(token_data);
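For context, the CHATGPT_TOKEN static above is the usual LazyLock<RwLock<Option<T>>> process-global. A self-contained sketch of both sides, with String standing in for TokenData:

use std::sync::LazyLock;
use std::sync::RwLock;

static TOKEN: LazyLock<RwLock<Option<String>>> = LazyLock::new(|| RwLock::new(None));

fn set_token(value: String) {
    // Poisoning only happens if a writer panicked while holding the lock.
    *TOKEN.write().expect("token lock poisoned") = Some(value);
}

fn get_token() -> Option<String> {
    TOKEN.read().expect("token lock poisoned").clone()
}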

View File

@@ -47,9 +47,6 @@ tokio = { workspace = true, features = [
"signal",
] }
[target.'cfg(target_os = "windows")'.dependencies]
codex_windows_sandbox = { package = "codex-windows-sandbox", path = "../windows-sandbox-rs" }
[dev-dependencies]
assert_cmd = { workspace = true }
assert_matches = { workspace = true }

View File

@@ -11,7 +11,6 @@ use codex_protocol::config_types::SandboxMode;
use crate::LandlockCommand;
use crate::SeatbeltCommand;
use crate::WindowsCommand;
use crate::exit_status::handle_exit_status;
pub async fn run_command_under_seatbelt(
@@ -52,29 +51,9 @@ pub async fn run_command_under_landlock(
.await
}
pub async fn run_command_under_windows(
command: WindowsCommand,
codex_linux_sandbox_exe: Option<PathBuf>,
) -> anyhow::Result<()> {
let WindowsCommand {
full_auto,
config_overrides,
command,
} = command;
run_command_under_sandbox(
full_auto,
command,
config_overrides,
codex_linux_sandbox_exe,
SandboxType::Windows,
)
.await
}
enum SandboxType {
Seatbelt,
Landlock,
Windows,
}
async fn run_command_under_sandbox(
@@ -108,63 +87,6 @@ async fn run_command_under_sandbox(
let stdio_policy = StdioPolicy::Inherit;
let env = create_env(&config.shell_environment_policy);
// Special-case Windows sandbox: execute and exit the process to emulate inherited stdio.
if let SandboxType::Windows = sandbox_type {
#[cfg(target_os = "windows")]
{
use codex_windows_sandbox::run_windows_sandbox_capture;
let policy_str = match &config.sandbox_policy {
codex_core::protocol::SandboxPolicy::DangerFullAccess => "workspace-write",
codex_core::protocol::SandboxPolicy::ReadOnly => "read-only",
codex_core::protocol::SandboxPolicy::WorkspaceWrite { .. } => "workspace-write",
};
let sandbox_cwd = sandbox_policy_cwd.clone();
let cwd_clone = cwd.clone();
let env_map = env.clone();
let command_vec = command.clone();
let res = tokio::task::spawn_blocking(move || {
run_windows_sandbox_capture(
policy_str,
&sandbox_cwd,
command_vec,
&cwd_clone,
env_map,
None,
)
})
.await;
let capture = match res {
Ok(Ok(v)) => v,
Ok(Err(err)) => {
eprintln!("windows sandbox failed: {err}");
std::process::exit(1);
}
Err(join_err) => {
eprintln!("windows sandbox join error: {join_err}");
std::process::exit(1);
}
};
if !capture.stdout.is_empty() {
use std::io::Write;
let _ = std::io::stdout().write_all(&capture.stdout);
}
if !capture.stderr.is_empty() {
use std::io::Write;
let _ = std::io::stderr().write_all(&capture.stderr);
}
std::process::exit(capture.exit_code);
}
#[cfg(not(target_os = "windows"))]
{
anyhow::bail!("Windows sandbox is only available on Windows");
}
}
let mut child = match sandbox_type {
SandboxType::Seatbelt => {
spawn_command_under_seatbelt(
@@ -193,9 +115,6 @@ async fn run_command_under_sandbox(
)
.await?
}
SandboxType::Windows => {
unreachable!("Windows sandbox should have been handled above");
}
};
let status = child.wait().await?;

View File

@@ -32,17 +32,3 @@ pub struct LandlockCommand {
#[arg(trailing_var_arg = true)]
pub command: Vec<String>,
}
#[derive(Debug, Parser)]
pub struct WindowsCommand {
/// Convenience alias for low-friction sandboxed automatic execution (network-disabled sandbox that can write to cwd and TMPDIR)
#[arg(long = "full-auto", default_value_t = false)]
pub full_auto: bool,
#[clap(skip)]
pub config_overrides: CliConfigOverrides,
/// Full command args to run under Windows restricted token sandbox.
#[arg(trailing_var_arg = true)]
pub command: Vec<String>,
}

View File

@@ -1,7 +1,6 @@
use codex_app_server_protocol::AuthMode;
use codex_common::CliConfigOverrides;
use codex_core::CodexAuth;
use codex_core::auth::AuthCredentialsStoreMode;
use codex_core::auth::CLIENT_ID;
use codex_core::auth::login_with_api_key;
use codex_core::auth::logout;
@@ -18,13 +17,11 @@ use std::path::PathBuf;
pub async fn login_with_chatgpt(
codex_home: PathBuf,
forced_chatgpt_workspace_id: Option<String>,
cli_auth_credentials_store_mode: AuthCredentialsStoreMode,
) -> std::io::Result<()> {
let opts = ServerOptions::new(
codex_home,
CLIENT_ID.to_string(),
forced_chatgpt_workspace_id,
cli_auth_credentials_store_mode,
);
let server = run_login_server(opts)?;
@@ -46,13 +43,7 @@ pub async fn run_login_with_chatgpt(cli_config_overrides: CliConfigOverrides) ->
let forced_chatgpt_workspace_id = config.forced_chatgpt_workspace_id.clone();
match login_with_chatgpt(
config.codex_home,
forced_chatgpt_workspace_id,
config.cli_auth_credentials_store_mode,
)
.await
{
match login_with_chatgpt(config.codex_home, forced_chatgpt_workspace_id).await {
Ok(_) => {
eprintln!("Successfully logged in");
std::process::exit(0);
@@ -75,11 +66,7 @@ pub async fn run_login_with_api_key(
std::process::exit(1);
}
match login_with_api_key(
&config.codex_home,
&api_key,
config.cli_auth_credentials_store_mode,
) {
match login_with_api_key(&config.codex_home, &api_key) {
Ok(_) => {
eprintln!("Successfully logged in");
std::process::exit(0);
@@ -134,7 +121,6 @@ pub async fn run_login_with_device_code(
config.codex_home,
client_id.unwrap_or(CLIENT_ID.to_string()),
forced_chatgpt_workspace_id,
config.cli_auth_credentials_store_mode,
);
if let Some(iss) = issuer_base_url {
opts.issuer = iss;
@@ -154,7 +140,7 @@ pub async fn run_login_with_device_code(
pub async fn run_login_status(cli_config_overrides: CliConfigOverrides) -> ! {
let config = load_config_or_exit(cli_config_overrides).await;
match CodexAuth::from_auth_storage(&config.codex_home, config.cli_auth_credentials_store_mode) {
match CodexAuth::from_codex_home(&config.codex_home) {
Ok(Some(auth)) => match auth.mode {
AuthMode::ApiKey => match auth.get_token().await {
Ok(api_key) => {
@@ -185,7 +171,7 @@ pub async fn run_login_status(cli_config_overrides: CliConfigOverrides) -> ! {
pub async fn run_logout(cli_config_overrides: CliConfigOverrides) -> ! {
let config = load_config_or_exit(cli_config_overrides).await;
match logout(&config.codex_home, config.cli_auth_credentials_store_mode) {
match logout(&config.codex_home) {
Ok(true) => {
eprintln!("Successfully logged out");
std::process::exit(0);

View File

@@ -7,7 +7,6 @@ use codex_chatgpt::apply_command::ApplyCommand;
use codex_chatgpt::apply_command::run_apply_command;
use codex_cli::LandlockCommand;
use codex_cli::SeatbeltCommand;
use codex_cli::WindowsCommand;
use codex_cli::login::read_api_key_from_stdin;
use codex_cli::login::run_login_status;
use codex_cli::login::run_login_with_api_key;
@@ -20,7 +19,7 @@ use codex_exec::Cli as ExecCli;
use codex_responses_api_proxy::Args as ResponsesApiProxyArgs;
use codex_tui::AppExitInfo;
use codex_tui::Cli as TuiCli;
use codex_tui::updates::UpdateAction;
use codex_tui::UpdateAction;
use owo_colors::OwoColorize;
use std::path::PathBuf;
use supports_color::Stream;
@@ -30,7 +29,6 @@ mod mcp_cmd;
use crate::mcp_cmd::McpCli;
use codex_core::config::Config;
use codex_core::config::ConfigOverrides;
use codex_core::features::is_known_feature_key;
/// Codex CLI
///
@@ -152,9 +150,6 @@ enum SandboxCommand {
/// Run a command under Landlock+seccomp (Linux only).
#[clap(visible_alias = "landlock")]
Linux(LandlockCommand),
/// Run a command under Windows restricted token (Windows only).
Windows(WindowsCommand),
}
#[derive(Debug, Parser)]
@@ -291,25 +286,15 @@ struct FeatureToggles {
}
impl FeatureToggles {
fn to_overrides(&self) -> anyhow::Result<Vec<String>> {
fn to_overrides(&self) -> Vec<String> {
let mut v = Vec::new();
for feature in &self.enable {
Self::validate_feature(feature)?;
v.push(format!("features.{feature}=true"));
for k in &self.enable {
v.push(format!("features.{k}=true"));
}
for feature in &self.disable {
Self::validate_feature(feature)?;
v.push(format!("features.{feature}=false"));
}
Ok(v)
}
fn validate_feature(feature: &str) -> anyhow::Result<()> {
if is_known_feature_key(feature) {
Ok(())
} else {
anyhow::bail!("Unknown feature flag: {feature}")
for k in &self.disable {
v.push(format!("features.{k}=false"));
}
v
}
}
@@ -360,8 +345,9 @@ async fn cli_main(codex_linux_sandbox_exe: Option<PathBuf>) -> anyhow::Result<()
} = MultitoolCli::parse();
// Fold --enable/--disable into config overrides so they flow to all subcommands.
let toggle_overrides = feature_toggles.to_overrides()?;
root_config_overrides.raw_overrides.extend(toggle_overrides);
root_config_overrides
.raw_overrides
.extend(feature_toggles.to_overrides());
match subcommand {
None => {
@@ -476,17 +462,6 @@ async fn cli_main(codex_linux_sandbox_exe: Option<PathBuf>) -> anyhow::Result<()
)
.await?;
}
SandboxCommand::Windows(mut windows_cli) => {
prepend_config_flags(
&mut windows_cli.config_overrides,
root_config_overrides.clone(),
);
codex_cli::debug_sandbox::run_command_under_windows(
windows_cli,
codex_linux_sandbox_exe,
)
.await?;
}
},
Some(Subcommand::Apply(mut apply_cli)) => {
prepend_config_flags(
@@ -512,7 +487,7 @@ async fn cli_main(codex_linux_sandbox_exe: Option<PathBuf>) -> anyhow::Result<()
// Respect root-level `-c` overrides plus top-level flags like `--profile`.
let cli_kv_overrides = root_config_overrides
.parse_overrides()
.map_err(anyhow::Error::msg)?;
.map_err(|e| anyhow::anyhow!(e))?;
// Thread through relevant top-level flags (at minimum, `--profile`).
// Also honor `--search` since it maps to a feature toggle.
@@ -630,7 +605,6 @@ mod tests {
use assert_matches::assert_matches;
use codex_core::protocol::TokenUsage;
use codex_protocol::ConversationId;
use pretty_assertions::assert_eq;
fn finalize_from_args(args: &[&str]) -> TuiCli {
let cli = MultitoolCli::try_parse_from(args).expect("parse");
@@ -807,32 +781,4 @@ mod tests {
assert!(!interactive.resume_last);
assert_eq!(interactive.resume_session_id, None);
}
#[test]
fn feature_toggles_known_features_generate_overrides() {
let toggles = FeatureToggles {
enable: vec!["web_search_request".to_string()],
disable: vec!["unified_exec".to_string()],
};
let overrides = toggles.to_overrides().expect("valid features");
assert_eq!(
overrides,
vec![
"features.web_search_request=true".to_string(),
"features.unified_exec=false".to_string(),
]
);
}
#[test]
fn feature_toggles_unknown_feature_errors() {
let toggles = FeatureToggles {
enable: vec!["does_not_exist".to_string()],
disable: Vec::new(),
};
let err = toggles
.to_overrides()
.expect_err("feature should be rejected");
assert_eq!(err.to_string(), "Unknown feature flag: does_not_exist");
}
}
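
Above, the newer `to_overrides` becomes fallible so unknown feature keys are rejected before being folded into `-c` overrides. A standalone sketch of that validation, with a hypothetical allowlist standing in for the real `is_known_feature_key`:

// Hypothetical allowlist; the real lookup lives in codex_core::features.
const KNOWN_FEATURES: &[&str] = &["web_search_request", "unified_exec"];

fn is_known_feature_key(key: &str) -> bool {
    KNOWN_FEATURES.contains(&key)
}

fn to_overrides(enable: &[String], disable: &[String]) -> Result<Vec<String>, String> {
    let mut v = Vec::new();
    // Validate every toggle before emitting a `features.<key>=<bool>` override.
    for (features, value) in [(enable, true), (disable, false)] {
        for feature in features {
            if !is_known_feature_key(feature) {
                return Err(format!("Unknown feature flag: {feature}"));
            }
            v.push(format!("features.{feature}={value}"));
        }
    }
    Ok(v)
}

fn main() {
    let overrides = to_overrides(&["web_search_request".into()], &[]).unwrap();
    assert_eq!(overrides, vec!["features.web_search_request=true".to_string()]);
    assert!(to_overrides(&["does_not_exist".into()], &[]).is_err());
}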

View File

@@ -9,11 +9,11 @@ use codex_common::CliConfigOverrides;
use codex_common::format_env_display::format_env_display;
use codex_core::config::Config;
use codex_core::config::ConfigOverrides;
use codex_core::config::edit::ConfigEditsBuilder;
use codex_core::config::find_codex_home;
use codex_core::config::load_global_mcp_servers;
use codex_core::config::types::McpServerConfig;
use codex_core::config::types::McpServerTransportConfig;
use codex_core::config::write_global_mcp_servers;
use codex_core::config_types::McpServerConfig;
use codex_core::config_types::McpServerTransportConfig;
use codex_core::features::Feature;
use codex_core::mcp::auth::compute_auth_statuses;
use codex_core::protocol::McpAuthStatus;
@@ -150,10 +150,6 @@ pub struct RemoveArgs {
pub struct LoginArgs {
/// Name of the MCP server to authenticate with oauth.
pub name: String,
/// Comma-separated list of OAuth scopes to request.
#[arg(long, value_delimiter = ',', value_name = "SCOPE,SCOPE")]
pub scopes: Vec<String>,
}
#[derive(Debug, clap::Parser)]
@@ -196,9 +192,7 @@ impl McpCli {
async fn run_add(config_overrides: &CliConfigOverrides, add_args: AddArgs) -> Result<()> {
// Validate any provided overrides even though they are not currently applied.
let overrides = config_overrides
.parse_overrides()
.map_err(anyhow::Error::msg)?;
let overrides = config_overrides.parse_overrides().map_err(|e| anyhow!(e))?;
let config = Config::load_with_cli_overrides(overrides, ConfigOverrides::default())
.await
.context("failed to load configuration")?;
@@ -259,16 +253,11 @@ async fn run_add(config_overrides: &CliConfigOverrides, add_args: AddArgs) -> Re
enabled: true,
startup_timeout_sec: None,
tool_timeout_sec: None,
enabled_tools: None,
disabled_tools: None,
};
servers.insert(name.clone(), new_entry);
ConfigEditsBuilder::new(&codex_home)
.replace_mcp_servers(&servers)
.apply()
.await
write_global_mcp_servers(&codex_home, &servers)
.with_context(|| format!("failed to write MCP servers to {}", codex_home.display()))?;
println!("Added global MCP server '{name}'.");
@@ -279,42 +268,25 @@ async fn run_add(config_overrides: &CliConfigOverrides, add_args: AddArgs) -> Re
http_headers,
env_http_headers,
} = transport
&& matches!(supports_oauth_login(&url).await, Ok(true))
{
match supports_oauth_login(&url).await {
Ok(true) => {
if !config.features.enabled(Feature::RmcpClient) {
println!(
"MCP server supports login. Add `experimental_use_rmcp_client = true` \
to your config.toml and run `codex mcp login {name}` to login."
);
} else {
println!("Detected OAuth support. Starting OAuth flow…");
perform_oauth_login(
&name,
&url,
config.mcp_oauth_credentials_store_mode,
http_headers.clone(),
env_http_headers.clone(),
&Vec::new(),
)
.await?;
println!("Successfully logged in.");
}
}
Ok(false) => {}
Err(_) => println!(
"MCP server may or may not require login. Run `codex mcp login {name}` to login."
),
}
println!("Detected OAuth support. Starting OAuth flow…");
perform_oauth_login(
&name,
&url,
config.mcp_oauth_credentials_store_mode,
http_headers.clone(),
env_http_headers.clone(),
)
.await?;
println!("Successfully logged in.");
}
Ok(())
}
async fn run_remove(config_overrides: &CliConfigOverrides, remove_args: RemoveArgs) -> Result<()> {
config_overrides
.parse_overrides()
.map_err(anyhow::Error::msg)?;
config_overrides.parse_overrides().map_err(|e| anyhow!(e))?;
let RemoveArgs { name } = remove_args;
@@ -328,10 +300,7 @@ async fn run_remove(config_overrides: &CliConfigOverrides, remove_args: RemoveAr
let removed = servers.remove(&name).is_some();
if removed {
ConfigEditsBuilder::new(&codex_home)
.replace_mcp_servers(&servers)
.apply()
.await
write_global_mcp_servers(&codex_home, &servers)
.with_context(|| format!("failed to write MCP servers to {}", codex_home.display()))?;
}
@@ -345,18 +314,18 @@ async fn run_remove(config_overrides: &CliConfigOverrides, remove_args: RemoveAr
}
async fn run_login(config_overrides: &CliConfigOverrides, login_args: LoginArgs) -> Result<()> {
let overrides = config_overrides
.parse_overrides()
.map_err(anyhow::Error::msg)?;
let overrides = config_overrides.parse_overrides().map_err(|e| anyhow!(e))?;
let config = Config::load_with_cli_overrides(overrides, ConfigOverrides::default())
.await
.context("failed to load configuration")?;
if !config.features.enabled(Feature::RmcpClient) {
bail!("OAuth login is only supported when [feature].rmcp_client is true in config.toml.");
bail!(
"OAuth login is only supported when experimental_use_rmcp_client is true in config.toml."
);
}
let LoginArgs { name, scopes } = login_args;
let LoginArgs { name } = login_args;
let Some(server) = config.mcp_servers.get(&name) else {
bail!("No MCP server named '{name}' found.");
@@ -378,7 +347,6 @@ async fn run_login(config_overrides: &CliConfigOverrides, login_args: LoginArgs)
config.mcp_oauth_credentials_store_mode,
http_headers,
env_http_headers,
&scopes,
)
.await?;
println!("Successfully logged in to MCP server '{name}'.");
@@ -386,9 +354,7 @@ async fn run_login(config_overrides: &CliConfigOverrides, login_args: LoginArgs)
}
async fn run_logout(config_overrides: &CliConfigOverrides, logout_args: LogoutArgs) -> Result<()> {
let overrides = config_overrides
.parse_overrides()
.map_err(anyhow::Error::msg)?;
let overrides = config_overrides.parse_overrides().map_err(|e| anyhow!(e))?;
let config = Config::load_with_cli_overrides(overrides, ConfigOverrides::default())
.await
.context("failed to load configuration")?;
@@ -415,9 +381,7 @@ async fn run_logout(config_overrides: &CliConfigOverrides, logout_args: LogoutAr
}
async fn run_list(config_overrides: &CliConfigOverrides, list_args: ListArgs) -> Result<()> {
let overrides = config_overrides
.parse_overrides()
.map_err(anyhow::Error::msg)?;
let overrides = config_overrides.parse_overrides().map_err(|e| anyhow!(e))?;
let config = Config::load_with_cli_overrides(overrides, ConfigOverrides::default())
.await
.context("failed to load configuration")?;
@@ -436,7 +400,7 @@ async fn run_list(config_overrides: &CliConfigOverrides, list_args: ListArgs) ->
.map(|(name, cfg)| {
let auth_status = auth_statuses
.get(name.as_str())
.map(|entry| entry.auth_status)
.copied()
.unwrap_or(McpAuthStatus::Unsupported);
let transport = match &cfg.transport {
McpServerTransportConfig::Stdio {
@@ -523,7 +487,7 @@ async fn run_list(config_overrides: &CliConfigOverrides, list_args: ListArgs) ->
};
let auth_status = auth_statuses
.get(name.as_str())
.map(|entry| entry.auth_status)
.copied()
.unwrap_or(McpAuthStatus::Unsupported)
.to_string();
stdio_rows.push([
@@ -548,15 +512,13 @@ async fn run_list(config_overrides: &CliConfigOverrides, list_args: ListArgs) ->
};
let auth_status = auth_statuses
.get(name.as_str())
.map(|entry| entry.auth_status)
.copied()
.unwrap_or(McpAuthStatus::Unsupported)
.to_string();
let bearer_token_display =
bearer_token_env_var.as_deref().unwrap_or("-").to_string();
http_rows.push([
name.clone(),
url.clone(),
bearer_token_display,
bearer_token_env_var.clone().unwrap_or("-".to_string()),
status,
auth_status,
]);
@@ -672,9 +634,7 @@ async fn run_list(config_overrides: &CliConfigOverrides, list_args: ListArgs) ->
}
async fn run_get(config_overrides: &CliConfigOverrides, get_args: GetArgs) -> Result<()> {
let overrides = config_overrides
.parse_overrides()
.map_err(anyhow::Error::msg)?;
let overrides = config_overrides.parse_overrides().map_err(|e| anyhow!(e))?;
let config = Config::load_with_cli_overrides(overrides, ConfigOverrides::default())
.await
.context("failed to load configuration")?;
@@ -716,8 +676,6 @@ async fn run_get(config_overrides: &CliConfigOverrides, get_args: GetArgs) -> Re
"name": get_args.name,
"enabled": server.enabled,
"transport": transport,
"enabled_tools": server.enabled_tools.clone(),
"disabled_tools": server.disabled_tools.clone(),
"startup_timeout_sec": server
.startup_timeout_sec
.map(|timeout| timeout.as_secs_f64()),
@@ -729,28 +687,8 @@ async fn run_get(config_overrides: &CliConfigOverrides, get_args: GetArgs) -> Re
return Ok(());
}
if !server.enabled {
println!("{} (disabled)", get_args.name);
return Ok(());
}
println!("{}", get_args.name);
println!(" enabled: {}", server.enabled);
let format_tool_list = |tools: &Option<Vec<String>>| -> String {
match tools {
Some(list) if list.is_empty() => "[]".to_string(),
Some(list) => list.join(", "),
None => "-".to_string(),
}
};
if server.enabled_tools.is_some() {
let enabled_tools_display = format_tool_list(&server.enabled_tools);
println!(" enabled_tools: {enabled_tools_display}");
}
if server.disabled_tools.is_some() {
let disabled_tools_display = format_tool_list(&server.disabled_tools);
println!(" disabled_tools: {disabled_tools_display}");
}
match &server.transport {
McpServerTransportConfig::Stdio {
command,
@@ -784,15 +722,15 @@ async fn run_get(config_overrides: &CliConfigOverrides, get_args: GetArgs) -> Re
} => {
println!(" transport: streamable_http");
println!(" url: {url}");
let bearer_token_display = bearer_token_env_var.as_deref().unwrap_or("-");
println!(" bearer_token_env_var: {bearer_token_display}");
let env_var = bearer_token_env_var.as_deref().unwrap_or("-");
println!(" bearer_token_env_var: {env_var}");
let headers_display = match http_headers {
Some(map) if !map.is_empty() => {
let mut pairs: Vec<_> = map.iter().collect();
pairs.sort_by(|(a, _), (b, _)| a.cmp(b));
pairs
.into_iter()
.map(|(k, _)| format!("{k}=*****"))
.map(|(k, v)| format!("{k}={v}"))
.collect::<Vec<_>>()
.join(", ")
}
@@ -805,7 +743,7 @@ async fn run_get(config_overrides: &CliConfigOverrides, get_args: GetArgs) -> Re
pairs.sort_by(|(a, _), (b, _)| a.cmp(b));
pairs
.into_iter()
.map(|(k, var)| format!("{k}={var}"))
.map(|(k, v)| format!("{k}={v}"))
.collect::<Vec<_>>()
.join(", ")
}
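
Several hunks in this file swap `.map_err(|e| anyhow!(e))` for `.map_err(anyhow::Error::msg)`; both wrap a string-like error into `anyhow::Error`, the newer form just passes a function reference instead of a closure. A minimal illustration (assumes the anyhow crate; `parse_overrides` here is a stand-in, not the real API):

fn parse_overrides(raw: &str) -> Result<i32, String> {
    raw.parse::<i32>().map_err(|e| e.to_string())
}

fn main() -> anyhow::Result<()> {
    // Function reference: String -> anyhow::Error, no closure needed.
    let n = parse_overrides("42").map_err(anyhow::Error::msg)?;
    assert_eq!(n, 42);
    Ok(())
}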

View File

@@ -2,7 +2,7 @@ use std::path::Path;
use anyhow::Result;
use codex_core::config::load_global_mcp_servers;
use codex_core::config::types::McpServerTransportConfig;
use codex_core::config_types::McpServerTransportConfig;
use predicates::str::contains;
use pretty_assertions::assert_eq;
use tempfile::TempDir;

View File

@@ -1,9 +1,9 @@
use std::path::Path;
use anyhow::Result;
use codex_core::config::edit::ConfigEditsBuilder;
use codex_core::config::load_global_mcp_servers;
use codex_core::config::types::McpServerTransportConfig;
use codex_core::config::write_global_mcp_servers;
use codex_core::config_types::McpServerTransportConfig;
use predicates::prelude::PredicateBooleanExt;
use predicates::str::contains;
use pretty_assertions::assert_eq;
@@ -59,9 +59,7 @@ async fn list_and_get_render_expected_output() -> Result<()> {
}
other => panic!("unexpected transport: {other:?}"),
}
ConfigEditsBuilder::new(codex_home.path())
.replace_mcp_servers(&servers)
.apply_blocking()?;
write_global_mcp_servers(codex_home.path(), &servers)?;
let mut list_cmd = codex_command(codex_home.path())?;
let list_output = list_cmd.args(["mcp", "list"]).output()?;
@@ -70,9 +68,9 @@ async fn list_and_get_render_expected_output() -> Result<()> {
assert!(stdout.contains("Name"));
assert!(stdout.contains("docs"));
assert!(stdout.contains("docs-server"));
assert!(stdout.contains("TOKEN=*****"));
assert!(stdout.contains("APP_TOKEN=*****"));
assert!(stdout.contains("WORKSPACE_ID=*****"));
assert!(stdout.contains("TOKEN=secret"));
assert!(stdout.contains("APP_TOKEN=$APP_TOKEN"));
assert!(stdout.contains("WORKSPACE_ID=$WORKSPACE_ID"));
assert!(stdout.contains("Status"));
assert!(stdout.contains("Auth"));
assert!(stdout.contains("enabled"));
@@ -121,9 +119,9 @@ async fn list_and_get_render_expected_output() -> Result<()> {
assert!(stdout.contains("transport: stdio"));
assert!(stdout.contains("command: docs-server"));
assert!(stdout.contains("args: --port 4000"));
assert!(stdout.contains("env: TOKEN=*****"));
assert!(stdout.contains("APP_TOKEN=*****"));
assert!(stdout.contains("WORKSPACE_ID=*****"));
assert!(stdout.contains("env: TOKEN=secret"));
assert!(stdout.contains("APP_TOKEN=$APP_TOKEN"));
assert!(stdout.contains("WORKSPACE_ID=$WORKSPACE_ID"));
assert!(stdout.contains("enabled: true"));
assert!(stdout.contains("remove: codex mcp remove docs"));
@@ -136,30 +134,3 @@ async fn list_and_get_render_expected_output() -> Result<()> {
Ok(())
}
#[tokio::test]
async fn get_disabled_server_shows_single_line() -> Result<()> {
let codex_home = TempDir::new()?;
let mut add = codex_command(codex_home.path())?;
add.args(["mcp", "add", "docs", "--", "docs-server"])
.assert()
.success();
let mut servers = load_global_mcp_servers(codex_home.path()).await?;
let docs = servers
.get_mut("docs")
.expect("docs server should exist after add");
docs.enabled = false;
ConfigEditsBuilder::new(codex_home.path())
.replace_mcp_servers(&servers)
.apply_blocking()?;
let mut get_cmd = codex_command(codex_home.path())?;
let get_output = get_cmd.args(["mcp", "get", "docs"]).output()?;
assert!(get_output.status.success());
let stdout = String::from_utf8(get_output.stdout)?;
assert_eq!(stdout.trim_end(), "docs (disabled)");
Ok(())
}

View File

@@ -22,6 +22,6 @@ chrono = { version = "0.4", features = ["serde"] }
diffy = "0.4.2"
serde = { version = "1", features = ["derive"] }
serde_json = "1"
thiserror = "2.0.17"
thiserror = "2.0.12"
codex-backend-client = { path = "../backend-client", optional = true }
codex-git = { workspace = true }
codex-git-apply = { path = "../git-apply" }

View File

@@ -362,13 +362,13 @@ mod api {
});
}
let req = codex_git::ApplyGitRequest {
let req = codex_git_apply::ApplyGitRequest {
cwd: std::env::current_dir().unwrap_or_else(|_| std::env::temp_dir()),
diff: diff.clone(),
revert: false,
preflight,
};
let r = codex_git::apply_git_patch(&req)
let r = codex_git_apply::apply_git_patch(&req)
.map_err(|e| CloudTaskError::Io(format!("git apply failed to run: {e}")))?;
let status = if r.exit_code == 0 {

View File

@@ -26,4 +26,4 @@ pub use mock::MockClient;
#[cfg(feature = "online")]
pub use http::HttpClient;
// Reusable apply engine now lives in the shared crate `codex-git`.
// Reusable apply engine now lives in the shared crate `codex-git-apply`.

View File

@@ -58,16 +58,7 @@ async fn init_backend(user_agent_suffix: &str) -> anyhow::Result<BackendContext>
let auth = match codex_core::config::find_codex_home()
.ok()
.map(|home| {
let store_mode = codex_core::config::Config::load_from_base_config_with_overrides(
codex_core::config::ConfigToml::default(),
codex_core::config::ConfigOverrides::default(),
home.clone(),
)
.map(|cfg| cfg.cli_auth_credentials_store_mode)
.unwrap_or_default();
codex_login::AuthManager::new(home, false, store_mode)
})
.map(|home| codex_login::AuthManager::new(home, false))
.and_then(|am| am.auth())
{
Some(auth) => auth,
@@ -1095,19 +1086,7 @@ pub async fn run_main(cli: Cli, _codex_linux_sandbox_exe: Option<PathBuf>) -> an
let backend = Arc::clone(&backend);
let best_of_n = page.best_of_n;
tokio::spawn(async move {
let git_ref = if let Ok(cwd) = std::env::current_dir() {
if let Some(branch) = codex_core::git_info::default_branch_name(&cwd).await {
branch
} else if let Some(branch) = codex_core::git_info::current_branch_name(&cwd).await {
branch
} else {
"main".to_string()
}
} else {
"main".to_string()
};
let result = codex_cloud_tasks_client::CloudBackend::create_task(&*backend, &env, &text, &git_ref, false, best_of_n).await;
let result = codex_cloud_tasks_client::CloudBackend::create_task(&*backend, &env, &text, "main", false, best_of_n).await;
let evt = match result {
Ok(ok) => app::AppEvent::NewTaskSubmitted(Ok(ok)),
Err(e) => app::AppEvent::NewTaskSubmitted(Err(format!("{e}"))),

View File

@@ -70,14 +70,7 @@ pub async fn build_chatgpt_headers() -> HeaderMap {
HeaderValue::from_str(&ua).unwrap_or(HeaderValue::from_static("codex-cli")),
);
if let Ok(home) = codex_core::config::find_codex_home() {
let store_mode = codex_core::config::Config::load_from_base_config_with_overrides(
codex_core::config::ConfigToml::default(),
codex_core::config::ConfigOverrides::default(),
home.clone(),
)
.map(|cfg| cfg.cli_auth_credentials_store_mode)
.unwrap_or_default();
let am = codex_login::AuthManager::new(home, false, store_mode);
let am = codex_login::AuthManager::new(home, false);
if let Some(auth) = am.auth()
&& let Ok(tok) = auth.get_token().await
&& !tok.is_empty()

View File

@@ -15,7 +15,3 @@ path = "src/lib.rs"
[dependencies]
serde = { version = "1", features = ["derive"] }
serde_json = "1"
serde_with = "3"
[package.metadata.cargo-shear]
ignored = ["serde_with"]

View File

@@ -3,7 +3,6 @@
// Currently export only the types referenced by the workspace
// The process for this will change
// Cloud Tasks
pub mod code_task_details_response;
pub use self::code_task_details_response::CodeTaskDetailsResponse;
@@ -21,14 +20,3 @@ pub use self::task_list_item::TaskListItem;
pub mod paginated_list_task_list_item_;
pub use self::paginated_list_task_list_item_::PaginatedListTaskListItem;
// Rate Limits
pub mod rate_limit_status_payload;
pub use self::rate_limit_status_payload::PlanType;
pub use self::rate_limit_status_payload::RateLimitStatusPayload;
pub mod rate_limit_status_details;
pub use self::rate_limit_status_details::RateLimitStatusDetails;
pub mod rate_limit_window_snapshot;
pub use self::rate_limit_window_snapshot::RateLimitWindowSnapshot;

View File

@@ -1,46 +0,0 @@
/*
* codex-backend
*
* codex-backend
*
* The version of the OpenAPI document: 0.0.1
*
* Generated by: https://openapi-generator.tech
*/
use crate::models;
use serde::Deserialize;
use serde::Serialize;
#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)]
pub struct RateLimitStatusDetails {
#[serde(rename = "allowed")]
pub allowed: bool,
#[serde(rename = "limit_reached")]
pub limit_reached: bool,
#[serde(
rename = "primary_window",
default,
with = "::serde_with::rust::double_option",
skip_serializing_if = "Option::is_none"
)]
pub primary_window: Option<Option<Box<models::RateLimitWindowSnapshot>>>,
#[serde(
rename = "secondary_window",
default,
with = "::serde_with::rust::double_option",
skip_serializing_if = "Option::is_none"
)]
pub secondary_window: Option<Option<Box<models::RateLimitWindowSnapshot>>>,
}
impl RateLimitStatusDetails {
pub fn new(allowed: bool, limit_reached: bool) -> RateLimitStatusDetails {
RateLimitStatusDetails {
allowed,
limit_reached,
primary_window: None,
secondary_window: None,
}
}
}
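
The deleted models above lean on `serde_with::rust::double_option`, which distinguishes a field that is absent from one that is explicitly `null`. A minimal sketch of that behavior (assumes the serde, serde_json, and serde_with crates):

use serde::{Deserialize, Serialize};

#[derive(Debug, Serialize, Deserialize)]
struct Payload {
    // None          -> field omitted entirely
    // Some(None)    -> field present as JSON null
    // Some(Some(v)) -> field present with a value
    #[serde(
        default,
        with = "::serde_with::rust::double_option",
        skip_serializing_if = "Option::is_none"
    )]
    window: Option<Option<i32>>,
}

fn main() {
    let absent: Payload = serde_json::from_str("{}").unwrap();
    assert_eq!(absent.window, None);

    let null: Payload = serde_json::from_str(r#"{"window": null}"#).unwrap();
    assert_eq!(null.window, Some(None));

    let value: Payload = serde_json::from_str(r#"{"window": 42}"#).unwrap();
    assert_eq!(value.window, Some(Some(42)));
}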

View File

@@ -1,65 +0,0 @@
/*
* codex-backend
*
* codex-backend
*
* The version of the OpenAPI document: 0.0.1
*
* Generated by: https://openapi-generator.tech
*/
use crate::models;
use serde::Deserialize;
use serde::Serialize;
#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)]
pub struct RateLimitStatusPayload {
#[serde(rename = "plan_type")]
pub plan_type: PlanType,
#[serde(
rename = "rate_limit",
default,
with = "::serde_with::rust::double_option",
skip_serializing_if = "Option::is_none"
)]
pub rate_limit: Option<Option<Box<models::RateLimitStatusDetails>>>,
}
impl RateLimitStatusPayload {
pub fn new(plan_type: PlanType) -> RateLimitStatusPayload {
RateLimitStatusPayload {
plan_type,
rate_limit: None,
}
}
}
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
pub enum PlanType {
#[serde(rename = "free")]
Free,
#[serde(rename = "go")]
Go,
#[serde(rename = "plus")]
Plus,
#[serde(rename = "pro")]
Pro,
#[serde(rename = "team")]
Team,
#[serde(rename = "business")]
Business,
#[serde(rename = "education")]
Education,
#[serde(rename = "quorum")]
Quorum,
#[serde(rename = "enterprise")]
Enterprise,
#[serde(rename = "edu")]
Edu,
}
impl Default for PlanType {
fn default() -> PlanType {
Self::Free
}
}

View File

@@ -1,40 +0,0 @@
/*
* codex-backend
*
* codex-backend
*
* The version of the OpenAPI document: 0.0.1
*
* Generated by: https://openapi-generator.tech
*/
use serde::Deserialize;
use serde::Serialize;
#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)]
pub struct RateLimitWindowSnapshot {
#[serde(rename = "used_percent")]
pub used_percent: i32,
#[serde(rename = "limit_window_seconds")]
pub limit_window_seconds: i32,
#[serde(rename = "reset_after_seconds")]
pub reset_after_seconds: i32,
#[serde(rename = "reset_at")]
pub reset_at: i32,
}
impl RateLimitWindowSnapshot {
pub fn new(
used_percent: i32,
limit_window_seconds: i32,
reset_after_seconds: i32,
reset_at: i32,
) -> RateLimitWindowSnapshot {
RateLimitWindowSnapshot {
used_percent,
limit_window_seconds,
reset_after_seconds,
reset_at,
}
}
}

View File

@@ -24,21 +24,21 @@ pub fn builtin_approval_presets() -> Vec<ApprovalPreset> {
ApprovalPreset {
id: "read-only",
label: "Read Only",
description: "Codex can read files and answer questions. Codex requires approval to make edits, run commands, or access network.",
description: "Codex can read files and answer questions. Codex requires approval to make edits, run commands, or access network",
approval: AskForApproval::OnRequest,
sandbox: SandboxPolicy::ReadOnly,
},
ApprovalPreset {
id: "auto",
label: "Auto",
description: "Codex can read files, make edits, and run commands in the workspace. Codex requires approval to work outside the workspace or access network.",
description: "Codex can read files, make edits, and run commands in the workspace. Codex requires approval to work outside the workspace or access network",
approval: AskForApproval::OnRequest,
sandbox: SandboxPolicy::new_workspace_write_policy(),
},
ApprovalPreset {
id: "full-access",
label: "Full Access",
description: "Codex can read files, make edits, and run commands with network access, without approval. Exercise caution.",
description: "Codex can read files, make edits, and run commands with network access, without approval. Exercise caution",
approval: AskForApproval::Never,
sandbox: SandboxPolicy::DangerFullAccess,
},

View File

@@ -6,11 +6,15 @@ pub fn format_env_display(env: Option<&HashMap<String, String>>, env_vars: &[Str
if let Some(map) = env {
let mut pairs: Vec<_> = map.iter().collect();
pairs.sort_by(|(a, _), (b, _)| a.cmp(b));
parts.extend(pairs.into_iter().map(|(key, _)| format!("{key}=*****")));
parts.extend(
pairs
.into_iter()
.map(|(key, value)| format!("{key}={value}")),
);
}
if !env_vars.is_empty() {
parts.extend(env_vars.iter().map(|var| format!("{var}=*****")));
parts.extend(env_vars.iter().map(|var| format!("{var}=${var}")));
}
if parts.is_empty() {
@@ -38,14 +42,14 @@ mod tests {
env.insert("B".to_string(), "two".to_string());
env.insert("A".to_string(), "one".to_string());
assert_eq!(format_env_display(Some(&env), &[]), "A=*****, B=*****");
assert_eq!(format_env_display(Some(&env), &[]), "A=one, B=two");
}
#[test]
fn formats_env_vars_with_dollar_prefix() {
let vars = vec!["TOKEN".to_string(), "PATH".to_string()];
assert_eq!(format_env_display(None, &vars), "TOKEN=*****, PATH=*****");
assert_eq!(format_env_display(None, &vars), "TOKEN=$TOKEN, PATH=$PATH");
}
#[test]
@@ -56,7 +60,7 @@ mod tests {
assert_eq!(
format_env_display(Some(&env), &vars),
"HOME=*****, TOKEN=*****"
"HOME=/tmp, TOKEN=$TOKEN"
);
}
}
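
Both sides of this hunk build the same sorted display string; they differ only in whether values are echoed or masked. A self-contained sketch of the masking variant (the `"-"` fallback for the empty case is an assumption, since the hunk cuts off before it):

use std::collections::HashMap;

/// Render env pairs and bare variable names as a sorted, masked display string.
fn format_env_display(env: Option<&HashMap<String, String>>, env_vars: &[String]) -> String {
    let mut parts: Vec<String> = Vec::new();
    if let Some(map) = env {
        let mut pairs: Vec<_> = map.iter().collect();
        pairs.sort_by(|(a, _), (b, _)| a.cmp(b));
        // Mask values so secrets never reach the terminal.
        parts.extend(pairs.into_iter().map(|(key, _)| format!("{key}=*****")));
    }
    if !env_vars.is_empty() {
        parts.extend(env_vars.iter().map(|var| format!("{var}=*****")));
    }
    if parts.is_empty() {
        "-".to_string()
    } else {
        parts.join(", ")
    }
}

fn main() {
    let mut env = HashMap::new();
    env.insert("TOKEN".to_string(), "secret".to_string());
    assert_eq!(format_env_display(Some(&env), &[]), "TOKEN=*****");
}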

View File

@@ -1,96 +1,73 @@
use codex_app_server_protocol::AuthMode;
use codex_core::protocol_config_types::ReasoningEffort;
/// A reasoning effort option that can be surfaced for a model.
#[derive(Debug, Clone, Copy)]
pub struct ReasoningEffortPreset {
/// Effort level that the model supports.
pub effort: ReasoningEffort,
/// Short human description shown next to the effort in UIs.
pub description: &'static str,
}
/// Metadata describing a Codex-supported model.
/// A simple preset pairing a model slug with a reasoning effort.
#[derive(Debug, Clone, Copy)]
pub struct ModelPreset {
/// Stable identifier for the preset.
pub id: &'static str,
/// Display label shown in UIs.
pub label: &'static str,
/// Short human description shown next to the label in UIs.
pub description: &'static str,
/// Model slug (e.g., "gpt-5").
pub model: &'static str,
/// Display name shown in UIs.
pub display_name: &'static str,
/// Short human description shown in UIs.
pub description: &'static str,
/// Reasoning effort applied when none is explicitly chosen.
pub default_reasoning_effort: ReasoningEffort,
/// Supported reasoning effort options.
pub supported_reasoning_efforts: &'static [ReasoningEffortPreset],
/// Whether this is the default model for new users.
pub is_default: bool,
/// Reasoning effort to apply for this preset.
pub effort: Option<ReasoningEffort>,
}
const PRESETS: &[ModelPreset] = &[
ModelPreset {
id: "gpt-5-codex",
id: "gpt-5-codex-low",
label: "gpt-5-codex low",
description: "Fastest responses with limited reasoning",
model: "gpt-5-codex",
display_name: "gpt-5-codex",
description: "Optimized for coding tasks with many tools.",
default_reasoning_effort: ReasoningEffort::Medium,
supported_reasoning_efforts: &[
ReasoningEffortPreset {
effort: ReasoningEffort::Low,
description: "Fastest responses with limited reasoning",
},
ReasoningEffortPreset {
effort: ReasoningEffort::Medium,
description: "Dynamically adjusts reasoning based on the task",
},
ReasoningEffortPreset {
effort: ReasoningEffort::High,
description: "Maximizes reasoning depth for complex or ambiguous problems",
},
],
is_default: true,
effort: Some(ReasoningEffort::Low),
},
ModelPreset {
id: "gpt-5",
id: "gpt-5-codex-medium",
label: "gpt-5-codex medium",
description: "Dynamically adjusts reasoning based on the task",
model: "gpt-5-codex",
effort: Some(ReasoningEffort::Medium),
},
ModelPreset {
id: "gpt-5-codex-high",
label: "gpt-5-codex high",
description: "Maximizes reasoning depth for complex or ambiguous problems",
model: "gpt-5-codex",
effort: Some(ReasoningEffort::High),
},
ModelPreset {
id: "gpt-5-minimal",
label: "gpt-5 minimal",
description: "Fastest responses with little reasoning",
model: "gpt-5",
display_name: "gpt-5",
description: "Broad world knowledge with strong general reasoning.",
default_reasoning_effort: ReasoningEffort::Medium,
supported_reasoning_efforts: &[
ReasoningEffortPreset {
effort: ReasoningEffort::Minimal,
description: "Fastest responses with little reasoning",
},
ReasoningEffortPreset {
effort: ReasoningEffort::Low,
description: "Balances speed with some reasoning; useful for straightforward queries and short explanations",
},
ReasoningEffortPreset {
effort: ReasoningEffort::Medium,
description: "Provides a solid balance of reasoning depth and latency for general-purpose tasks",
},
ReasoningEffortPreset {
effort: ReasoningEffort::High,
description: "Maximizes reasoning depth for complex or ambiguous problems",
},
],
is_default: false,
effort: Some(ReasoningEffort::Minimal),
},
ModelPreset {
id: "gpt-5-low",
label: "gpt-5 low",
description: "Balances speed with some reasoning; useful for straightforward queries and short explanations",
model: "gpt-5",
effort: Some(ReasoningEffort::Low),
},
ModelPreset {
id: "gpt-5-medium",
label: "gpt-5 medium",
description: "Provides a solid balance of reasoning depth and latency for general-purpose tasks",
model: "gpt-5",
effort: Some(ReasoningEffort::Medium),
},
ModelPreset {
id: "gpt-5-high",
label: "gpt-5 high",
description: "Maximizes reasoning depth for complex or ambiguous problems",
model: "gpt-5",
effort: Some(ReasoningEffort::High),
},
];
pub fn builtin_model_presets(_auth_mode: Option<AuthMode>) -> Vec<ModelPreset> {
PRESETS.to_vec()
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn only_one_default_model_is_configured() {
let default_models = PRESETS.iter().filter(|preset| preset.is_default).count();
assert!(default_models == 1);
}
}
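
The reworked presets above collapse the per-effort entries into one preset per model and add a test that exactly one preset is marked as the default. A trimmed sketch of that invariant, with the struct reduced to the two fields that matter here:

#[derive(Debug, Clone, Copy)]
struct ModelPreset {
    id: &'static str,
    is_default: bool,
}

const PRESETS: &[ModelPreset] = &[
    ModelPreset { id: "gpt-5-codex", is_default: true },
    ModelPreset { id: "gpt-5", is_default: false },
];

fn main() {
    // Exactly one preset may be offered as the default to new users.
    let defaults = PRESETS.iter().filter(|p| p.is_default).count();
    assert_eq!(defaults, 1);
}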

View File

@@ -21,30 +21,20 @@ bytes = { workspace = true }
chrono = { workspace = true, features = ["serde"] }
codex-app-server-protocol = { workspace = true }
codex-apply-patch = { workspace = true }
codex-async-utils = { workspace = true }
codex-file-search = { workspace = true }
codex-git = { workspace = true }
codex-keyring-store = { workspace = true }
codex-mcp-client = { workspace = true }
codex-otel = { workspace = true, features = ["otel"] }
codex-protocol = { workspace = true }
codex-rmcp-client = { workspace = true }
codex-utils-pty = { workspace = true }
codex-utils-readiness = { workspace = true }
codex-async-utils = { workspace = true }
codex-utils-string = { workspace = true }
codex-utils-tokenizer = { workspace = true }
codex-utils-pty = { workspace = true }
dirs = { workspace = true }
dunce = { workspace = true }
env-flags = { workspace = true }
eventsource-stream = { workspace = true }
futures = { workspace = true }
http = { workspace = true }
indexmap = { workspace = true }
keyring = { workspace = true, features = [
"apple-native",
"crypto-rust",
"linux-native-async-persistent",
"windows-native",
] }
libc = { workspace = true }
mcp-types = { workspace = true }
os_info = { workspace = true }
@@ -54,7 +44,6 @@ reqwest = { workspace = true, features = ["json", "stream"] }
serde = { workspace = true, features = ["derive"] }
serde_json = { workspace = true }
sha1 = { workspace = true }
sha2 = { workspace = true }
shlex = { workspace = true }
similar = { workspace = true }
strum_macros = { workspace = true }
@@ -83,7 +72,6 @@ tree-sitter-bash = { workspace = true }
uuid = { workspace = true, features = ["serde", "v4"] }
which = { workspace = true }
wildmatch = { workspace = true }
codex_windows_sandbox = { package = "codex-windows-sandbox", path = "../windows-sandbox-rs" }
[target.'cfg(target_os = "linux")'.dependencies]
@@ -106,7 +94,6 @@ assert_cmd = { workspace = true }
assert_matches = { workspace = true }
core_test_support = { workspace = true }
escargot = { workspace = true }
image = { workspace = true, features = ["jpeg", "png"] }
maplit = { workspace = true }
predicates = { workspace = true }
pretty_assertions = { workspace = true }

View File

@@ -82,6 +82,6 @@ OUTPUT FORMAT:
* **Do not** wrap the JSON in markdown fences or extra prose.
* The code_location field is required and must include absolute_file_path and line_range.
* Line ranges must be as short as possible for interpreting the issue (avoid ranges over 5-10 lines; pick the most suitable subrange).
*Line ranges must be as short as possible for interpreting the issue (avoid ranges over 5-10 lines; pick the most suitable subrange).
* The code_location should overlap with the diff.
* Do not generate a PR fix.
* Do not generate a PR fix.

View File

@@ -36,6 +36,7 @@ pub(crate) struct ApplyPatchExec {
pub(crate) async fn apply_patch(
sess: &Session,
turn_context: &TurnContext,
sub_id: &str,
call_id: &str,
action: ApplyPatchAction,
) -> InternalApplyPatchInvocation {
@@ -61,13 +62,7 @@ pub(crate) async fn apply_patch(
// that similar patches can be auto-approved in the future during
// this session.
let rx_approve = sess
.request_patch_approval(
turn_context,
call_id.to_owned(),
convert_apply_patch_to_protocol(&action),
None,
None,
)
.request_patch_approval(sub_id.to_owned(), call_id.to_owned(), &action, None, None)
.await;
match rx_approve.await.unwrap_or_default() {
ReviewDecision::Approved | ReviewDecision::ApprovedForSession => {

View File

@@ -1,12 +1,16 @@
mod storage;
use chrono::DateTime;
use chrono::Utc;
use serde::Deserialize;
use serde::Serialize;
#[cfg(test)]
use serial_test::serial;
use std::env;
use std::fmt::Debug;
use std::fs::File;
use std::fs::OpenOptions;
use std::io::Read;
use std::io::Write;
#[cfg(unix)]
use std::os::unix::fs::OpenOptionsExt;
use std::path::Path;
use std::path::PathBuf;
use std::sync::Arc;
@@ -16,16 +20,10 @@ use std::time::Duration;
use codex_app_server_protocol::AuthMode;
use codex_protocol::config_types::ForcedLoginMethod;
pub use crate::auth::storage::AuthCredentialsStoreMode;
pub use crate::auth::storage::AuthDotJson;
use crate::auth::storage::AuthStorageBackend;
use crate::auth::storage::create_auth_storage;
use crate::config::Config;
use crate::default_client::CodexHttpClient;
use crate::token_data::PlanType;
use crate::token_data::TokenData;
use crate::token_data::parse_id_token;
use crate::util::try_parse_error_message;
#[derive(Debug, Clone)]
pub struct CodexAuth {
@@ -33,8 +31,8 @@ pub struct CodexAuth {
pub(crate) api_key: Option<String>,
pub(crate) auth_dot_json: Arc<Mutex<Option<AuthDotJson>>>,
storage: Arc<dyn AuthStorageBackend>,
pub(crate) client: CodexHttpClient,
pub(crate) auth_file: PathBuf,
pub(crate) client: reqwest::Client,
}
impl PartialEq for CodexAuth {
@@ -43,13 +41,8 @@ impl PartialEq for CodexAuth {
}
}
// TODO(pakrym): use token exp field to check for expiration instead
const TOKEN_REFRESH_INTERVAL: i64 = 8;
impl CodexAuth {
pub async fn refresh_token(&self) -> Result<String, std::io::Error> {
tracing::info!("Refreshing token");
let token_data = self
.get_current_token_data()
.ok_or(std::io::Error::other("Token data is not available."))?;
@@ -60,7 +53,7 @@ impl CodexAuth {
.map_err(std::io::Error::other)?;
let updated = update_tokens(
&self.storage,
&self.auth_file,
refresh_response.id_token,
refresh_response.access_token,
refresh_response.refresh_token,
@@ -82,12 +75,9 @@ impl CodexAuth {
Ok(access)
}
/// Loads the available auth information from auth storage.
pub fn from_auth_storage(
codex_home: &Path,
auth_credentials_store_mode: AuthCredentialsStoreMode,
) -> std::io::Result<Option<CodexAuth>> {
load_auth(codex_home, false, auth_credentials_store_mode)
/// Loads the available auth information from the auth.json.
pub fn from_codex_home(codex_home: &Path) -> std::io::Result<Option<CodexAuth>> {
load_auth(codex_home, false)
}
pub async fn get_token_data(&self) -> Result<TokenData, std::io::Error> {
@@ -98,7 +88,7 @@ impl CodexAuth {
last_refresh: Some(last_refresh),
..
}) => {
if last_refresh < Utc::now() - chrono::Duration::days(TOKEN_REFRESH_INTERVAL) {
if last_refresh < Utc::now() - chrono::Duration::days(28) {
let refresh_response = tokio::time::timeout(
Duration::from_secs(60),
try_refresh_token(tokens.refresh_token.clone(), &self.client),
@@ -110,7 +100,7 @@ impl CodexAuth {
.map_err(std::io::Error::other)?;
let updated_auth_dot_json = update_tokens(
&self.storage,
&self.auth_file,
refresh_response.id_token,
refresh_response.access_token,
refresh_response.refresh_token,
@@ -184,17 +174,17 @@ impl CodexAuth {
Self {
api_key: None,
mode: AuthMode::ChatGPT,
storage: create_auth_storage(PathBuf::new(), AuthCredentialsStoreMode::File),
auth_file: PathBuf::new(),
auth_dot_json,
client: crate::default_client::create_client(),
}
}
fn from_api_key_with_client(api_key: &str, client: CodexHttpClient) -> Self {
fn from_api_key_with_client(api_key: &str, client: reqwest::Client) -> Self {
Self {
api_key: Some(api_key.to_owned()),
mode: AuthMode::ApiKey,
storage: create_auth_storage(PathBuf::new(), AuthCredentialsStoreMode::File),
auth_file: PathBuf::new(),
auth_dot_json: Arc::new(Mutex::new(None)),
client,
}
@@ -222,57 +212,33 @@ pub fn read_codex_api_key_from_env() -> Option<String> {
.filter(|value| !value.is_empty())
}
pub fn get_auth_file(codex_home: &Path) -> PathBuf {
codex_home.join("auth.json")
}
/// Delete the auth.json file inside `codex_home` if it exists. Returns `Ok(true)`
/// if a file was removed, `Ok(false)` if no auth file was present.
pub fn logout(
codex_home: &Path,
auth_credentials_store_mode: AuthCredentialsStoreMode,
) -> std::io::Result<bool> {
let storage = create_auth_storage(codex_home.to_path_buf(), auth_credentials_store_mode);
storage.delete()
pub fn logout(codex_home: &Path) -> std::io::Result<bool> {
let auth_file = get_auth_file(codex_home);
match std::fs::remove_file(&auth_file) {
Ok(_) => Ok(true),
Err(err) if err.kind() == std::io::ErrorKind::NotFound => Ok(false),
Err(err) => Err(err),
}
}
/// Writes an `auth.json` that contains only the API key.
pub fn login_with_api_key(
codex_home: &Path,
api_key: &str,
auth_credentials_store_mode: AuthCredentialsStoreMode,
) -> std::io::Result<()> {
pub fn login_with_api_key(codex_home: &Path, api_key: &str) -> std::io::Result<()> {
let auth_dot_json = AuthDotJson {
openai_api_key: Some(api_key.to_string()),
tokens: None,
last_refresh: None,
};
save_auth(codex_home, &auth_dot_json, auth_credentials_store_mode)
}
/// Persist the provided auth payload using the specified backend.
pub fn save_auth(
codex_home: &Path,
auth: &AuthDotJson,
auth_credentials_store_mode: AuthCredentialsStoreMode,
) -> std::io::Result<()> {
let storage = create_auth_storage(codex_home.to_path_buf(), auth_credentials_store_mode);
storage.save(auth)
}
/// Load CLI auth data using the configured credential store backend.
/// Returns `None` when no credentials are stored.
pub fn load_auth_dot_json(
codex_home: &Path,
auth_credentials_store_mode: AuthCredentialsStoreMode,
) -> std::io::Result<Option<AuthDotJson>> {
let storage = create_auth_storage(codex_home.to_path_buf(), auth_credentials_store_mode);
storage.load()
write_auth_json(&get_auth_file(codex_home), &auth_dot_json)
}
pub async fn enforce_login_restrictions(config: &Config) -> std::io::Result<()> {
let Some(auth) = load_auth(
&config.codex_home,
true,
config.cli_auth_credentials_store_mode,
)?
else {
let Some(auth) = load_auth(&config.codex_home, true)? else {
return Ok(());
};
@@ -291,11 +257,7 @@ pub async fn enforce_login_restrictions(config: &Config) -> std::io::Result<()>
};
if let Some(message) = method_violation {
return logout_with_message(
&config.codex_home,
message,
config.cli_auth_credentials_store_mode,
);
return logout_with_message(&config.codex_home, message);
}
}
@@ -312,7 +274,6 @@ pub async fn enforce_login_restrictions(config: &Config) -> std::io::Result<()>
format!(
"Failed to load ChatGPT credentials while enforcing workspace restrictions: {err}. Logging out."
),
config.cli_auth_credentials_store_mode,
);
}
};
@@ -328,23 +289,15 @@ pub async fn enforce_login_restrictions(config: &Config) -> std::io::Result<()>
"Login is restricted to workspace {expected_account_id}, but current credentials lack a workspace identifier. Logging out."
),
};
return logout_with_message(
&config.codex_home,
message,
config.cli_auth_credentials_store_mode,
);
return logout_with_message(&config.codex_home, message);
}
}
Ok(())
}
fn logout_with_message(
codex_home: &Path,
message: String,
auth_credentials_store_mode: AuthCredentialsStoreMode,
) -> std::io::Result<()> {
match logout(codex_home, auth_credentials_store_mode) {
fn logout_with_message(codex_home: &Path, message: String) -> std::io::Result<()> {
match logout(codex_home) {
Ok(_) => Err(std::io::Error::other(message)),
Err(err) => Err(std::io::Error::other(format!(
"{message}. Failed to remove auth.json: {err}"
@@ -355,7 +308,6 @@ fn logout_with_message(
fn load_auth(
codex_home: &Path,
enable_codex_api_key_env: bool,
auth_credentials_store_mode: AuthCredentialsStoreMode,
) -> std::io::Result<Option<CodexAuth>> {
if enable_codex_api_key_env && let Some(api_key) = read_codex_api_key_from_env() {
let client = crate::default_client::create_client();
@@ -365,12 +317,12 @@ fn load_auth(
)));
}
let storage = create_auth_storage(codex_home.to_path_buf(), auth_credentials_store_mode);
let auth_file = get_auth_file(codex_home);
let client = crate::default_client::create_client();
let auth_dot_json = match storage.load()? {
Some(auth) => auth,
None => return Ok(None),
let auth_dot_json = match try_read_auth_json(&auth_file) {
Ok(auth) => auth,
Err(err) if err.kind() == std::io::ErrorKind::NotFound => return Ok(None),
Err(err) => return Err(err),
};
let AuthDotJson {
@@ -387,7 +339,7 @@ fn load_auth(
Ok(Some(CodexAuth {
api_key: None,
mode: AuthMode::ChatGPT,
storage: storage.clone(),
auth_file,
auth_dot_json: Arc::new(Mutex::new(Some(AuthDotJson {
openai_api_key: None,
tokens,
@@ -397,20 +349,44 @@ fn load_auth(
}))
}
/// Attempt to read and refresh the `auth.json` file in the given `CODEX_HOME` directory.
/// Returns the full AuthDotJson structure after refreshing if necessary.
pub fn try_read_auth_json(auth_file: &Path) -> std::io::Result<AuthDotJson> {
let mut file = File::open(auth_file)?;
let mut contents = String::new();
file.read_to_string(&mut contents)?;
let auth_dot_json: AuthDotJson = serde_json::from_str(&contents)?;
Ok(auth_dot_json)
}
pub fn write_auth_json(auth_file: &Path, auth_dot_json: &AuthDotJson) -> std::io::Result<()> {
if let Some(parent) = auth_file.parent() {
std::fs::create_dir_all(parent)?;
}
let json_data = serde_json::to_string_pretty(auth_dot_json)?;
let mut options = OpenOptions::new();
options.truncate(true).write(true).create(true);
#[cfg(unix)]
{
options.mode(0o600);
}
let mut file = options.open(auth_file)?;
file.write_all(json_data.as_bytes())?;
file.flush()?;
Ok(())
}
async fn update_tokens(
storage: &Arc<dyn AuthStorageBackend>,
id_token: Option<String>,
auth_file: &Path,
id_token: String,
access_token: Option<String>,
refresh_token: Option<String>,
) -> std::io::Result<AuthDotJson> {
let mut auth_dot_json = storage
.load()?
.ok_or(std::io::Error::other("Token data is not available."))?;
let mut auth_dot_json = try_read_auth_json(auth_file)?;
let tokens = auth_dot_json.tokens.get_or_insert_with(TokenData::default);
if let Some(id_token) = id_token {
tokens.id_token = parse_id_token(&id_token).map_err(std::io::Error::other)?;
}
tokens.id_token = parse_id_token(&id_token).map_err(std::io::Error::other)?;
if let Some(access_token) = access_token {
tokens.access_token = access_token;
}
@@ -418,13 +394,13 @@ async fn update_tokens(
tokens.refresh_token = refresh_token;
}
auth_dot_json.last_refresh = Some(Utc::now());
storage.save(&auth_dot_json)?;
write_auth_json(auth_file, &auth_dot_json)?;
Ok(auth_dot_json)
}
async fn try_refresh_token(
refresh_token: String,
client: &CodexHttpClient,
client: &reqwest::Client,
) -> std::io::Result<RefreshResponse> {
let refresh_request = RefreshRequest {
client_id: CLIENT_ID,
@@ -450,9 +426,8 @@ async fn try_refresh_token(
Ok(refresh_response)
} else {
Err(std::io::Error::other(format!(
"Failed to refresh token: {}: {}",
response.status(),
try_parse_error_message(&response.text().await.unwrap_or_default()),
"Failed to refresh token: {}",
response.status()
)))
}
}
@@ -467,11 +442,24 @@ struct RefreshRequest {
#[derive(Deserialize, Clone)]
struct RefreshResponse {
id_token: Option<String>,
id_token: String,
access_token: Option<String>,
refresh_token: Option<String>,
}
/// Expected structure for $CODEX_HOME/auth.json.
#[derive(Deserialize, Serialize, Clone, Debug, PartialEq)]
pub struct AuthDotJson {
#[serde(rename = "OPENAI_API_KEY")]
pub openai_api_key: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub tokens: Option<TokenData>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub last_refresh: Option<DateTime<Utc>>,
}
// Shared constant for token refresh (client id used for oauth token refresh flow)
pub const CLIENT_ID: &str = "app_EMoamEEZ73f0CkXaXp7hrann";
@@ -486,15 +474,12 @@ struct CachedAuth {
#[cfg(test)]
mod tests {
use super::*;
use crate::auth::storage::FileAuthStorage;
use crate::auth::storage::get_auth_file;
use crate::config::Config;
use crate::config::ConfigOverrides;
use crate::config::ConfigToml;
use crate::token_data::IdTokenInfo;
use crate::token_data::KnownPlan;
use crate::token_data::PlanType;
use base64::Engine;
use codex_protocol::config_types::ForcedLoginMethod;
use pretty_assertions::assert_eq;
@@ -503,9 +488,9 @@ mod tests {
use tempfile::tempdir;
#[tokio::test]
async fn refresh_without_id_token() {
async fn roundtrip_auth_dot_json() {
let codex_home = tempdir().unwrap();
let fake_jwt = write_auth_file(
let _ = write_auth_file(
AuthFileParams {
openai_api_key: None,
chatgpt_plan_type: "pro".to_string(),
@@ -515,23 +500,12 @@ mod tests {
)
.expect("failed to write auth file");
let storage = create_auth_storage(
codex_home.path().to_path_buf(),
AuthCredentialsStoreMode::File,
);
let updated = super::update_tokens(
&storage,
None,
Some("new-access-token".to_string()),
Some("new-refresh-token".to_string()),
)
.await
.expect("update_tokens should succeed");
let file = get_auth_file(codex_home.path());
let auth_dot_json = try_read_auth_json(&file).unwrap();
write_auth_json(&file, &auth_dot_json).unwrap();
let tokens = updated.tokens.expect("tokens should exist");
assert_eq!(tokens.id_token.raw_jwt, fake_jwt);
assert_eq!(tokens.access_token, "new-access-token");
assert_eq!(tokens.refresh_token, "new-refresh-token");
let same_auth_dot_json = try_read_auth_json(&file).unwrap();
assert_eq!(auth_dot_json, same_auth_dot_json);
}
#[test]
@@ -553,13 +527,9 @@ mod tests {
)
.unwrap();
super::login_with_api_key(dir.path(), "sk-new", AuthCredentialsStoreMode::File)
.expect("login_with_api_key should succeed");
super::login_with_api_key(dir.path(), "sk-new").expect("login_with_api_key should succeed");
let storage = FileAuthStorage::new(dir.path().to_path_buf());
let auth = storage
.try_read_auth_json(&auth_path)
.expect("auth.json should parse");
let auth = super::try_read_auth_json(&auth_path).expect("auth.json should parse");
assert_eq!(auth.openai_api_key.as_deref(), Some("sk-new"));
assert!(auth.tokens.is_none(), "tokens should be cleared");
}
@@ -567,13 +537,11 @@ mod tests {
#[test]
fn missing_auth_json_returns_none() {
let dir = tempdir().unwrap();
let auth = CodexAuth::from_auth_storage(dir.path(), AuthCredentialsStoreMode::File)
.expect("call should succeed");
let auth = CodexAuth::from_codex_home(dir.path()).expect("call should succeed");
assert_eq!(auth, None);
}
#[tokio::test]
#[serial(codex_api_key)]
async fn pro_account_with_no_api_key_uses_chatgpt_auth() {
let codex_home = tempdir().unwrap();
let fake_jwt = write_auth_file(
@@ -590,11 +558,9 @@ mod tests {
api_key,
mode,
auth_dot_json,
storage: _,
auth_file: _,
..
} = super::load_auth(codex_home.path(), false, AuthCredentialsStoreMode::File)
.unwrap()
.unwrap();
} = super::load_auth(codex_home.path(), false).unwrap().unwrap();
assert_eq!(None, api_key);
assert_eq!(AuthMode::ChatGPT, mode);
@@ -625,7 +591,6 @@ mod tests {
}
#[tokio::test]
#[serial(codex_api_key)]
async fn loads_api_key_from_auth_json() {
let dir = tempdir().unwrap();
let auth_file = dir.path().join("auth.json");
@@ -635,9 +600,7 @@ mod tests {
)
.unwrap();
let auth = super::load_auth(dir.path(), false, AuthCredentialsStoreMode::File)
.unwrap()
.unwrap();
let auth = super::load_auth(dir.path(), false).unwrap().unwrap();
assert_eq!(auth.mode, AuthMode::ApiKey);
assert_eq!(auth.api_key, Some("sk-test-key".to_string()));
@@ -652,11 +615,11 @@ mod tests {
tokens: None,
last_refresh: None,
};
super::save_auth(dir.path(), &auth_dot_json, AuthCredentialsStoreMode::File)?;
let auth_file = get_auth_file(dir.path());
assert!(auth_file.exists());
assert!(logout(dir.path(), AuthCredentialsStoreMode::File)?);
assert!(!auth_file.exists());
write_auth_json(&get_auth_file(dir.path()), &auth_dot_json)?;
assert!(dir.path().join("auth.json").exists());
let removed = logout(dir.path())?;
assert!(removed);
assert!(!dir.path().join("auth.json").exists());
Ok(())
}
@@ -764,8 +727,7 @@ mod tests {
#[tokio::test]
async fn enforce_login_restrictions_logs_out_for_method_mismatch() {
let codex_home = tempdir().unwrap();
login_with_api_key(codex_home.path(), "sk-test", AuthCredentialsStoreMode::File)
.expect("seed api key");
login_with_api_key(codex_home.path(), "sk-test").expect("seed api key");
let config = build_config(codex_home.path(), Some(ForcedLoginMethod::Chatgpt), None);
@@ -780,7 +742,6 @@ mod tests {
}
#[tokio::test]
#[serial(codex_api_key)]
async fn enforce_login_restrictions_logs_out_for_workspace_mismatch() {
let codex_home = tempdir().unwrap();
let _jwt = write_auth_file(
@@ -806,7 +767,6 @@ mod tests {
}
#[tokio::test]
#[serial(codex_api_key)]
async fn enforce_login_restrictions_allows_matching_workspace() {
let codex_home = tempdir().unwrap();
let _jwt = write_auth_file(
@@ -834,8 +794,7 @@ mod tests {
async fn enforce_login_restrictions_allows_api_key_if_login_method_not_set_but_forced_chatgpt_workspace_id_is_set()
{
let codex_home = tempdir().unwrap();
login_with_api_key(codex_home.path(), "sk-test", AuthCredentialsStoreMode::File)
.expect("seed api key");
login_with_api_key(codex_home.path(), "sk-test").expect("seed api key");
let config = build_config(codex_home.path(), None, Some("org_mine".to_string()));
@@ -879,7 +838,6 @@ pub struct AuthManager {
codex_home: PathBuf,
inner: RwLock<CachedAuth>,
enable_codex_api_key_env: bool,
auth_credentials_store_mode: AuthCredentialsStoreMode,
}
impl AuthManager {
@@ -887,23 +845,14 @@ impl AuthManager {
/// preferred auth method. Errors loading auth are swallowed; `auth()` will
/// simply return `None` in that case so callers can treat it as an
/// unauthenticated state.
pub fn new(
codex_home: PathBuf,
enable_codex_api_key_env: bool,
auth_credentials_store_mode: AuthCredentialsStoreMode,
) -> Self {
let auth = load_auth(
&codex_home,
enable_codex_api_key_env,
auth_credentials_store_mode,
)
.ok()
.flatten();
pub fn new(codex_home: PathBuf, enable_codex_api_key_env: bool) -> Self {
let auth = load_auth(&codex_home, enable_codex_api_key_env)
.ok()
.flatten();
Self {
codex_home,
inner: RwLock::new(CachedAuth { auth }),
enable_codex_api_key_env,
auth_credentials_store_mode,
}
}
@@ -914,7 +863,6 @@ impl AuthManager {
codex_home: PathBuf::new(),
inner: RwLock::new(cached),
enable_codex_api_key_env: false,
auth_credentials_store_mode: AuthCredentialsStoreMode::File,
})
}
@@ -926,13 +874,9 @@ impl AuthManager {
/// Force a reload of the auth information from auth.json. Returns
/// whether the auth value changed.
pub fn reload(&self) -> bool {
let new_auth = load_auth(
&self.codex_home,
self.enable_codex_api_key_env,
self.auth_credentials_store_mode,
)
.ok()
.flatten();
let new_auth = load_auth(&self.codex_home, self.enable_codex_api_key_env)
.ok()
.flatten();
if let Ok(mut guard) = self.inner.write() {
let changed = !AuthManager::auths_equal(&guard.auth, &new_auth);
guard.auth = new_auth;
@@ -951,16 +895,8 @@ impl AuthManager {
}
/// Convenience constructor returning an `Arc` wrapper.
pub fn shared(
codex_home: PathBuf,
enable_codex_api_key_env: bool,
auth_credentials_store_mode: AuthCredentialsStoreMode,
) -> Arc<Self> {
Arc::new(Self::new(
codex_home,
enable_codex_api_key_env,
auth_credentials_store_mode,
))
pub fn shared(codex_home: PathBuf, enable_codex_api_key_env: bool) -> Arc<Self> {
Arc::new(Self::new(codex_home, enable_codex_api_key_env))
}
/// Attempt to refresh the current auth token (if any). On success, reload
@@ -976,10 +912,7 @@ impl AuthManager {
self.reload();
Ok(Some(token))
}
Err(e) => {
tracing::error!("Failed to refresh token: {}", e);
Err(e)
}
Err(e) => Err(e),
}
}
@@ -988,7 +921,7 @@ impl AuthManager {
/// reloads the inmemory auth cache so callers immediately observe the
/// unauthenticated state.
pub fn logout(&self) -> std::io::Result<bool> {
let removed = super::auth::logout(&self.codex_home, self.auth_credentials_store_mode)?;
let removed = super::auth::logout(&self.codex_home)?;
// Always reload to clear any cached auth (even if file absent).
self.reload();
Ok(removed)
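
The `update_tokens` change earlier in this file makes `id_token` optional, so a refresh response may omit it and the stored token survives. A simplified, std-only sketch of that merge (stand-in structs; the real code also parses the id-token JWT and persists through the storage backend):

use std::time::SystemTime;

#[derive(Default, Debug)]
struct TokenData {
    id_token: String,
    access_token: String,
    refresh_token: String,
}

#[derive(Default, Debug)]
struct AuthDotJson {
    tokens: Option<TokenData>,
    last_refresh: Option<SystemTime>,
}

/// Merge freshly refreshed tokens into stored auth, keeping the old value
/// for any field the refresh response omitted.
fn update_tokens(
    auth: &mut AuthDotJson,
    id_token: Option<String>,
    access_token: Option<String>,
    refresh_token: Option<String>,
) {
    let tokens = auth.tokens.get_or_insert_with(TokenData::default);
    if let Some(id_token) = id_token {
        tokens.id_token = id_token;
    }
    if let Some(access_token) = access_token {
        tokens.access_token = access_token;
    }
    if let Some(refresh_token) = refresh_token {
        tokens.refresh_token = refresh_token;
    }
    auth.last_refresh = Some(SystemTime::now());
}

fn main() {
    let mut auth = AuthDotJson::default();
    update_tokens(&mut auth, None, Some("new-access".into()), None);
    assert_eq!(auth.tokens.unwrap().access_token, "new-access");
    assert!(auth.last_refresh.is_some());
}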

View File

@@ -1,672 +0,0 @@
use chrono::DateTime;
use chrono::Utc;
use serde::Deserialize;
use serde::Serialize;
use sha2::Digest;
use sha2::Sha256;
use std::fmt::Debug;
use std::fs::File;
use std::fs::OpenOptions;
use std::io::Read;
use std::io::Write;
#[cfg(unix)]
use std::os::unix::fs::OpenOptionsExt;
use std::path::Path;
use std::path::PathBuf;
use std::sync::Arc;
use tracing::warn;
use crate::token_data::TokenData;
use codex_keyring_store::DefaultKeyringStore;
use codex_keyring_store::KeyringStore;
/// Determine where Codex should store CLI auth credentials.
#[derive(Debug, Default, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum AuthCredentialsStoreMode {
#[default]
/// Persist credentials in CODEX_HOME/auth.json.
File,
/// Persist credentials in the keyring. Fail if unavailable.
Keyring,
/// Use keyring when available; otherwise, fall back to a file in CODEX_HOME.
Auto,
}
/// Expected structure for $CODEX_HOME/auth.json.
#[derive(Deserialize, Serialize, Clone, Debug, PartialEq)]
pub struct AuthDotJson {
#[serde(rename = "OPENAI_API_KEY")]
pub openai_api_key: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub tokens: Option<TokenData>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub last_refresh: Option<DateTime<Utc>>,
}
pub(super) fn get_auth_file(codex_home: &Path) -> PathBuf {
codex_home.join("auth.json")
}
pub(super) fn delete_file_if_exists(codex_home: &Path) -> std::io::Result<bool> {
let auth_file = get_auth_file(codex_home);
match std::fs::remove_file(&auth_file) {
Ok(()) => Ok(true),
Err(err) if err.kind() == std::io::ErrorKind::NotFound => Ok(false),
Err(err) => Err(err),
}
}
pub(super) trait AuthStorageBackend: Debug + Send + Sync {
fn load(&self) -> std::io::Result<Option<AuthDotJson>>;
fn save(&self, auth: &AuthDotJson) -> std::io::Result<()>;
fn delete(&self) -> std::io::Result<bool>;
}
#[derive(Clone, Debug)]
pub(super) struct FileAuthStorage {
codex_home: PathBuf,
}
impl FileAuthStorage {
pub(super) fn new(codex_home: PathBuf) -> Self {
Self { codex_home }
}
/// Attempt to read and refresh the `auth.json` file in the given `CODEX_HOME` directory.
/// Returns the full AuthDotJson structure after refreshing if necessary.
pub(super) fn try_read_auth_json(&self, auth_file: &Path) -> std::io::Result<AuthDotJson> {
let mut file = File::open(auth_file)?;
let mut contents = String::new();
file.read_to_string(&mut contents)?;
let auth_dot_json: AuthDotJson = serde_json::from_str(&contents)?;
Ok(auth_dot_json)
}
}
impl AuthStorageBackend for FileAuthStorage {
fn load(&self) -> std::io::Result<Option<AuthDotJson>> {
let auth_file = get_auth_file(&self.codex_home);
let auth_dot_json = match self.try_read_auth_json(&auth_file) {
Ok(auth) => auth,
Err(err) if err.kind() == std::io::ErrorKind::NotFound => return Ok(None),
Err(err) => return Err(err),
};
Ok(Some(auth_dot_json))
}
fn save(&self, auth_dot_json: &AuthDotJson) -> std::io::Result<()> {
let auth_file = get_auth_file(&self.codex_home);
if let Some(parent) = auth_file.parent() {
std::fs::create_dir_all(parent)?;
}
let json_data = serde_json::to_string_pretty(auth_dot_json)?;
let mut options = OpenOptions::new();
options.truncate(true).write(true).create(true);
#[cfg(unix)]
{
options.mode(0o600);
}
let mut file = options.open(auth_file)?;
file.write_all(json_data.as_bytes())?;
file.flush()?;
Ok(())
}
fn delete(&self) -> std::io::Result<bool> {
delete_file_if_exists(&self.codex_home)
}
}
const KEYRING_SERVICE: &str = "Codex Auth";
// Turns the codex_home path into a stable, short key string.
fn compute_store_key(codex_home: &Path) -> std::io::Result<String> {
let canonical = codex_home
.canonicalize()
.unwrap_or_else(|_| codex_home.to_path_buf());
let path_str = canonical.to_string_lossy();
let mut hasher = Sha256::new();
hasher.update(path_str.as_bytes());
let digest = hasher.finalize();
let hex = format!("{digest:x}");
let truncated = hex.get(..16).unwrap_or(&hex);
Ok(format!("cli|{truncated}"))
}
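/// A minimal usage sketch of the derivation above (illustrative only):
/// canonicalize (best effort), hash the path string with SHA-256, and keep
/// the first 16 hex characters. The tests further below pin "~/.codex" to
/// "cli|940db7b1d0e4eb40".
#[allow(dead_code)]
fn example_store_key() -> std::io::Result<String> {
    // Expected: "cli|940db7b1d0e4eb40" for this literal path.
    compute_store_key(Path::new("~/.codex"))
}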
#[derive(Clone, Debug)]
struct KeyringAuthStorage {
codex_home: PathBuf,
keyring_store: Arc<dyn KeyringStore>,
}
impl KeyringAuthStorage {
fn new(codex_home: PathBuf, keyring_store: Arc<dyn KeyringStore>) -> Self {
Self {
codex_home,
keyring_store,
}
}
fn load_from_keyring(&self, key: &str) -> std::io::Result<Option<AuthDotJson>> {
match self.keyring_store.load(KEYRING_SERVICE, key) {
Ok(Some(serialized)) => serde_json::from_str(&serialized).map(Some).map_err(|err| {
std::io::Error::other(format!(
"failed to deserialize CLI auth from keyring: {err}"
))
}),
Ok(None) => Ok(None),
Err(error) => Err(std::io::Error::other(format!(
"failed to load CLI auth from keyring: {}",
error.message()
))),
}
}
fn save_to_keyring(&self, key: &str, value: &str) -> std::io::Result<()> {
match self.keyring_store.save(KEYRING_SERVICE, key, value) {
Ok(()) => Ok(()),
Err(error) => {
let message = format!(
"failed to write OAuth tokens to keyring: {}",
error.message()
);
warn!("{message}");
Err(std::io::Error::other(message))
}
}
}
}
impl AuthStorageBackend for KeyringAuthStorage {
fn load(&self) -> std::io::Result<Option<AuthDotJson>> {
let key = compute_store_key(&self.codex_home)?;
self.load_from_keyring(&key)
}
fn save(&self, auth: &AuthDotJson) -> std::io::Result<()> {
let key = compute_store_key(&self.codex_home)?;
        // Serialize the auth payload, mapping serde errors to io::Error.
let serialized = serde_json::to_string(auth).map_err(std::io::Error::other)?;
self.save_to_keyring(&key, &serialized)?;
if let Err(err) = delete_file_if_exists(&self.codex_home) {
warn!("failed to remove CLI auth fallback file: {err}");
}
Ok(())
}
fn delete(&self) -> std::io::Result<bool> {
let key = compute_store_key(&self.codex_home)?;
let keyring_removed = self
.keyring_store
.delete(KEYRING_SERVICE, &key)
.map_err(|err| {
std::io::Error::other(format!("failed to delete auth from keyring: {err}"))
})?;
let file_removed = delete_file_if_exists(&self.codex_home)?;
Ok(keyring_removed || file_removed)
}
}
#[derive(Clone, Debug)]
struct AutoAuthStorage {
keyring_storage: Arc<KeyringAuthStorage>,
file_storage: Arc<FileAuthStorage>,
}
impl AutoAuthStorage {
fn new(codex_home: PathBuf, keyring_store: Arc<dyn KeyringStore>) -> Self {
Self {
keyring_storage: Arc::new(KeyringAuthStorage::new(codex_home.clone(), keyring_store)),
file_storage: Arc::new(FileAuthStorage::new(codex_home)),
}
}
}
impl AuthStorageBackend for AutoAuthStorage {
fn load(&self) -> std::io::Result<Option<AuthDotJson>> {
match self.keyring_storage.load() {
Ok(Some(auth)) => Ok(Some(auth)),
Ok(None) => self.file_storage.load(),
Err(err) => {
warn!("failed to load CLI auth from keyring, falling back to file storage: {err}");
self.file_storage.load()
}
}
}
fn save(&self, auth: &AuthDotJson) -> std::io::Result<()> {
match self.keyring_storage.save(auth) {
Ok(()) => Ok(()),
Err(err) => {
warn!("failed to save auth to keyring, falling back to file storage: {err}");
self.file_storage.save(auth)
}
}
}
fn delete(&self) -> std::io::Result<bool> {
// Keyring storage will delete from disk as well
self.keyring_storage.delete()
}
}
pub(super) fn create_auth_storage(
codex_home: PathBuf,
mode: AuthCredentialsStoreMode,
) -> Arc<dyn AuthStorageBackend> {
let keyring_store: Arc<dyn KeyringStore> = Arc::new(DefaultKeyringStore);
create_auth_storage_with_keyring_store(codex_home, mode, keyring_store)
}
fn create_auth_storage_with_keyring_store(
codex_home: PathBuf,
mode: AuthCredentialsStoreMode,
keyring_store: Arc<dyn KeyringStore>,
) -> Arc<dyn AuthStorageBackend> {
match mode {
AuthCredentialsStoreMode::File => Arc::new(FileAuthStorage::new(codex_home)),
AuthCredentialsStoreMode::Keyring => {
Arc::new(KeyringAuthStorage::new(codex_home, keyring_store))
}
AuthCredentialsStoreMode::Auto => Arc::new(AutoAuthStorage::new(codex_home, keyring_store)),
}
}
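/// A hedged usage sketch of the factory above (illustrative only): `Auto`
/// tries the OS keyring first and silently falls back to
/// `CODEX_HOME/auth.json`, so callers only touch the `AuthStorageBackend`
/// trait.
#[allow(dead_code)]
fn example_auto_storage(codex_home: PathBuf) -> std::io::Result<Option<AuthDotJson>> {
    let storage = create_auth_storage(codex_home, AuthCredentialsStoreMode::Auto);
    // Loads from the keyring when present, otherwise from auth.json.
    storage.load()
}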
#[cfg(test)]
mod tests {
use super::*;
use crate::token_data::IdTokenInfo;
use anyhow::Context;
use base64::Engine;
use pretty_assertions::assert_eq;
use serde_json::json;
use tempfile::tempdir;
use codex_keyring_store::tests::MockKeyringStore;
use keyring::Error as KeyringError;
#[tokio::test]
async fn file_storage_load_returns_auth_dot_json() -> anyhow::Result<()> {
let codex_home = tempdir()?;
let storage = FileAuthStorage::new(codex_home.path().to_path_buf());
let auth_dot_json = AuthDotJson {
openai_api_key: Some("test-key".to_string()),
tokens: None,
last_refresh: Some(Utc::now()),
};
storage
.save(&auth_dot_json)
.context("failed to save auth file")?;
let loaded = storage.load().context("failed to load auth file")?;
assert_eq!(Some(auth_dot_json), loaded);
Ok(())
}
#[tokio::test]
async fn file_storage_save_persists_auth_dot_json() -> anyhow::Result<()> {
let codex_home = tempdir()?;
let storage = FileAuthStorage::new(codex_home.path().to_path_buf());
let auth_dot_json = AuthDotJson {
openai_api_key: Some("test-key".to_string()),
tokens: None,
last_refresh: Some(Utc::now()),
};
let file = get_auth_file(codex_home.path());
storage
.save(&auth_dot_json)
.context("failed to save auth file")?;
let same_auth_dot_json = storage
.try_read_auth_json(&file)
.context("failed to read auth file after save")?;
assert_eq!(auth_dot_json, same_auth_dot_json);
Ok(())
}
#[test]
fn file_storage_delete_removes_auth_file() -> anyhow::Result<()> {
let dir = tempdir()?;
let auth_dot_json = AuthDotJson {
openai_api_key: Some("sk-test-key".to_string()),
tokens: None,
last_refresh: None,
};
let storage = create_auth_storage(dir.path().to_path_buf(), AuthCredentialsStoreMode::File);
storage.save(&auth_dot_json)?;
assert!(dir.path().join("auth.json").exists());
let storage = FileAuthStorage::new(dir.path().to_path_buf());
let removed = storage.delete()?;
assert!(removed);
assert!(!dir.path().join("auth.json").exists());
Ok(())
}
fn seed_keyring_and_fallback_auth_file_for_delete<F>(
mock_keyring: &MockKeyringStore,
codex_home: &Path,
compute_key: F,
) -> anyhow::Result<(String, PathBuf)>
where
F: FnOnce() -> std::io::Result<String>,
{
let key = compute_key()?;
mock_keyring.save(KEYRING_SERVICE, &key, "{}")?;
let auth_file = get_auth_file(codex_home);
std::fs::write(&auth_file, "stale")?;
Ok((key, auth_file))
}
fn seed_keyring_with_auth<F>(
mock_keyring: &MockKeyringStore,
compute_key: F,
auth: &AuthDotJson,
) -> anyhow::Result<()>
where
F: FnOnce() -> std::io::Result<String>,
{
let key = compute_key()?;
let serialized = serde_json::to_string(auth)?;
mock_keyring.save(KEYRING_SERVICE, &key, &serialized)?;
Ok(())
}
fn assert_keyring_saved_auth_and_removed_fallback(
mock_keyring: &MockKeyringStore,
key: &str,
codex_home: &Path,
expected: &AuthDotJson,
) {
let saved_value = mock_keyring
.saved_value(key)
.expect("keyring entry should exist");
let expected_serialized = serde_json::to_string(expected).expect("serialize expected auth");
assert_eq!(saved_value, expected_serialized);
let auth_file = get_auth_file(codex_home);
assert!(
!auth_file.exists(),
"fallback auth.json should be removed after keyring save"
);
}
fn id_token_with_prefix(prefix: &str) -> IdTokenInfo {
#[derive(Serialize)]
struct Header {
alg: &'static str,
typ: &'static str,
}
let header = Header {
alg: "none",
typ: "JWT",
};
let payload = json!({
"email": format!("{prefix}@example.com"),
"https://api.openai.com/auth": {
"chatgpt_account_id": format!("{prefix}-account"),
},
});
let encode = |bytes: &[u8]| base64::engine::general_purpose::URL_SAFE_NO_PAD.encode(bytes);
let header_b64 = encode(&serde_json::to_vec(&header).expect("serialize header"));
let payload_b64 = encode(&serde_json::to_vec(&payload).expect("serialize payload"));
let signature_b64 = encode(b"sig");
let fake_jwt = format!("{header_b64}.{payload_b64}.{signature_b64}");
crate::token_data::parse_id_token(&fake_jwt).expect("fake JWT should parse")
}
fn auth_with_prefix(prefix: &str) -> AuthDotJson {
AuthDotJson {
openai_api_key: Some(format!("{prefix}-api-key")),
tokens: Some(TokenData {
id_token: id_token_with_prefix(prefix),
access_token: format!("{prefix}-access"),
refresh_token: format!("{prefix}-refresh"),
account_id: Some(format!("{prefix}-account-id")),
}),
last_refresh: None,
}
}
#[test]
fn keyring_auth_storage_load_returns_deserialized_auth() -> anyhow::Result<()> {
let codex_home = tempdir()?;
let mock_keyring = MockKeyringStore::default();
let storage = KeyringAuthStorage::new(
codex_home.path().to_path_buf(),
Arc::new(mock_keyring.clone()),
);
let expected = AuthDotJson {
openai_api_key: Some("sk-test".to_string()),
tokens: None,
last_refresh: None,
};
seed_keyring_with_auth(
&mock_keyring,
|| compute_store_key(codex_home.path()),
&expected,
)?;
let loaded = storage.load()?;
assert_eq!(Some(expected), loaded);
Ok(())
}
#[test]
fn keyring_auth_storage_compute_store_key_for_home_directory() -> anyhow::Result<()> {
let codex_home = PathBuf::from("~/.codex");
let key = compute_store_key(codex_home.as_path())?;
assert_eq!(key, "cli|940db7b1d0e4eb40");
Ok(())
}
#[test]
fn keyring_auth_storage_save_persists_and_removes_fallback_file() -> anyhow::Result<()> {
let codex_home = tempdir()?;
let mock_keyring = MockKeyringStore::default();
let storage = KeyringAuthStorage::new(
codex_home.path().to_path_buf(),
Arc::new(mock_keyring.clone()),
);
let auth_file = get_auth_file(codex_home.path());
std::fs::write(&auth_file, "stale")?;
let auth = AuthDotJson {
openai_api_key: None,
tokens: Some(TokenData {
id_token: Default::default(),
access_token: "access".to_string(),
refresh_token: "refresh".to_string(),
account_id: Some("account".to_string()),
}),
last_refresh: Some(Utc::now()),
};
storage.save(&auth)?;
let key = compute_store_key(codex_home.path())?;
assert_keyring_saved_auth_and_removed_fallback(
&mock_keyring,
&key,
codex_home.path(),
&auth,
);
Ok(())
}
#[test]
fn keyring_auth_storage_delete_removes_keyring_and_file() -> anyhow::Result<()> {
let codex_home = tempdir()?;
let mock_keyring = MockKeyringStore::default();
let storage = KeyringAuthStorage::new(
codex_home.path().to_path_buf(),
Arc::new(mock_keyring.clone()),
);
let (key, auth_file) = seed_keyring_and_fallback_auth_file_for_delete(
&mock_keyring,
codex_home.path(),
|| compute_store_key(codex_home.path()),
)?;
let removed = storage.delete()?;
assert!(removed, "delete should report removal");
assert!(
!mock_keyring.contains(&key),
"keyring entry should be removed"
);
assert!(
!auth_file.exists(),
"fallback auth.json should be removed after keyring delete"
);
Ok(())
}
#[test]
fn auto_auth_storage_load_prefers_keyring_value() -> anyhow::Result<()> {
let codex_home = tempdir()?;
let mock_keyring = MockKeyringStore::default();
let storage = AutoAuthStorage::new(
codex_home.path().to_path_buf(),
Arc::new(mock_keyring.clone()),
);
let keyring_auth = auth_with_prefix("keyring");
seed_keyring_with_auth(
&mock_keyring,
|| compute_store_key(codex_home.path()),
&keyring_auth,
)?;
let file_auth = auth_with_prefix("file");
storage.file_storage.save(&file_auth)?;
let loaded = storage.load()?;
assert_eq!(loaded, Some(keyring_auth));
Ok(())
}
#[test]
fn auto_auth_storage_load_uses_file_when_keyring_empty() -> anyhow::Result<()> {
let codex_home = tempdir()?;
let mock_keyring = MockKeyringStore::default();
let storage = AutoAuthStorage::new(codex_home.path().to_path_buf(), Arc::new(mock_keyring));
let expected = auth_with_prefix("file-only");
storage.file_storage.save(&expected)?;
let loaded = storage.load()?;
assert_eq!(loaded, Some(expected));
Ok(())
}
#[test]
fn auto_auth_storage_load_falls_back_when_keyring_errors() -> anyhow::Result<()> {
let codex_home = tempdir()?;
let mock_keyring = MockKeyringStore::default();
let storage = AutoAuthStorage::new(
codex_home.path().to_path_buf(),
Arc::new(mock_keyring.clone()),
);
let key = compute_store_key(codex_home.path())?;
mock_keyring.set_error(&key, KeyringError::Invalid("error".into(), "load".into()));
let expected = auth_with_prefix("fallback");
storage.file_storage.save(&expected)?;
let loaded = storage.load()?;
assert_eq!(loaded, Some(expected));
Ok(())
}
#[test]
fn auto_auth_storage_save_prefers_keyring() -> anyhow::Result<()> {
let codex_home = tempdir()?;
let mock_keyring = MockKeyringStore::default();
let storage = AutoAuthStorage::new(
codex_home.path().to_path_buf(),
Arc::new(mock_keyring.clone()),
);
let key = compute_store_key(codex_home.path())?;
let stale = auth_with_prefix("stale");
storage.file_storage.save(&stale)?;
let expected = auth_with_prefix("to-save");
storage.save(&expected)?;
assert_keyring_saved_auth_and_removed_fallback(
&mock_keyring,
&key,
codex_home.path(),
&expected,
);
Ok(())
}
#[test]
fn auto_auth_storage_save_falls_back_when_keyring_errors() -> anyhow::Result<()> {
let codex_home = tempdir()?;
let mock_keyring = MockKeyringStore::default();
let storage = AutoAuthStorage::new(
codex_home.path().to_path_buf(),
Arc::new(mock_keyring.clone()),
);
let key = compute_store_key(codex_home.path())?;
mock_keyring.set_error(&key, KeyringError::Invalid("error".into(), "save".into()));
let auth = auth_with_prefix("fallback");
storage.save(&auth)?;
let auth_file = get_auth_file(codex_home.path());
assert!(
auth_file.exists(),
"fallback auth.json should be created when keyring save fails"
);
let saved = storage
.file_storage
.load()?
.context("fallback auth should exist")?;
assert_eq!(saved, auth);
assert!(
mock_keyring.saved_value(&key).is_none(),
"keyring should not contain value when save fails"
);
Ok(())
}
#[test]
fn auto_auth_storage_delete_removes_keyring_and_file() -> anyhow::Result<()> {
let codex_home = tempdir()?;
let mock_keyring = MockKeyringStore::default();
let storage = AutoAuthStorage::new(
codex_home.path().to_path_buf(),
Arc::new(mock_keyring.clone()),
);
let (key, auth_file) = seed_keyring_and_fallback_auth_file_for_delete(
&mock_keyring,
codex_home.path(),
|| compute_store_key(codex_home.path()),
)?;
let removed = storage.delete()?;
assert!(removed, "delete should report removal");
assert!(
!mock_keyring.contains(&key),
"keyring entry should be removed"
);
assert!(
!auth_file.exists(),
"fallback auth.json should be removed after delete"
);
Ok(())
}
}

View File

@@ -5,13 +5,13 @@ use tree_sitter_bash::LANGUAGE as BASH;
/// Parse the provided bash source using tree-sitter-bash, returning a Tree on
/// success or None if parsing failed.
pub fn try_parse_shell(shell_lc_arg: &str) -> Option<Tree> {
pub fn try_parse_bash(bash_lc_arg: &str) -> Option<Tree> {
let lang = BASH.into();
let mut parser = Parser::new();
#[expect(clippy::expect_used)]
parser.set_language(&lang).expect("load bash grammar");
let old_tree: Option<&Tree> = None;
parser.parse(shell_lc_arg, old_tree)
parser.parse(bash_lc_arg, old_tree)
}
/// Parse a script which may contain multiple simple commands joined only by
@@ -88,19 +88,18 @@ pub fn try_parse_word_only_commands_sequence(tree: &Tree, src: &str) -> Option<V
Some(commands)
}
/// Returns the sequence of plain commands within a `bash -lc "..."` or
/// `zsh -lc "..."` invocation when the script only contains word-only commands
/// joined by safe operators.
pub fn parse_shell_lc_plain_commands(command: &[String]) -> Option<Vec<Vec<String>>> {
let [shell, flag, script] = command else {
/// Returns the sequence of plain commands within a `bash -lc "..."` invocation
/// when the script only contains word-only commands joined by safe operators.
pub fn parse_bash_lc_plain_commands(command: &[String]) -> Option<Vec<Vec<String>>> {
let [bash, flag, script] = command else {
return None;
};
if flag != "-lc" || !(shell == "bash" || shell == "zsh") {
if bash != "bash" || flag != "-lc" {
return None;
}
let tree = try_parse_shell(script)?;
let tree = try_parse_bash(script)?;
try_parse_word_only_commands_sequence(&tree, script)
}
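// A minimal sketch (illustrative only) of the contract above: a word-only
// `bash -lc` script parses into its command sequence.
#[allow(dead_code)]
fn example_parse_bash_lc() {
    let command = vec!["bash".to_string(), "-lc".to_string(), "ls".to_string()];
    let parsed = parse_bash_lc_plain_commands(&command);
    assert_eq!(parsed, Some(vec![vec!["ls".to_string()]]));
}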
@@ -155,7 +154,7 @@ mod tests {
use super::*;
fn parse_seq(src: &str) -> Option<Vec<Vec<String>>> {
let tree = try_parse_shell(src)?;
let tree = try_parse_bash(src)?;
try_parse_word_only_commands_sequence(&tree, src)
}
@@ -235,11 +234,4 @@ mod tests {
fn rejects_trailing_operator_parse_error() {
assert!(parse_seq("ls &&").is_none());
}
#[test]
fn parse_zsh_lc_plain_commands() {
let command = vec!["zsh".to_string(), "-lc".to_string(), "ls".to_string()];
let parsed = parse_shell_lc_plain_commands(&command).unwrap();
assert_eq!(parsed, vec![vec!["ls".to_string()]]);
}
}

View File

@@ -4,7 +4,6 @@ use crate::ModelProviderInfo;
use crate::client_common::Prompt;
use crate::client_common::ResponseEvent;
use crate::client_common::ResponseStream;
use crate::default_client::CodexHttpClient;
use crate::error::CodexErr;
use crate::error::ConnectionFailedError;
use crate::error::ResponseStreamFailed;
@@ -17,11 +16,8 @@ use crate::util::backoff;
use bytes::Bytes;
use codex_otel::otel_event_manager::OtelEventManager;
use codex_protocol::models::ContentItem;
use codex_protocol::models::FunctionCallOutputContentItem;
use codex_protocol::models::ReasoningItemContent;
use codex_protocol::models::ResponseItem;
use codex_protocol::protocol::SessionSource;
use codex_protocol::protocol::SubAgentSource;
use eventsource_stream::Eventsource;
use futures::Stream;
use futures::StreamExt;
@@ -40,10 +36,9 @@ use tracing::trace;
pub(crate) async fn stream_chat_completions(
prompt: &Prompt,
model_family: &ModelFamily,
client: &CodexHttpClient,
client: &reqwest::Client,
provider: &ModelProviderInfo,
otel_event_manager: &OtelEventManager,
session_source: &SessionSource,
) -> Result<ResponseStream> {
if prompt.output_schema.is_some() {
return Err(CodexErr::UnsupportedOperation(
@@ -80,7 +75,6 @@ pub(crate) async fn stream_chat_completions(
ResponseItem::CustomToolCall { .. } => {}
ResponseItem::CustomToolCallOutput { .. } => {}
ResponseItem::WebSearchCall { .. } => {}
ResponseItem::GhostSnapshot { .. } => {}
}
}
@@ -110,10 +104,10 @@ pub(crate) async fn stream_chat_completions(
} = item
{
let mut text = String::new();
for entry in items {
match entry {
ReasoningItemContent::ReasoningText { text: segment }
| ReasoningItemContent::Text { text: segment } => text.push_str(segment),
for c in items {
match c {
ReasoningItemContent::ReasoningText { text: t }
| ReasoningItemContent::Text { text: t } => text.push_str(t),
}
}
if text.trim().is_empty() {
@@ -163,26 +157,16 @@ pub(crate) async fn stream_chat_completions(
for (idx, item) in input.iter().enumerate() {
match item {
ResponseItem::Message { role, content, .. } => {
// Build content either as a plain string (typical for assistant text)
// or as an array of content items when images are present (user/tool multimodal).
let mut text = String::new();
let mut items: Vec<serde_json::Value> = Vec::new();
let mut saw_image = false;
for c in content {
match c {
ContentItem::InputText { text: t }
| ContentItem::OutputText { text: t } => {
text.push_str(t);
items.push(json!({"type":"text","text": t}));
}
ContentItem::InputImage { image_url } => {
saw_image = true;
items.push(json!({"type":"image_url","image_url": {"url": image_url}}));
}
_ => {}
}
}
// Skip exact-duplicate assistant messages.
if role == "assistant" {
if let Some(prev) = &last_assistant_text
@@ -193,17 +177,7 @@ pub(crate) async fn stream_chat_completions(
last_assistant_text = Some(text.clone());
}
// For assistant messages, always send a plain string for compatibility.
// For user messages, if an image is present, send an array of content items.
let content_value = if role == "assistant" {
json!(text)
} else if saw_image {
json!(items)
} else {
json!(text)
};
let mut msg = json!({"role": role, "content": content_value});
let mut msg = json!({"role": role, "content": text});
if role == "assistant"
&& let Some(reasoning) = reasoning_by_anchor_index.get(&idx)
&& let Some(obj) = msg.as_object_mut()
@@ -262,29 +236,10 @@ pub(crate) async fn stream_chat_completions(
messages.push(msg);
}
ResponseItem::FunctionCallOutput { call_id, output } => {
                // Prefer structured content items when available (e.g., images);
                // otherwise fall back to the legacy plain-string content.
let content_value = if let Some(items) = &output.content_items {
let mapped: Vec<serde_json::Value> = items
.iter()
.map(|it| match it {
FunctionCallOutputContentItem::InputText { text } => {
json!({"type":"text","text": text})
}
FunctionCallOutputContentItem::InputImage { image_url } => {
json!({"type":"image_url","image_url": {"url": image_url}})
}
})
.collect();
json!(mapped)
} else {
json!(output.content)
};
messages.push(json!({
"role": "tool",
"tool_call_id": call_id,
"content": content_value,
"content": output.content,
}));
}
ResponseItem::CustomToolCall {
@@ -314,10 +269,6 @@ pub(crate) async fn stream_chat_completions(
"content": output,
}));
}
ResponseItem::GhostSnapshot { .. } => {
// Ghost snapshots annotate history but are not sent to the model.
continue;
}
ResponseItem::Reasoning { .. }
| ResponseItem::WebSearchCall { .. }
| ResponseItem::Other => {
@@ -346,20 +297,7 @@ pub(crate) async fn stream_chat_completions(
loop {
attempt += 1;
let mut req_builder = provider.create_request_builder(client, &None).await?;
// Include subagent header only for subagent sessions.
if let SessionSource::SubAgent(sub) = session_source.clone() {
let subagent = if let SubAgentSource::Other(label) = sub {
label
} else {
serde_json::to_value(&sub)
.ok()
.and_then(|v| v.as_str().map(std::string::ToString::to_string))
.unwrap_or_else(|| "other".to_string())
};
req_builder = req_builder.header("x-openai-subagent", subagent);
}
let req_builder = provider.create_request_builder(client, &None).await?;
let res = otel_event_manager
.log_request(attempt, || {
@@ -429,61 +367,6 @@ pub(crate) async fn stream_chat_completions(
}
}
async fn append_assistant_text(
tx_event: &mpsc::Sender<Result<ResponseEvent>>,
assistant_item: &mut Option<ResponseItem>,
text: String,
) {
if assistant_item.is_none() {
let item = ResponseItem::Message {
id: None,
role: "assistant".to_string(),
content: vec![],
};
*assistant_item = Some(item.clone());
let _ = tx_event
.send(Ok(ResponseEvent::OutputItemAdded(item)))
.await;
}
if let Some(ResponseItem::Message { content, .. }) = assistant_item {
content.push(ContentItem::OutputText { text: text.clone() });
let _ = tx_event
.send(Ok(ResponseEvent::OutputTextDelta(text.clone())))
.await;
}
}
async fn append_reasoning_text(
tx_event: &mpsc::Sender<Result<ResponseEvent>>,
reasoning_item: &mut Option<ResponseItem>,
text: String,
) {
if reasoning_item.is_none() {
let item = ResponseItem::Reasoning {
id: String::new(),
summary: Vec::new(),
content: Some(vec![]),
encrypted_content: None,
};
*reasoning_item = Some(item.clone());
let _ = tx_event
.send(Ok(ResponseEvent::OutputItemAdded(item)))
.await;
}
if let Some(ResponseItem::Reasoning {
content: Some(content),
..
}) = reasoning_item
{
content.push(ReasoningItemContent::ReasoningText { text: text.clone() });
let _ = tx_event
.send(Ok(ResponseEvent::ReasoningContentDelta(text.clone())))
.await;
}
}
/// Lightweight SSE processor for the Chat Completions streaming format. The
/// output is mapped onto Codex's internal [`ResponseEvent`] so that the rest
/// of the pipeline can stay agnostic of the underlying wire format.
@@ -511,8 +394,8 @@ async fn process_chat_sse<S>(
}
let mut fn_call_state = FunctionCallState::default();
let mut assistant_item: Option<ResponseItem> = None;
let mut reasoning_item: Option<ResponseItem> = None;
let mut assistant_text = String::new();
let mut reasoning_text = String::new();
loop {
let start = std::time::Instant::now();
@@ -553,11 +436,26 @@ async fn process_chat_sse<S>(
if sse.data.trim() == "[DONE]" {
// Emit any finalized items before closing so downstream consumers receive
// terminal events for both assistant content and raw reasoning.
if let Some(item) = assistant_item {
if !assistant_text.is_empty() {
let item = ResponseItem::Message {
role: "assistant".to_string(),
content: vec![ContentItem::OutputText {
text: std::mem::take(&mut assistant_text),
}],
id: None,
};
let _ = tx_event.send(Ok(ResponseEvent::OutputItemDone(item))).await;
}
if let Some(item) = reasoning_item {
if !reasoning_text.is_empty() {
let item = ResponseItem::Reasoning {
id: String::new(),
summary: Vec::new(),
content: Some(vec![ReasoningItemContent::ReasoningText {
text: std::mem::take(&mut reasoning_text),
}]),
encrypted_content: None,
};
let _ = tx_event.send(Ok(ResponseEvent::OutputItemDone(item))).await;
}
@@ -587,7 +485,10 @@ async fn process_chat_sse<S>(
.and_then(|c| c.as_str())
&& !content.is_empty()
{
append_assistant_text(&tx_event, &mut assistant_item, content.to_string()).await;
assistant_text.push_str(content);
let _ = tx_event
.send(Ok(ResponseEvent::OutputTextDelta(content.to_string())))
.await;
}
// Forward any reasoning/thinking deltas if present.
@@ -617,7 +518,10 @@ async fn process_chat_sse<S>(
if let Some(reasoning) = maybe_text {
// Accumulate so we can emit a terminal Reasoning item at the end.
append_reasoning_text(&tx_event, &mut reasoning_item, reasoning).await;
reasoning_text.push_str(&reasoning);
let _ = tx_event
.send(Ok(ResponseEvent::ReasoningContentDelta(reasoning)))
.await;
}
}
@@ -627,7 +531,10 @@ async fn process_chat_sse<S>(
// Accept either a plain string or an object with { text | content }
if let Some(s) = message_reasoning.as_str() {
if !s.is_empty() {
append_reasoning_text(&tx_event, &mut reasoning_item, s.to_string()).await;
reasoning_text.push_str(s);
let _ = tx_event
.send(Ok(ResponseEvent::ReasoningContentDelta(s.to_string())))
.await;
}
} else if let Some(obj) = message_reasoning.as_object()
&& let Some(s) = obj
@@ -636,7 +543,10 @@ async fn process_chat_sse<S>(
.or_else(|| obj.get("content").and_then(|v| v.as_str()))
&& !s.is_empty()
{
append_reasoning_text(&tx_event, &mut reasoning_item, s.to_string()).await;
reasoning_text.push_str(s);
let _ = tx_event
.send(Ok(ResponseEvent::ReasoningContentDelta(s.to_string())))
.await;
}
}
@@ -674,7 +584,15 @@ async fn process_chat_sse<S>(
"tool_calls" if fn_call_state.active => {
// First, flush the terminal raw reasoning so UIs can finalize
// the reasoning stream before any exec/tool events begin.
if let Some(item) = reasoning_item.take() {
if !reasoning_text.is_empty() {
let item = ResponseItem::Reasoning {
id: String::new(),
summary: Vec::new(),
content: Some(vec![ReasoningItemContent::ReasoningText {
text: std::mem::take(&mut reasoning_text),
}]),
encrypted_content: None,
};
let _ = tx_event.send(Ok(ResponseEvent::OutputItemDone(item))).await;
}
@@ -691,11 +609,26 @@ async fn process_chat_sse<S>(
"stop" => {
// Regular turn without tool-call. Emit the final assistant message
// as a single OutputItemDone so non-delta consumers see the result.
if let Some(item) = assistant_item.take() {
if !assistant_text.is_empty() {
let item = ResponseItem::Message {
role: "assistant".to_string(),
content: vec![ContentItem::OutputText {
text: std::mem::take(&mut assistant_text),
}],
id: None,
};
let _ = tx_event.send(Ok(ResponseEvent::OutputItemDone(item))).await;
}
// Also emit a terminal Reasoning item so UIs can finalize raw reasoning.
if let Some(item) = reasoning_item.take() {
if !reasoning_text.is_empty() {
let item = ResponseItem::Reasoning {
id: String::new(),
summary: Vec::new(),
content: Some(vec![ReasoningItemContent::ReasoningText {
text: std::mem::take(&mut reasoning_text),
}]),
encrypted_content: None,
};
let _ = tx_event.send(Ok(ResponseEvent::OutputItemDone(item))).await;
}
}
@@ -914,8 +847,8 @@ where
Poll::Ready(Some(Ok(ResponseEvent::ReasoningSummaryPartAdded))) => {
continue;
}
Poll::Ready(Some(Ok(ResponseEvent::OutputItemAdded(item)))) => {
return Poll::Ready(Some(Ok(ResponseEvent::OutputItemAdded(item))));
Poll::Ready(Some(Ok(ResponseEvent::WebSearchCallBegin { call_id }))) => {
return Poll::Ready(Some(Ok(ResponseEvent::WebSearchCallBegin { call_id })));
}
}
}

View File

@@ -1,19 +1,17 @@
use std::io::BufRead;
use std::path::Path;
use std::sync::Arc;
use std::sync::OnceLock;
use std::time::Duration;
use crate::AuthManager;
use crate::auth::CodexAuth;
use crate::error::ConnectionFailedError;
use crate::error::ResponseStreamFailed;
use crate::error::RetryLimitReachedError;
use crate::error::UnexpectedResponseError;
use bytes::Bytes;
use chrono::DateTime;
use chrono::Utc;
use codex_app_server_protocol::AuthMode;
use codex_otel::otel_event_manager::OtelEventManager;
use codex_protocol::ConversationId;
use codex_protocol::config_types::ReasoningEffort as ReasoningEffortConfig;
use codex_protocol::config_types::ReasoningSummary as ReasoningSummaryConfig;
use codex_protocol::models::ResponseItem;
use codex_protocol::protocol::SessionSource;
use eventsource_stream::Eventsource;
use futures::prelude::*;
use regex_lite::Regex;
@@ -29,8 +27,6 @@ use tracing::debug;
use tracing::trace;
use tracing::warn;
use crate::AuthManager;
use crate::auth::CodexAuth;
use crate::chat_completions::AggregateStreamExt;
use crate::chat_completions::stream_chat_completions;
use crate::client_common::Prompt;
@@ -40,14 +36,9 @@ use crate::client_common::ResponsesApiRequest;
use crate::client_common::create_reasoning_param_for_request;
use crate::client_common::create_text_param_for_request;
use crate::config::Config;
use crate::default_client::CodexHttpClient;
use crate::default_client::create_client;
use crate::error::CodexErr;
use crate::error::ConnectionFailedError;
use crate::error::ResponseStreamFailed;
use crate::error::Result;
use crate::error::RetryLimitReachedError;
use crate::error::UnexpectedResponseError;
use crate::error::UsageLimitReachedError;
use crate::flags::CODEX_RS_SSE_FIXTURE;
use crate::model_family::ModelFamily;
@@ -57,9 +48,17 @@ use crate::openai_model_info::get_model_info;
use crate::protocol::RateLimitSnapshot;
use crate::protocol::RateLimitWindow;
use crate::protocol::TokenUsage;
use crate::state::TaskKind;
use crate::token_data::PlanType;
use crate::tools::spec::create_tools_json_for_responses_api;
use crate::util::backoff;
use chrono::DateTime;
use chrono::Utc;
use codex_otel::otel_event_manager::OtelEventManager;
use codex_protocol::config_types::ReasoningEffort as ReasoningEffortConfig;
use codex_protocol::config_types::ReasoningSummary as ReasoningSummaryConfig;
use codex_protocol::models::ResponseItem;
use std::sync::Arc;
#[derive(Debug, Deserialize)]
struct ErrorResponse {
@@ -82,15 +81,13 @@ pub struct ModelClient {
config: Arc<Config>,
auth_manager: Option<Arc<AuthManager>>,
otel_event_manager: OtelEventManager,
client: CodexHttpClient,
client: reqwest::Client,
provider: ModelProviderInfo,
conversation_id: ConversationId,
effort: Option<ReasoningEffortConfig>,
summary: ReasoningSummaryConfig,
session_source: SessionSource,
}
#[allow(clippy::too_many_arguments)]
impl ModelClient {
pub fn new(
config: Arc<Config>,
@@ -100,7 +97,6 @@ impl ModelClient {
effort: Option<ReasoningEffortConfig>,
summary: ReasoningSummaryConfig,
conversation_id: ConversationId,
session_source: SessionSource,
) -> Self {
let client = create_client();
@@ -113,7 +109,6 @@ impl ModelClient {
conversation_id,
effort,
summary,
session_source,
}
}
@@ -131,17 +126,20 @@ impl ModelClient {
})
}
pub fn config(&self) -> Arc<Config> {
Arc::clone(&self.config)
}
pub fn provider(&self) -> &ModelProviderInfo {
&self.provider
}
/// Dispatches to either the Responses or Chat implementation depending on
    /// the provider config. Public callers always invoke `stream()`; the
    /// specialised helpers are private to avoid accidental misuse.
pub async fn stream(&self, prompt: &Prompt) -> Result<ResponseStream> {
self.stream_with_task_kind(prompt, TaskKind::Regular).await
}
pub(crate) async fn stream_with_task_kind(
&self,
prompt: &Prompt,
task_kind: TaskKind,
) -> Result<ResponseStream> {
match self.provider.wire_api {
WireApi::Responses => self.stream_responses(prompt).await,
WireApi::Responses => self.stream_responses(prompt, task_kind).await,
WireApi::Chat => {
// Create the raw streaming connection first.
let response_stream = stream_chat_completions(
@@ -150,7 +148,6 @@ impl ModelClient {
&self.client,
&self.provider,
&self.otel_event_manager,
&self.session_source,
)
.await?;
@@ -183,7 +180,11 @@ impl ModelClient {
}
/// Implementation for the OpenAI *Responses* experimental API.
async fn stream_responses(&self, prompt: &Prompt) -> Result<ResponseStream> {
async fn stream_responses(
&self,
prompt: &Prompt,
task_kind: TaskKind,
) -> Result<ResponseStream> {
if let Some(path) = &*CODEX_RS_SSE_FIXTURE {
            // Short-circuit for tests.
warn!(path, "Streaming from fixture");
@@ -213,16 +214,18 @@ impl ModelClient {
let input_with_instructions = prompt.get_formatted_input();
let verbosity = if self.config.model_family.support_verbosity {
self.config.model_verbosity
} else {
if self.config.model_verbosity.is_some() {
warn!(
"model_verbosity is set but ignored as the model does not support verbosity: {}",
self.config.model_family.family
);
let verbosity = match &self.config.model_family.family {
family if family == "gpt-5" => self.config.model_verbosity,
_ => {
if self.config.model_verbosity.is_some() {
warn!(
"model_verbosity is set but ignored for non-gpt-5 model family: {}",
self.config.model_family.family
);
}
None
}
None
};
// Only include `text.verbosity` for GPT-5 family models
@@ -260,7 +263,7 @@ impl ModelClient {
let max_attempts = self.provider.request_max_retries();
for attempt in 0..=max_attempts {
match self
.attempt_stream_responses(attempt, &payload_json, &auth_manager)
.attempt_stream_responses(attempt, &payload_json, &auth_manager, task_kind)
.await
{
Ok(stream) => {
@@ -288,6 +291,7 @@ impl ModelClient {
attempt: u64,
payload_json: &Value,
auth_manager: &Option<Arc<AuthManager>>,
task_kind: TaskKind,
) -> std::result::Result<ResponseStream, StreamAttemptError> {
// Always fetch the latest auth in case a prior attempt refreshed the token.
let auth = auth_manager.as_ref().and_then(|m| m.auth());
@@ -296,7 +300,6 @@ impl ModelClient {
"POST to {}: {:?}",
self.provider.get_full_url(&auth),
serde_json::to_string(payload_json)
.unwrap_or("<unable to serialize payload>".to_string())
);
let mut req_builder = self
@@ -305,24 +308,13 @@ impl ModelClient {
.await
.map_err(StreamAttemptError::Fatal)?;
// Include subagent header only for subagent sessions.
if let SessionSource::SubAgent(sub) = &self.session_source {
let subagent = if let crate::protocol::SubAgentSource::Other(label) = sub {
label.clone()
} else {
serde_json::to_value(sub)
.ok()
.and_then(|v| v.as_str().map(std::string::ToString::to_string))
.unwrap_or_else(|| "other".to_string())
};
req_builder = req_builder.header("x-openai-subagent", subagent);
}
req_builder = req_builder
.header("OpenAI-Beta", "responses=experimental")
// Send session_id for compatibility.
.header("conversation_id", self.conversation_id.to_string())
.header("session_id", self.conversation_id.to_string())
.header(reqwest::header::ACCEPT, "text/event-stream")
.header("Codex-Task-Type", task_kind.header_value())
.json(payload_json);
if let Some(auth) = auth.as_ref()
@@ -343,6 +335,12 @@ impl ModelClient {
.headers()
.get("cf-ray")
.map(|v| v.to_str().unwrap_or_default().to_string());
trace!(
"Response status: {}, cf-ray: {:?}",
resp.status(),
request_id
);
}
match res {
@@ -387,14 +385,9 @@ impl ModelClient {
if status == StatusCode::UNAUTHORIZED
&& let Some(manager) = auth_manager.as_ref()
&& let Some(auth) = auth.as_ref()
&& auth.mode == AuthMode::ChatGPT
&& manager.auth().is_some()
{
manager.refresh_token().await.map_err(|err| {
StreamAttemptError::Fatal(CodexErr::Fatal(format!(
"Failed to refresh ChatGPT credentials: {err}"
)))
})?;
let _ = manager.refresh_token().await;
}
// The OpenAI Responses endpoint returns structured JSON bodies even for 4xx/5xx
@@ -465,10 +458,6 @@ impl ModelClient {
self.otel_event_manager.clone()
}
pub fn get_session_source(&self) -> SessionSource {
self.session_source.clone()
}
/// Returns the currently configured model slug.
pub fn get_model(&self) -> String {
self.config.model.clone()
@@ -639,13 +628,13 @@ fn parse_rate_limit_window(
headers: &HeaderMap,
used_percent_header: &str,
window_minutes_header: &str,
resets_at_header: &str,
resets_header: &str,
) -> Option<RateLimitWindow> {
let used_percent: Option<f64> = parse_header_f64(headers, used_percent_header);
used_percent.and_then(|used_percent| {
let window_minutes = parse_header_i64(headers, window_minutes_header);
let resets_at = parse_header_i64(headers, resets_at_header);
let resets_at = parse_header_i64(headers, resets_header);
let has_data = used_percent != 0.0
|| window_minutes.is_some_and(|minutes| minutes != 0)
@@ -876,15 +865,21 @@ async fn process_sse<S>(
| "response.in_progress"
| "response.output_text.done" => {}
"response.output_item.added" => {
let Some(item_val) = event.item else { continue };
let Ok(item) = serde_json::from_value::<ResponseItem>(item_val) else {
debug!("failed to parse ResponseItem from output_item.done");
continue;
};
let event = ResponseEvent::OutputItemAdded(item);
if tx_event.send(Ok(event)).await.is_err() {
return;
if let Some(item) = event.item.as_ref() {
// Detect web_search_call begin and forward a synthetic event upstream.
if let Some(ty) = item.get("type").and_then(|v| v.as_str())
&& ty == "web_search_call"
{
let call_id = item
.get("id")
.and_then(|v| v.as_str())
.unwrap_or("")
.to_string();
let ev = ResponseEvent::WebSearchCallBegin { call_id };
if tx_event.send(Ok(ev)).await.is_err() {
return;
}
}
}
}
"response.reasoning_summary_part.added" => {
@@ -1098,7 +1093,6 @@ mod tests {
base_url: Some("https://test.com".to_string()),
env_key: Some("TEST_API_KEY".to_string()),
env_key_instructions: None,
experimental_bearer_token: None,
wire_api: WireApi::Responses,
query_params: None,
http_headers: None,
@@ -1162,7 +1156,6 @@ mod tests {
base_url: Some("https://test.com".to_string()),
env_key: Some("TEST_API_KEY".to_string()),
env_key_instructions: None,
experimental_bearer_token: None,
wire_api: WireApi::Responses,
query_params: None,
http_headers: None,
@@ -1199,7 +1192,6 @@ mod tests {
base_url: Some("https://test.com".to_string()),
env_key: Some("TEST_API_KEY".to_string()),
env_key_instructions: None,
experimental_bearer_token: None,
wire_api: WireApi::Responses,
query_params: None,
http_headers: None,
@@ -1238,7 +1230,6 @@ mod tests {
base_url: Some("https://test.com".to_string()),
env_key: Some("TEST_API_KEY".to_string()),
env_key_instructions: None,
experimental_bearer_token: None,
wire_api: WireApi::Responses,
query_params: None,
http_headers: None,
@@ -1273,7 +1264,6 @@ mod tests {
base_url: Some("https://test.com".to_string()),
env_key: Some("TEST_API_KEY".to_string()),
env_key_instructions: None,
experimental_bearer_token: None,
wire_api: WireApi::Responses,
query_params: None,
http_headers: None,
@@ -1377,7 +1367,6 @@ mod tests {
base_url: Some("https://test.com".to_string()),
env_key: Some("TEST_API_KEY".to_string()),
env_key_instructions: None,
experimental_bearer_token: None,
wire_api: WireApi::Responses,
query_params: None,
http_headers: None,

View File

@@ -23,11 +23,6 @@ use tokio::sync::mpsc;
/// Review thread system prompt. Edit `core/src/review_prompt.md` to customize.
pub const REVIEW_PROMPT: &str = include_str!("../review_prompt.md");
// Centralized templates for review-related user messages
pub const REVIEW_EXIT_SUCCESS_TMPL: &str = include_str!("../templates/review/exit_success.xml");
pub const REVIEW_EXIT_INTERRUPTED_TMPL: &str =
include_str!("../templates/review/exit_interrupted.xml");
/// API request payload for a single model turn
#[derive(Default, Debug, Clone)]
pub struct Prompt {
@@ -197,7 +192,6 @@ fn strip_total_output_header(output: &str) -> Option<&str> {
pub enum ResponseEvent {
Created,
OutputItemDone(ResponseItem),
OutputItemAdded(ResponseItem),
Completed {
response_id: String,
token_usage: Option<TokenUsage>,
@@ -206,6 +200,9 @@ pub enum ResponseEvent {
ReasoningSummaryDelta(String),
ReasoningContentDelta(String),
ReasoningSummaryPartAdded,
WebSearchCallBegin {
call_id: String,
},
RateLimits(RateLimitSnapshot),
}

File diff suppressed because it is too large

View File

@@ -10,21 +10,21 @@ use crate::error::Result as CodexResult;
use crate::protocol::AgentMessageEvent;
use crate::protocol::CompactedItem;
use crate::protocol::ErrorEvent;
use crate::protocol::Event;
use crate::protocol::EventMsg;
use crate::protocol::InputItem;
use crate::protocol::InputMessageKind;
use crate::protocol::TaskStartedEvent;
use crate::protocol::TurnContextItem;
use crate::protocol::WarningEvent;
use crate::state::TaskKind;
use crate::truncate::truncate_middle;
use crate::util::backoff;
use askama::Template;
use codex_protocol::items::TurnItem;
use codex_protocol::models::ContentItem;
use codex_protocol::models::ResponseInputItem;
use codex_protocol::models::ResponseItem;
use codex_protocol::protocol::RolloutItem;
use codex_protocol::user_input::UserInput;
use futures::prelude::*;
use tracing::error;
pub const SUMMARIZATION_PROMPT: &str = include_str!("../../templates/compact/prompt.md");
const COMPACT_USER_MESSAGE_MAX_TOKENS: usize = 20_000;
@@ -40,34 +40,40 @@ pub(crate) async fn run_inline_auto_compact_task(
sess: Arc<Session>,
turn_context: Arc<TurnContext>,
) {
let prompt = turn_context.compact_prompt().to_string();
let input = vec![UserInput::Text { text: prompt }];
run_compact_task_inner(sess, turn_context, input).await;
let sub_id = sess.next_internal_sub_id();
let input = vec![InputItem::Text {
text: SUMMARIZATION_PROMPT.to_string(),
}];
run_compact_task_inner(sess, turn_context, sub_id, input).await;
}
pub(crate) async fn run_compact_task(
sess: Arc<Session>,
turn_context: Arc<TurnContext>,
input: Vec<UserInput>,
sub_id: String,
input: Vec<InputItem>,
) -> Option<String> {
let start_event = EventMsg::TaskStarted(TaskStartedEvent {
model_context_window: turn_context.client.get_model_context_window(),
});
sess.send_event(&turn_context, start_event).await;
run_compact_task_inner(sess.clone(), turn_context, input).await;
let start_event = Event {
id: sub_id.clone(),
msg: EventMsg::TaskStarted(TaskStartedEvent {
model_context_window: turn_context.client.get_model_context_window(),
}),
};
sess.send_event(start_event).await;
run_compact_task_inner(sess.clone(), turn_context, sub_id.clone(), input).await;
None
}
async fn run_compact_task_inner(
sess: Arc<Session>,
turn_context: Arc<TurnContext>,
input: Vec<UserInput>,
sub_id: String,
input: Vec<InputItem>,
) {
let initial_input_for_turn: ResponseInputItem = ResponseInputItem::from(input);
let mut history = sess.clone_history().await;
history.record_items(&[initial_input_for_turn.into()]);
let mut turn_input = sess
.turn_input_with_history(vec![initial_input_for_turn.clone().into()])
.await;
let mut truncated_count = 0usize;
let max_retries = turn_context.client.get_provider().stream_max_retries();
@@ -84,18 +90,18 @@ async fn run_compact_task_inner(
sess.persist_rollout_items(&[rollout_item]).await;
loop {
let turn_input = history.get_history_for_prompt();
let prompt = Prompt {
input: turn_input.clone(),
..Default::default()
};
let attempt_result = drain_to_completed(&sess, turn_context.as_ref(), &prompt).await;
let attempt_result =
drain_to_completed(&sess, turn_context.as_ref(), &sub_id, &prompt).await;
match attempt_result {
Ok(()) => {
if truncated_count > 0 {
sess.notify_background_event(
turn_context.as_ref(),
&sub_id,
format!(
"Trimmed {truncated_count} older conversation item(s) before compacting so the prompt fits the model context window."
),
@@ -109,20 +115,20 @@ async fn run_compact_task_inner(
}
Err(e @ CodexErr::ContextWindowExceeded) => {
if turn_input.len() > 1 {
// Trim from the beginning to preserve cache (prefix-based) and keep recent messages intact.
error!(
"Context window exceeded while compacting; removing oldest history item. Error: {e}"
);
history.remove_first_item();
turn_input.remove(0);
truncated_count += 1;
retries = 0;
continue;
}
sess.set_total_tokens_full(turn_context.as_ref()).await;
let event = EventMsg::Error(ErrorEvent {
message: e.to_string(),
});
sess.send_event(&turn_context, event).await;
sess.set_total_tokens_full(&sub_id, turn_context.as_ref())
.await;
let event = Event {
id: sub_id.clone(),
msg: EventMsg::Error(ErrorEvent {
message: e.to_string(),
}),
};
sess.send_event(event).await;
return;
}
Err(e) => {
@@ -130,34 +136,31 @@ async fn run_compact_task_inner(
retries += 1;
let delay = backoff(retries);
sess.notify_stream_error(
turn_context.as_ref(),
format!("Reconnecting... {retries}/{max_retries}"),
&sub_id,
format!("Re-connecting... {retries}/{max_retries}"),
)
.await;
tokio::time::sleep(delay).await;
continue;
} else {
let event = EventMsg::Error(ErrorEvent {
message: e.to_string(),
});
sess.send_event(&turn_context, event).await;
let event = Event {
id: sub_id.clone(),
msg: EventMsg::Error(ErrorEvent {
message: e.to_string(),
}),
};
sess.send_event(event).await;
return;
}
}
}
}
let history_snapshot = sess.clone_history().await.get_history();
let history_snapshot = sess.history_snapshot().await;
let summary_text = get_last_assistant_message_from_turn(&history_snapshot).unwrap_or_default();
let user_messages = collect_user_messages(&history_snapshot);
let initial_context = sess.build_initial_context(turn_context.as_ref());
let mut new_history = build_compacted_history(initial_context, &user_messages, &summary_text);
let ghost_snapshots: Vec<ResponseItem> = history_snapshot
.iter()
.filter(|item| matches!(item, ResponseItem::GhostSnapshot { .. }))
.cloned()
.collect();
new_history.extend(ghost_snapshots);
let new_history = build_compacted_history(initial_context, &user_messages, &summary_text);
sess.replace_history(new_history).await;
let rollout_item = RolloutItem::Compacted(CompactedItem {
@@ -165,15 +168,13 @@ async fn run_compact_task_inner(
});
sess.persist_rollout_items(&[rollout_item]).await;
let event = EventMsg::AgentMessage(AgentMessageEvent {
message: "Compact task completed".to_string(),
});
sess.send_event(&turn_context, event).await;
let warning = EventMsg::Warning(WarningEvent {
message: "Heads up: Long conversations and multiple compactions can cause the model to be less accurate. Start new a new conversation when possible to keep conversations small and targeted.".to_string(),
});
sess.send_event(&turn_context, warning).await;
let event = Event {
id: sub_id.clone(),
msg: EventMsg::AgentMessage(AgentMessageEvent {
message: "Compact task completed".to_string(),
}),
};
sess.send_event(event).await;
}
pub fn content_items_to_text(content: &[ContentItem]) -> Option<String> {
@@ -198,32 +199,29 @@ pub fn content_items_to_text(content: &[ContentItem]) -> Option<String> {
pub(crate) fn collect_user_messages(items: &[ResponseItem]) -> Vec<String> {
items
.iter()
.filter_map(|item| match crate::event_mapping::parse_turn_item(item) {
Some(TurnItem::UserMessage(user)) => Some(user.message()),
.filter_map(|item| match item {
ResponseItem::Message { role, content, .. } if role == "user" => {
content_items_to_text(content)
}
_ => None,
})
.filter(|text| !is_session_prefix_message(text))
.collect()
}
pub fn is_session_prefix_message(text: &str) -> bool {
matches!(
InputMessageKind::from(("user", text)),
InputMessageKind::UserInstructions | InputMessageKind::EnvironmentContext
)
}
pub(crate) fn build_compacted_history(
initial_context: Vec<ResponseItem>,
user_messages: &[String],
summary_text: &str,
) -> Vec<ResponseItem> {
build_compacted_history_with_limit(
initial_context,
user_messages,
summary_text,
COMPACT_USER_MESSAGE_MAX_TOKENS * 4,
)
}
fn build_compacted_history_with_limit(
mut history: Vec<ResponseItem>,
user_messages: &[String],
summary_text: &str,
max_bytes: usize,
) -> Vec<ResponseItem> {
let mut history = initial_context;
let mut user_messages_text = if user_messages.is_empty() {
"(none)".to_string()
} else {
@@ -231,6 +229,7 @@ fn build_compacted_history_with_limit(
};
// Truncate the concatenated prior user messages so the bridge message
// stays well under the context window (approx. 4 bytes/token).
let max_bytes = COMPACT_USER_MESSAGE_MAX_TOKENS * 4;
if user_messages_text.len() > max_bytes {
user_messages_text = truncate_middle(&user_messages_text, max_bytes).0;
}
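    // Worked example of the budget above (illustrative only):
    // COMPACT_USER_MESSAGE_MAX_TOKENS (20_000) at ~4 bytes per token means
    // the aggregated prior user messages are middle-truncated once they
    // exceed roughly 80_000 bytes.
    const _EXAMPLE_COMPACT_BUDGET_BYTES: usize = 20_000 * 4; // 80_000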
@@ -257,9 +256,14 @@ fn build_compacted_history_with_limit(
async fn drain_to_completed(
sess: &Session,
turn_context: &TurnContext,
sub_id: &str,
prompt: &Prompt,
) -> CodexResult<()> {
let mut stream = turn_context.client.clone().stream(prompt).await?;
let mut stream = turn_context
.client
.clone()
.stream_with_task_kind(prompt, TaskKind::Compact)
.await?;
loop {
let maybe_event = stream.next().await;
let Some(event) = maybe_event else {
@@ -273,10 +277,10 @@ async fn drain_to_completed(
sess.record_into_history(std::slice::from_ref(&item)).await;
}
Ok(ResponseEvent::RateLimits(snapshot)) => {
sess.update_rate_limits(turn_context, snapshot).await;
sess.update_rate_limits(sub_id, snapshot).await;
}
Ok(ResponseEvent::Completed { token_usage, .. }) => {
sess.update_token_usage_info(turn_context, token_usage.as_ref())
sess.update_token_usage_info(sub_id, turn_context, token_usage.as_ref())
.await;
return Ok(());
}
@@ -334,16 +338,21 @@ mod tests {
ResponseItem::Message {
id: Some("user".to_string()),
role: "user".to_string(),
content: vec![ContentItem::InputText {
text: "first".to_string(),
}],
content: vec![
ContentItem::InputText {
text: "first".to_string(),
},
ContentItem::OutputText {
text: "second".to_string(),
},
],
},
ResponseItem::Other,
];
let collected = collect_user_messages(&items);
assert_eq!(vec!["first".to_string()], collected);
assert_eq!(vec!["first\nsecond".to_string()], collected);
}
#[test]
@@ -353,8 +362,7 @@ mod tests {
id: None,
role: "user".to_string(),
content: vec![ContentItem::InputText {
text: "# AGENTS.md instructions for project\n\n<INSTRUCTIONS>\ndo things\n</INSTRUCTIONS>"
.to_string(),
text: "<user_instructions>do things</user_instructions>".to_string(),
}],
},
ResponseItem::Message {
@@ -380,16 +388,11 @@ mod tests {
#[test]
fn build_compacted_history_truncates_overlong_user_messages() {
// Use a small truncation limit so the test remains fast while still validating
// that oversized user content is truncated.
let max_bytes = 128;
let big = "X".repeat(max_bytes + 50);
let history = super::build_compacted_history_with_limit(
Vec::new(),
std::slice::from_ref(&big),
"SUMMARY",
max_bytes,
);
// Prepare a very large prior user message so the aggregated
// `user_messages_text` exceeds the truncation threshold used by
// `build_compacted_history` (80k bytes).
let big = "X".repeat(200_000);
let history = build_compacted_history(Vec::new(), std::slice::from_ref(&big), "SUMMARY");
// Expect exactly one bridge message added to history (plus any initial context we provided, which is none).
assert_eq!(history.len(), 1);

View File

@@ -3,21 +3,16 @@ use crate::error::Result as CodexResult;
use crate::protocol::Event;
use crate::protocol::Op;
use crate::protocol::Submission;
use std::path::PathBuf;
pub struct CodexConversation {
codex: Codex,
rollout_path: PathBuf,
}
/// Conduit for the bidirectional stream of messages that compose a conversation
/// in Codex.
impl CodexConversation {
pub(crate) fn new(codex: Codex, rollout_path: PathBuf) -> Self {
Self {
codex,
rollout_path,
}
pub(crate) fn new(codex: Codex) -> Self {
Self { codex }
}
pub async fn submit(&self, op: Op) -> CodexResult<String> {
@@ -32,8 +27,4 @@ impl CodexConversation {
pub async fn next_event(&self) -> CodexResult<Event> {
self.codex.next_event().await
}
pub fn rollout_path(&self) -> PathBuf {
self.rollout_path.clone()
}
}

View File

@@ -1,295 +0,0 @@
use std::sync::Arc;
use std::sync::atomic::AtomicU64;
use async_channel::Receiver;
use async_channel::Sender;
use codex_async_utils::OrCancelExt;
use codex_protocol::protocol::ApplyPatchApprovalRequestEvent;
use codex_protocol::protocol::Event;
use codex_protocol::protocol::EventMsg;
use codex_protocol::protocol::ExecApprovalRequestEvent;
use codex_protocol::protocol::Op;
use codex_protocol::protocol::SessionSource;
use codex_protocol::protocol::SubAgentSource;
use codex_protocol::protocol::Submission;
use codex_protocol::user_input::UserInput;
use tokio_util::sync::CancellationToken;
use crate::AuthManager;
use crate::codex::Codex;
use crate::codex::CodexSpawnOk;
use crate::codex::SUBMISSION_CHANNEL_CAPACITY;
use crate::codex::Session;
use crate::codex::TurnContext;
use crate::config::Config;
use crate::error::CodexErr;
use codex_protocol::protocol::InitialHistory;
/// Start an interactive sub-Codex conversation and return IO channels.
///
/// The returned `Codex` yields non-approval events from the sub-agent via
/// `next_event()`. Approval requests are handled via `parent_session` and are
/// not surfaced. Additional `Op`s can be submitted to the sub-agent with
/// `submit()`.
pub(crate) async fn run_codex_conversation_interactive(
config: Config,
auth_manager: Arc<AuthManager>,
parent_session: Arc<Session>,
parent_ctx: Arc<TurnContext>,
cancel_token: CancellationToken,
initial_history: Option<InitialHistory>,
) -> Result<Codex, CodexErr> {
let (tx_sub, rx_sub) = async_channel::bounded(SUBMISSION_CHANNEL_CAPACITY);
let (tx_ops, rx_ops) = async_channel::bounded(SUBMISSION_CHANNEL_CAPACITY);
let CodexSpawnOk { codex, .. } = Codex::spawn(
config,
auth_manager,
initial_history.unwrap_or(InitialHistory::New),
SessionSource::SubAgent(SubAgentSource::Review),
)
.await?;
let codex = Arc::new(codex);
    // Use a child token so a parent cancel cascades while cancellation
    // stays scoped to this task.
let cancel_token_events = cancel_token.child_token();
let cancel_token_ops = cancel_token.child_token();
// Forward events from the sub-agent to the consumer, filtering approvals and
// routing them to the parent session for decisions.
let parent_session_clone = Arc::clone(&parent_session);
let parent_ctx_clone = Arc::clone(&parent_ctx);
let codex_for_events = Arc::clone(&codex);
tokio::spawn(async move {
let _ = forward_events(
codex_for_events,
tx_sub,
parent_session_clone,
parent_ctx_clone,
cancel_token_events.clone(),
)
.or_cancel(&cancel_token_events)
.await;
});
// Forward ops from the caller to the sub-agent.
let codex_for_ops = Arc::clone(&codex);
tokio::spawn(async move {
forward_ops(codex_for_ops, rx_ops, cancel_token_ops).await;
});
Ok(Codex {
next_id: AtomicU64::new(0),
tx_sub: tx_ops,
rx_event: rx_sub,
})
}
/// Convenience wrapper for one-time use with an initial prompt.
///
/// Internally calls the interactive variant, then immediately submits the provided input.
pub(crate) async fn run_codex_conversation_one_shot(
config: Config,
auth_manager: Arc<AuthManager>,
input: Vec<UserInput>,
parent_session: Arc<Session>,
parent_ctx: Arc<TurnContext>,
cancel_token: CancellationToken,
initial_history: Option<InitialHistory>,
) -> Result<Codex, CodexErr> {
// Use a child token so we can stop the delegate after completion without
// requiring the caller to cancel the parent token.
let child_cancel = cancel_token.child_token();
let io = run_codex_conversation_interactive(
config,
auth_manager,
parent_session,
parent_ctx,
child_cancel.clone(),
initial_history,
)
.await?;
// Send the initial input to kick off the one-shot turn.
io.submit(Op::UserInput { items: input }).await?;
// Bridge events so we can observe completion and shut down automatically.
let (tx_bridge, rx_bridge) = async_channel::bounded(SUBMISSION_CHANNEL_CAPACITY);
let ops_tx = io.tx_sub.clone();
let io_for_bridge = io;
tokio::spawn(async move {
while let Ok(event) = io_for_bridge.next_event().await {
let should_shutdown = matches!(
event.msg,
EventMsg::TaskComplete(_) | EventMsg::TurnAborted(_)
);
let _ = tx_bridge.send(event).await;
if should_shutdown {
let _ = ops_tx
.send(Submission {
id: "shutdown".to_string(),
op: Op::Shutdown {},
})
.await;
child_cancel.cancel();
break;
}
}
});
// For one-shot usage, return a closed `tx_sub` so callers cannot submit
// additional ops after the initial request. Create a channel and drop the
// receiver to close it immediately.
let (tx_closed, rx_closed) = async_channel::bounded(SUBMISSION_CHANNEL_CAPACITY);
drop(rx_closed);
Ok(Codex {
next_id: AtomicU64::new(0),
rx_event: rx_bridge,
tx_sub: tx_closed,
})
}
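/// A hedged usage sketch of the one-shot wrapper above (illustrative only):
/// the wrapper submits the initial input itself and shuts the sub-agent down
/// on TaskComplete/TurnAborted, so callers just drain events.
#[allow(dead_code)]
async fn example_one_shot(
    config: Config,
    auth_manager: Arc<AuthManager>,
    input: Vec<UserInput>,
    parent_session: Arc<Session>,
    parent_ctx: Arc<TurnContext>,
) -> Result<(), CodexErr> {
    let io = run_codex_conversation_one_shot(
        config,
        auth_manager,
        input,
        parent_session,
        parent_ctx,
        CancellationToken::new(),
        None,
    )
    .await?;
    while let Ok(event) = io.next_event().await {
        // Non-approval events stream here until completion triggers shutdown.
        let _ = event;
    }
    Ok(())
}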
async fn forward_events(
codex: Arc<Codex>,
tx_sub: Sender<Event>,
parent_session: Arc<Session>,
parent_ctx: Arc<TurnContext>,
cancel_token: CancellationToken,
) {
while let Ok(event) = codex.next_event().await {
match event {
Event {
id: _,
msg: EventMsg::SessionConfigured(_),
} => continue,
Event {
id,
msg: EventMsg::ExecApprovalRequest(event),
} => {
// Initiate approval via parent session; do not surface to consumer.
handle_exec_approval(
&codex,
id,
&parent_session,
&parent_ctx,
event,
&cancel_token,
)
.await;
}
Event {
id,
msg: EventMsg::ApplyPatchApprovalRequest(event),
} => {
handle_patch_approval(
&codex,
id,
&parent_session,
&parent_ctx,
event,
&cancel_token,
)
.await;
}
other => {
let _ = tx_sub.send(other).await;
}
}
}
}
/// Forward ops from a caller to a sub-agent, respecting cancellation.
async fn forward_ops(
codex: Arc<Codex>,
rx_ops: Receiver<Submission>,
cancel_token_ops: CancellationToken,
) {
loop {
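        // `or_cancel` layers cancellation over `recv()`: Ok(Ok(..)) carries a
        // submission, Ok(Err(_)) means the channel closed, and Err(_) means
        // the token was cancelled.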
let op: Op = match rx_ops.recv().or_cancel(&cancel_token_ops).await {
Ok(Ok(Submission { id: _, op })) => op,
Ok(Err(_)) | Err(_) => break,
};
let _ = codex.submit(op).await;
}
}
/// Handle an ExecApprovalRequest by consulting the parent session and replying.
async fn handle_exec_approval(
codex: &Codex,
id: String,
parent_session: &Session,
parent_ctx: &TurnContext,
event: ExecApprovalRequestEvent,
cancel_token: &CancellationToken,
) {
    // Race approval with cancellation to avoid hangs.
let approval_fut = parent_session.request_command_approval(
parent_ctx,
parent_ctx.sub_id.clone(),
event.command,
event.cwd,
event.reason,
event.risk,
);
let decision = await_approval_with_cancel(
approval_fut,
parent_session,
&parent_ctx.sub_id,
cancel_token,
)
.await;
let _ = codex.submit(Op::ExecApproval { id, decision }).await;
}
/// Handle an ApplyPatchApprovalRequest by consulting the parent session and replying.
async fn handle_patch_approval(
codex: &Codex,
id: String,
parent_session: &Session,
parent_ctx: &TurnContext,
event: ApplyPatchApprovalRequestEvent,
cancel_token: &CancellationToken,
) {
let decision_rx = parent_session
.request_patch_approval(
parent_ctx,
parent_ctx.sub_id.clone(),
event.changes,
event.reason,
event.grant_root,
)
.await;
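    // `request_patch_approval` returns a oneshot receiver; if the sender is
    // dropped, `unwrap_or_default()` below falls back to the default decision.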
let decision = await_approval_with_cancel(
async move { decision_rx.await.unwrap_or_default() },
parent_session,
&parent_ctx.sub_id,
cancel_token,
)
.await;
let _ = codex.submit(Op::PatchApproval { id, decision }).await;
}
/// Await an approval decision, aborting on cancellation.
async fn await_approval_with_cancel<F>(
fut: F,
parent_session: &Session,
sub_id: &str,
cancel_token: &CancellationToken,
) -> codex_protocol::protocol::ReviewDecision
where
F: core::future::Future<Output = codex_protocol::protocol::ReviewDecision>,
{
tokio::select! {
biased;
_ = cancel_token.cancelled() => {
parent_session
.notify_approval(sub_id, codex_protocol::protocol::ReviewDecision::Abort)
.await;
codex_protocol::protocol::ReviewDecision::Abort
}
decision = fut => {
decision
}
}
}
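// A self-contained sketch of the same `biased` select pattern used above,
// assuming only `tokio` and `tokio-util` as dependencies: with `biased;` the
// cancellation branch is polled first, so a cancelled token wins even when
// both futures are ready.
#[allow(dead_code)]
async fn race_with_cancel<F>(
    fut: F,
    cancel: &tokio_util::sync::CancellationToken,
) -> Option<F::Output>
where
    F: core::future::Future,
{
    tokio::select! {
        biased;
        _ = cancel.cancelled() => None, // checked first because of `biased`
        value = fut => Some(value),
    }
}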

View File

@@ -1,4 +1,4 @@
use crate::bash::parse_shell_lc_plain_commands;
use crate::bash::parse_bash_lc_plain_commands;
pub fn command_might_be_dangerous(command: &[String]) -> bool {
if is_dangerous_to_call_with_exec(command) {
@@ -6,7 +6,7 @@ pub fn command_might_be_dangerous(command: &[String]) -> bool {
}
    // Support `bash -lc "<script>"` where any part of the script might contain a dangerous command.
if let Some(all_commands) = parse_shell_lc_plain_commands(command)
if let Some(all_commands) = parse_bash_lc_plain_commands(command)
&& all_commands
.iter()
.any(|cmd| is_dangerous_to_call_with_exec(cmd))
@@ -57,15 +57,6 @@ mod tests {
])));
}
#[test]
fn zsh_git_reset_is_dangerous() {
assert!(command_might_be_dangerous(&vec_str(&[
"zsh",
"-lc",
"git reset --hard"
])));
}
#[test]
fn git_status_is_not_dangerous() {
assert!(!command_might_be_dangerous(&vec_str(&["git", "status"])));

View File

@@ -1,4 +1,4 @@
use crate::bash::parse_shell_lc_plain_commands;
use crate::bash::parse_bash_lc_plain_commands;
pub fn is_known_safe_command(command: &[String]) -> bool {
let command: Vec<String> = command
@@ -29,7 +29,7 @@ pub fn is_known_safe_command(command: &[String]) -> bool {
// introduce side effects ( "&&", "||", ";", and "|" ). If every
    // individual command in the script is itself a known-safe command, then
// the composite expression is considered safe.
if let Some(all_commands) = parse_shell_lc_plain_commands(&command)
if let Some(all_commands) = parse_bash_lc_plain_commands(&command)
&& !all_commands.is_empty()
&& all_commands
.iter()
@@ -201,11 +201,6 @@ mod tests {
])));
}
#[test]
fn zsh_lc_safe_command_sequence() {
assert!(is_known_safe_command(&vec_str(&["zsh", "-lc", "ls"])));
}
#[test]
fn unknown_or_partial() {
assert!(!is_safe_to_call_with_exec(&vec_str(&["foo"])));

File diff suppressed because it is too large

View File

@@ -1,954 +0,0 @@
use crate::config::CONFIG_TOML_FILE;
use crate::config::types::McpServerConfig;
use crate::config::types::Notice;
use anyhow::Context;
use codex_protocol::config_types::ReasoningEffort;
use std::collections::BTreeMap;
use std::path::Path;
use std::path::PathBuf;
use tempfile::NamedTempFile;
use tokio::task;
use toml_edit::DocumentMut;
use toml_edit::Item as TomlItem;
use toml_edit::Table as TomlTable;
use toml_edit::value;
/// Discrete config mutations supported by the persistence engine.
#[derive(Clone, Debug)]
pub enum ConfigEdit {
/// Update the active (or default) model selection and optional reasoning effort.
SetModel {
model: Option<String>,
effort: Option<ReasoningEffort>,
},
/// Toggle the acknowledgement flag under `[notice]`.
SetNoticeHideFullAccessWarning(bool),
/// Toggle the Windows onboarding acknowledgement flag.
SetWindowsWslSetupAcknowledged(bool),
/// Replace the entire `[mcp_servers]` table.
ReplaceMcpServers(BTreeMap<String, McpServerConfig>),
/// Set trust_level = "trusted" under `[projects."<path>"]`,
/// migrating inline tables to explicit tables.
SetProjectTrusted(PathBuf),
/// Set the value stored at the exact dotted path.
SetPath {
segments: Vec<String>,
value: TomlItem,
},
/// Remove the value stored at the exact dotted path.
ClearPath { segments: Vec<String> },
}
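// Hypothetical batch, for illustration only: the variants above compose
// freely and are applied in order by the persistence engine. `value(..)` is
// the `toml_edit` helper already imported in this file; the key names are
// made up for the example.
#[allow(dead_code)]
fn example_edits() -> Vec<ConfigEdit> {
    vec![
        ConfigEdit::SetModel {
            model: Some("gpt-5-codex".to_string()),
            effort: Some(ReasoningEffort::High),
        },
        ConfigEdit::SetPath {
            segments: vec!["tui".to_string(), "notifications".to_string()],
            value: value(false),
        },
        ConfigEdit::ClearPath {
            segments: vec!["legacy_key".to_string()],
        },
    ]
}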
// TODO(jif) move to a dedicated file
mod document_helpers {
use crate::config::types::McpServerConfig;
use crate::config::types::McpServerTransportConfig;
use toml_edit::Array as TomlArray;
use toml_edit::InlineTable;
use toml_edit::Item as TomlItem;
use toml_edit::Table as TomlTable;
use toml_edit::value;
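    /// Coerce `item` into a mutable table for writing: inline tables become
    /// explicit tables, while scalars and missing items are replaced with a
    /// fresh implicit table. Arrays of tables yield `None`.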
pub(super) fn ensure_table_for_write(item: &mut TomlItem) -> Option<&mut TomlTable> {
match item {
TomlItem::Table(table) => Some(table),
TomlItem::Value(value) => {
if let Some(inline) = value.as_inline_table() {
*item = TomlItem::Table(table_from_inline(inline));
item.as_table_mut()
} else {
*item = TomlItem::Table(new_implicit_table());
item.as_table_mut()
}
}
TomlItem::None => {
*item = TomlItem::Table(new_implicit_table());
item.as_table_mut()
}
_ => None,
}
}
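    /// Read-oriented variant: converts an existing inline table in place, but
    /// never replaces scalars or missing items; those yield `None`.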
pub(super) fn ensure_table_for_read(item: &mut TomlItem) -> Option<&mut TomlTable> {
match item {
TomlItem::Table(table) => Some(table),
TomlItem::Value(value) => {
let inline = value.as_inline_table()?;
*item = TomlItem::Table(table_from_inline(inline));
item.as_table_mut()
}
_ => None,
}
}
pub(super) fn serialize_mcp_server(config: &McpServerConfig) -> TomlItem {
let mut entry = TomlTable::new();
entry.set_implicit(false);
match &config.transport {
McpServerTransportConfig::Stdio {
command,
args,
env,
env_vars,
cwd,
} => {
entry["command"] = value(command.clone());
if !args.is_empty() {
entry["args"] = array_from_iter(args.iter().cloned());
}
if let Some(env) = env
&& !env.is_empty()
{
entry["env"] = table_from_pairs(env.iter());
}
if !env_vars.is_empty() {
entry["env_vars"] = array_from_iter(env_vars.iter().cloned());
}
if let Some(cwd) = cwd {
entry["cwd"] = value(cwd.to_string_lossy().to_string());
}
}
McpServerTransportConfig::StreamableHttp {
url,
bearer_token_env_var,
http_headers,
env_http_headers,
} => {
entry["url"] = value(url.clone());
if let Some(env_var) = bearer_token_env_var {
entry["bearer_token_env_var"] = value(env_var.clone());
}
if let Some(headers) = http_headers
&& !headers.is_empty()
{
entry["http_headers"] = table_from_pairs(headers.iter());
}
if let Some(headers) = env_http_headers
&& !headers.is_empty()
{
entry["env_http_headers"] = table_from_pairs(headers.iter());
}
}
}
if !config.enabled {
entry["enabled"] = value(false);
}
if let Some(timeout) = config.startup_timeout_sec {
entry["startup_timeout_sec"] = value(timeout.as_secs_f64());
}
if let Some(timeout) = config.tool_timeout_sec {
entry["tool_timeout_sec"] = value(timeout.as_secs_f64());
}
if let Some(enabled_tools) = &config.enabled_tools
&& !enabled_tools.is_empty()
{
entry["enabled_tools"] = array_from_iter(enabled_tools.iter().cloned());
}
if let Some(disabled_tools) = &config.disabled_tools
&& !disabled_tools.is_empty()
{
entry["disabled_tools"] = array_from_iter(disabled_tools.iter().cloned());
}
TomlItem::Table(entry)
}
fn table_from_inline(inline: &InlineTable) -> TomlTable {
let mut table = new_implicit_table();
for (key, value) in inline.iter() {
let mut value = value.clone();
let decor = value.decor_mut();
decor.set_suffix("");
table.insert(key, TomlItem::Value(value));
}
table
}
pub(super) fn new_implicit_table() -> TomlTable {
let mut table = TomlTable::new();
table.set_implicit(true);
table
}
fn array_from_iter<I>(iter: I) -> TomlItem
where
I: Iterator<Item = String>,
{
let mut array = TomlArray::new();
for value in iter {
array.push(value);
}
TomlItem::Value(array.into())
}
fn table_from_pairs<'a, I>(pairs: I) -> TomlItem
where
I: IntoIterator<Item = (&'a String, &'a String)>,
{
let mut entries: Vec<_> = pairs.into_iter().collect();
entries.sort_by(|(a, _), (b, _)| a.cmp(b));
let mut table = TomlTable::new();
table.set_implicit(false);
for (key, val) in entries {
table.insert(key, value(val.clone()));
}
TomlItem::Table(table)
}
}
struct ConfigDocument {
doc: DocumentMut,
profile: Option<String>,
}
#[derive(Copy, Clone)]
enum Scope {
Global,
Profile,
}
#[derive(Copy, Clone)]
enum TraversalMode {
Create,
Existing,
}
impl ConfigDocument {
fn new(doc: DocumentMut, profile: Option<String>) -> Self {
Self { doc, profile }
}
fn apply(&mut self, edit: &ConfigEdit) -> anyhow::Result<bool> {
match edit {
ConfigEdit::SetModel { model, effort } => Ok({
let mut mutated = false;
mutated |= self.write_profile_value(
&["model"],
model.as_ref().map(|model_value| value(model_value.clone())),
);
mutated |= self.write_profile_value(
&["model_reasoning_effort"],
effort.map(|effort| value(effort.to_string())),
);
mutated
}),
ConfigEdit::SetNoticeHideFullAccessWarning(acknowledged) => Ok(self.write_value(
Scope::Global,
&[Notice::TABLE_KEY, "hide_full_access_warning"],
value(*acknowledged),
)),
ConfigEdit::SetWindowsWslSetupAcknowledged(acknowledged) => Ok(self.write_value(
Scope::Global,
&["windows_wsl_setup_acknowledged"],
value(*acknowledged),
)),
ConfigEdit::ReplaceMcpServers(servers) => Ok(self.replace_mcp_servers(servers)),
ConfigEdit::SetPath { segments, value } => Ok(self.insert(segments, value.clone())),
ConfigEdit::ClearPath { segments } => Ok(self.clear_owned(segments)),
ConfigEdit::SetProjectTrusted(project_path) => {
// Delegate to the existing, tested logic in config.rs to
// ensure tables are explicit and migration is preserved.
crate::config::set_project_trusted_inner(&mut self.doc, project_path.as_path())?;
Ok(true)
}
}
}
fn write_profile_value(&mut self, segments: &[&str], value: Option<TomlItem>) -> bool {
match value {
Some(item) => self.write_value(Scope::Profile, segments, item),
None => self.clear(Scope::Profile, segments),
}
}
fn write_value(&mut self, scope: Scope, segments: &[&str], value: TomlItem) -> bool {
let resolved = self.scoped_segments(scope, segments);
self.insert(&resolved, value)
}
fn clear(&mut self, scope: Scope, segments: &[&str]) -> bool {
let resolved = self.scoped_segments(scope, segments);
self.remove(&resolved)
}
fn clear_owned(&mut self, segments: &[String]) -> bool {
self.remove(segments)
}
fn replace_mcp_servers(&mut self, servers: &BTreeMap<String, McpServerConfig>) -> bool {
if servers.is_empty() {
return self.clear(Scope::Global, &["mcp_servers"]);
}
let mut table = TomlTable::new();
table.set_implicit(true);
for (name, config) in servers {
table.insert(name, document_helpers::serialize_mcp_server(config));
}
let item = TomlItem::Table(table);
self.write_value(Scope::Global, &["mcp_servers"], item)
}
fn scoped_segments(&self, scope: Scope, segments: &[&str]) -> Vec<String> {
let resolved: Vec<String> = segments
.iter()
.map(|segment| (*segment).to_string())
.collect();
if matches!(scope, Scope::Profile)
&& resolved.first().is_none_or(|segment| segment != "profiles")
&& let Some(profile) = self.profile.as_deref()
{
let mut scoped = Vec::with_capacity(resolved.len() + 2);
scoped.push("profiles".to_string());
scoped.push(profile.to_string());
scoped.extend(resolved);
return scoped;
}
resolved
}
fn insert(&mut self, segments: &[String], value: TomlItem) -> bool {
let Some((last, parents)) = segments.split_last() else {
return false;
};
let Some(parent) = self.descend(parents, TraversalMode::Create) else {
return false;
};
parent[last] = value;
true
}
fn remove(&mut self, segments: &[String]) -> bool {
let Some((last, parents)) = segments.split_last() else {
return false;
};
let Some(parent) = self.descend(parents, TraversalMode::Existing) else {
return false;
};
parent.remove(last).is_some()
}
fn descend(&mut self, segments: &[String], mode: TraversalMode) -> Option<&mut TomlTable> {
let mut current = self.doc.as_table_mut();
for segment in segments {
match mode {
TraversalMode::Create => {
if !current.contains_key(segment.as_str()) {
current.insert(
segment.as_str(),
TomlItem::Table(document_helpers::new_implicit_table()),
);
}
let item = current.get_mut(segment.as_str())?;
current = document_helpers::ensure_table_for_write(item)?;
}
TraversalMode::Existing => {
let item = current.get_mut(segment.as_str())?;
current = document_helpers::ensure_table_for_read(item)?;
}
}
}
Some(current)
}
}
/// Persist edits using a blocking strategy.
pub fn apply_blocking(
codex_home: &Path,
profile: Option<&str>,
edits: &[ConfigEdit],
) -> anyhow::Result<()> {
if edits.is_empty() {
return Ok(());
}
let config_path = codex_home.join(CONFIG_TOML_FILE);
let serialized = match std::fs::read_to_string(&config_path) {
Ok(contents) => contents,
Err(err) if err.kind() == std::io::ErrorKind::NotFound => String::new(),
Err(err) => return Err(err.into()),
};
let doc = if serialized.is_empty() {
DocumentMut::new()
} else {
serialized.parse::<DocumentMut>()?
};
let profile = profile.map(ToOwned::to_owned).or_else(|| {
doc.get("profile")
.and_then(|item| item.as_str())
.map(ToOwned::to_owned)
});
let mut document = ConfigDocument::new(doc, profile);
let mut mutated = false;
for edit in edits {
mutated |= document.apply(edit)?;
}
if !mutated {
return Ok(());
}
std::fs::create_dir_all(codex_home).with_context(|| {
format!(
"failed to create Codex home directory at {}",
codex_home.display()
)
})?;
let tmp = NamedTempFile::new_in(codex_home)?;
std::fs::write(tmp.path(), document.doc.to_string()).with_context(|| {
format!(
"failed to write temporary config file at {}",
tmp.path().display()
)
})?;
tmp.persist(config_path)?;
Ok(())
}
/// Persist edits asynchronously by offloading the blocking writer.
pub async fn apply(
codex_home: &Path,
profile: Option<&str>,
edits: Vec<ConfigEdit>,
) -> anyhow::Result<()> {
let codex_home = codex_home.to_path_buf();
let profile = profile.map(ToOwned::to_owned);
task::spawn_blocking(move || apply_blocking(&codex_home, profile.as_deref(), &edits))
.await
.context("config persistence task panicked")?
}
/// Fluent builder to batch config edits and apply them atomically.
#[derive(Default)]
pub struct ConfigEditsBuilder {
codex_home: PathBuf,
profile: Option<String>,
edits: Vec<ConfigEdit>,
}
impl ConfigEditsBuilder {
pub fn new(codex_home: &Path) -> Self {
Self {
codex_home: codex_home.to_path_buf(),
profile: None,
edits: Vec::new(),
}
}
pub fn with_profile(mut self, profile: Option<&str>) -> Self {
self.profile = profile.map(ToOwned::to_owned);
self
}
pub fn set_model(mut self, model: Option<&str>, effort: Option<ReasoningEffort>) -> Self {
self.edits.push(ConfigEdit::SetModel {
model: model.map(ToOwned::to_owned),
effort,
});
self
}
pub fn set_hide_full_access_warning(mut self, acknowledged: bool) -> Self {
self.edits
.push(ConfigEdit::SetNoticeHideFullAccessWarning(acknowledged));
self
}
pub fn set_windows_wsl_setup_acknowledged(mut self, acknowledged: bool) -> Self {
self.edits
.push(ConfigEdit::SetWindowsWslSetupAcknowledged(acknowledged));
self
}
pub fn replace_mcp_servers(mut self, servers: &BTreeMap<String, McpServerConfig>) -> Self {
self.edits
.push(ConfigEdit::ReplaceMcpServers(servers.clone()));
self
}
pub fn set_project_trusted<P: Into<PathBuf>>(mut self, project_path: P) -> Self {
self.edits
.push(ConfigEdit::SetProjectTrusted(project_path.into()));
self
}
/// Apply edits on a blocking thread.
pub fn apply_blocking(self) -> anyhow::Result<()> {
apply_blocking(&self.codex_home, self.profile.as_deref(), &self.edits)
}
/// Apply edits asynchronously via a blocking offload.
pub async fn apply(self) -> anyhow::Result<()> {
task::spawn_blocking(move || {
apply_blocking(&self.codex_home, self.profile.as_deref(), &self.edits)
})
.await
.context("config persistence task panicked")?
}
}
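// A hypothetical chained invocation, for illustration: edits accumulate on
// the builder and land in a single atomic rewrite of config.toml, scoped to
// a "team" profile where applicable (the profile name is made up here).
#[allow(dead_code)]
fn example_builder_usage(codex_home: &Path) -> anyhow::Result<()> {
    ConfigEditsBuilder::new(codex_home)
        .with_profile(Some("team"))
        .set_model(Some("o4-mini"), Some(ReasoningEffort::Low))
        .set_hide_full_access_warning(true)
        .apply_blocking()
}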
#[cfg(test)]
mod tests {
use super::*;
use crate::config::types::McpServerTransportConfig;
use codex_protocol::config_types::ReasoningEffort;
use pretty_assertions::assert_eq;
use tempfile::tempdir;
use tokio::runtime::Builder;
use toml::Value as TomlValue;
#[test]
fn blocking_set_model_top_level() {
let tmp = tempdir().expect("tmpdir");
let codex_home = tmp.path();
apply_blocking(
codex_home,
None,
&[ConfigEdit::SetModel {
model: Some("gpt-5-codex".to_string()),
effort: Some(ReasoningEffort::High),
}],
)
.expect("persist");
let contents =
std::fs::read_to_string(codex_home.join(CONFIG_TOML_FILE)).expect("read config");
let expected = r#"model = "gpt-5-codex"
model_reasoning_effort = "high"
"#;
assert_eq!(contents, expected);
}
#[test]
fn blocking_set_model_preserves_inline_table_contents() {
let tmp = tempdir().expect("tmpdir");
let codex_home = tmp.path();
// Seed with inline tables for profiles to simulate common user config.
std::fs::write(
codex_home.join(CONFIG_TOML_FILE),
r#"profile = "fast"
profiles = { fast = { model = "gpt-4o", sandbox_mode = "strict" } }
"#,
)
.expect("seed");
apply_blocking(
codex_home,
None,
&[ConfigEdit::SetModel {
model: Some("o4-mini".to_string()),
effort: None,
}],
)
.expect("persist");
let raw = std::fs::read_to_string(codex_home.join(CONFIG_TOML_FILE)).expect("read config");
let value: TomlValue = toml::from_str(&raw).expect("parse config");
        // Ensure sandbox_mode is preserved under profiles.fast and the model is updated.
let profiles_tbl = value
.get("profiles")
.and_then(|v| v.as_table())
.expect("profiles table");
let fast_tbl = profiles_tbl
.get("fast")
.and_then(|v| v.as_table())
.expect("fast table");
assert_eq!(
fast_tbl.get("sandbox_mode").and_then(|v| v.as_str()),
Some("strict")
);
assert_eq!(
fast_tbl.get("model").and_then(|v| v.as_str()),
Some("o4-mini")
);
}
#[test]
fn blocking_clear_model_removes_inline_table_entry() {
let tmp = tempdir().expect("tmpdir");
let codex_home = tmp.path();
std::fs::write(
codex_home.join(CONFIG_TOML_FILE),
r#"profile = "fast"
profiles = { fast = { model = "gpt-4o", sandbox_mode = "strict" } }
"#,
)
.expect("seed");
apply_blocking(
codex_home,
None,
&[ConfigEdit::SetModel {
model: None,
effort: Some(ReasoningEffort::High),
}],
)
.expect("persist");
let contents =
std::fs::read_to_string(codex_home.join(CONFIG_TOML_FILE)).expect("read config");
let expected = r#"profile = "fast"
[profiles.fast]
sandbox_mode = "strict"
model_reasoning_effort = "high"
"#;
assert_eq!(contents, expected);
}
#[test]
fn blocking_set_model_scopes_to_active_profile() {
let tmp = tempdir().expect("tmpdir");
let codex_home = tmp.path();
std::fs::write(
codex_home.join(CONFIG_TOML_FILE),
r#"profile = "team"
[profiles.team]
model_reasoning_effort = "low"
"#,
)
.expect("seed");
apply_blocking(
codex_home,
None,
&[ConfigEdit::SetModel {
model: Some("o5-preview".to_string()),
effort: Some(ReasoningEffort::Minimal),
}],
)
.expect("persist");
let contents =
std::fs::read_to_string(codex_home.join(CONFIG_TOML_FILE)).expect("read config");
let expected = r#"profile = "team"
[profiles.team]
model_reasoning_effort = "minimal"
model = "o5-preview"
"#;
assert_eq!(contents, expected);
}
#[test]
fn blocking_set_model_with_explicit_profile() {
let tmp = tempdir().expect("tmpdir");
let codex_home = tmp.path();
std::fs::write(
codex_home.join(CONFIG_TOML_FILE),
r#"[profiles."team a"]
model = "gpt-5-codex"
"#,
)
.expect("seed");
apply_blocking(
codex_home,
Some("team a"),
&[ConfigEdit::SetModel {
model: Some("o4-mini".to_string()),
effort: None,
}],
)
.expect("persist");
let contents =
std::fs::read_to_string(codex_home.join(CONFIG_TOML_FILE)).expect("read config");
let expected = r#"[profiles."team a"]
model = "o4-mini"
"#;
assert_eq!(contents, expected);
}
#[test]
fn blocking_set_hide_full_access_warning_preserves_table() {
let tmp = tempdir().expect("tmpdir");
let codex_home = tmp.path();
std::fs::write(
codex_home.join(CONFIG_TOML_FILE),
r#"# Global comment
[notice]
# keep me
existing = "value"
"#,
)
.expect("seed");
apply_blocking(
codex_home,
None,
&[ConfigEdit::SetNoticeHideFullAccessWarning(true)],
)
.expect("persist");
let contents =
std::fs::read_to_string(codex_home.join(CONFIG_TOML_FILE)).expect("read config");
let expected = r#"# Global comment
[notice]
# keep me
existing = "value"
hide_full_access_warning = true
"#;
assert_eq!(contents, expected);
}
#[test]
fn blocking_replace_mcp_servers_round_trips() {
let tmp = tempdir().expect("tmpdir");
let codex_home = tmp.path();
let mut servers = BTreeMap::new();
servers.insert(
"stdio".to_string(),
McpServerConfig {
transport: McpServerTransportConfig::Stdio {
command: "cmd".to_string(),
args: vec!["--flag".to_string()],
env: Some(
[
("B".to_string(), "2".to_string()),
("A".to_string(), "1".to_string()),
]
.into_iter()
.collect(),
),
env_vars: vec!["FOO".to_string()],
cwd: None,
},
enabled: true,
startup_timeout_sec: None,
tool_timeout_sec: None,
enabled_tools: Some(vec!["one".to_string(), "two".to_string()]),
disabled_tools: None,
},
);
servers.insert(
"http".to_string(),
McpServerConfig {
transport: McpServerTransportConfig::StreamableHttp {
url: "https://example.com".to_string(),
bearer_token_env_var: Some("TOKEN".to_string()),
http_headers: Some(
[("Z-Header".to_string(), "z".to_string())]
.into_iter()
.collect(),
),
env_http_headers: None,
},
enabled: false,
startup_timeout_sec: Some(std::time::Duration::from_secs(5)),
tool_timeout_sec: None,
enabled_tools: None,
disabled_tools: Some(vec!["forbidden".to_string()]),
},
);
apply_blocking(
codex_home,
None,
&[ConfigEdit::ReplaceMcpServers(servers.clone())],
)
.expect("persist");
let raw = std::fs::read_to_string(codex_home.join(CONFIG_TOML_FILE)).expect("read config");
let expected = "\
[mcp_servers.http]
url = \"https://example.com\"
bearer_token_env_var = \"TOKEN\"
enabled = false
startup_timeout_sec = 5.0
disabled_tools = [\"forbidden\"]
[mcp_servers.http.http_headers]
Z-Header = \"z\"
[mcp_servers.stdio]
command = \"cmd\"
args = [\"--flag\"]
env_vars = [\"FOO\"]
enabled_tools = [\"one\", \"two\"]
[mcp_servers.stdio.env]
A = \"1\"
B = \"2\"
";
assert_eq!(raw, expected);
}
#[test]
fn blocking_clear_path_noop_when_missing() {
let tmp = tempdir().expect("tmpdir");
let codex_home = tmp.path();
apply_blocking(
codex_home,
None,
&[ConfigEdit::ClearPath {
segments: vec!["missing".to_string()],
}],
)
.expect("apply");
assert!(
!codex_home.join(CONFIG_TOML_FILE).exists(),
"config.toml should not be created on noop"
);
}
#[test]
fn blocking_set_path_updates_notifications() {
let tmp = tempdir().expect("tmpdir");
let codex_home = tmp.path();
let item = value(false);
apply_blocking(
codex_home,
None,
&[ConfigEdit::SetPath {
segments: vec!["tui".to_string(), "notifications".to_string()],
value: item,
}],
)
.expect("apply");
let raw = std::fs::read_to_string(codex_home.join(CONFIG_TOML_FILE)).expect("read config");
let config: TomlValue = toml::from_str(&raw).expect("parse config");
let notifications = config
.get("tui")
.and_then(|item| item.as_table())
.and_then(|tbl| tbl.get("notifications"))
.and_then(toml::Value::as_bool);
assert_eq!(notifications, Some(false));
}
#[tokio::test]
async fn async_builder_set_model_persists() {
let tmp = tempdir().expect("tmpdir");
let codex_home = tmp.path().to_path_buf();
ConfigEditsBuilder::new(&codex_home)
.set_model(Some("gpt-5-codex"), Some(ReasoningEffort::High))
.apply()
.await
.expect("persist");
let contents =
std::fs::read_to_string(codex_home.join(CONFIG_TOML_FILE)).expect("read config");
let expected = r#"model = "gpt-5-codex"
model_reasoning_effort = "high"
"#;
assert_eq!(contents, expected);
}
#[test]
fn blocking_builder_set_model_round_trips_back_and_forth() {
let tmp = tempdir().expect("tmpdir");
let codex_home = tmp.path();
let initial_expected = r#"model = "o4-mini"
model_reasoning_effort = "low"
"#;
ConfigEditsBuilder::new(codex_home)
.set_model(Some("o4-mini"), Some(ReasoningEffort::Low))
.apply_blocking()
.expect("persist initial");
let mut contents =
std::fs::read_to_string(codex_home.join(CONFIG_TOML_FILE)).expect("read config");
assert_eq!(contents, initial_expected);
let updated_expected = r#"model = "gpt-5-codex"
model_reasoning_effort = "high"
"#;
ConfigEditsBuilder::new(codex_home)
.set_model(Some("gpt-5-codex"), Some(ReasoningEffort::High))
.apply_blocking()
.expect("persist update");
contents = std::fs::read_to_string(codex_home.join(CONFIG_TOML_FILE)).expect("read config");
assert_eq!(contents, updated_expected);
ConfigEditsBuilder::new(codex_home)
.set_model(Some("o4-mini"), Some(ReasoningEffort::Low))
.apply_blocking()
.expect("persist revert");
contents = std::fs::read_to_string(codex_home.join(CONFIG_TOML_FILE)).expect("read config");
assert_eq!(contents, initial_expected);
}
#[test]
fn blocking_set_asynchronous_helpers_available() {
let rt = Builder::new_current_thread()
.enable_all()
.build()
.expect("runtime");
let tmp = tempdir().expect("tmpdir");
let codex_home = tmp.path().to_path_buf();
rt.block_on(async {
ConfigEditsBuilder::new(&codex_home)
.set_hide_full_access_warning(true)
.apply()
.await
.expect("persist");
});
let raw = std::fs::read_to_string(codex_home.join(CONFIG_TOML_FILE)).expect("read config");
let notice = toml::from_str::<TomlValue>(&raw)
.expect("parse config")
.get("notice")
.and_then(|item| item.as_table())
.and_then(|tbl| tbl.get("hide_full_access_warning"))
.and_then(toml::Value::as_bool);
assert_eq!(notice, Some(true));
}
#[test]
fn replace_mcp_servers_blocking_clears_table_when_empty() {
let tmp = tempdir().expect("tmpdir");
let codex_home = tmp.path();
std::fs::write(
codex_home.join(CONFIG_TOML_FILE),
"[mcp_servers]\nfoo = { command = \"cmd\" }\n",
)
.expect("seed");
apply_blocking(
codex_home,
None,
&[ConfigEdit::ReplaceMcpServers(BTreeMap::new())],
)
.expect("persist");
let contents =
std::fs::read_to_string(codex_home.join(CONFIG_TOML_FILE)).expect("read config");
assert!(!contents.contains("mcp_servers"));
}
}

View File

@@ -0,0 +1,748 @@
use crate::config::CONFIG_TOML_FILE;
use anyhow::Result;
use std::path::Path;
use tempfile::NamedTempFile;
use toml_edit::DocumentMut;
pub const CONFIG_KEY_MODEL: &str = "model";
pub const CONFIG_KEY_EFFORT: &str = "model_reasoning_effort";
#[derive(Copy, Clone)]
enum NoneBehavior {
Skip,
Remove,
}
/// Persist overrides into `config.toml` using explicit key segments per
/// override. This avoids ambiguity with keys that contain dots or spaces.
pub async fn persist_overrides(
codex_home: &Path,
profile: Option<&str>,
overrides: &[(&[&str], &str)],
) -> Result<()> {
let with_options: Vec<(&[&str], Option<&str>)> = overrides
.iter()
.map(|(segments, value)| (*segments, Some(*value)))
.collect();
persist_overrides_with_behavior(codex_home, profile, &with_options, NoneBehavior::Skip).await
}
/// Persist overrides where values may be optional. Any entries with `None`
/// values are skipped. If all values are `None`, this becomes a no-op and
/// returns `Ok(())` without touching the file.
pub async fn persist_non_null_overrides(
codex_home: &Path,
profile: Option<&str>,
overrides: &[(&[&str], Option<&str>)],
) -> Result<()> {
persist_overrides_with_behavior(codex_home, profile, overrides, NoneBehavior::Skip).await
}
/// Persist overrides where `None` values clear any existing values from the
/// configuration file.
pub async fn persist_overrides_and_clear_if_none(
codex_home: &Path,
profile: Option<&str>,
overrides: &[(&[&str], Option<&str>)],
) -> Result<()> {
persist_overrides_with_behavior(codex_home, profile, overrides, NoneBehavior::Remove).await
}
/// Apply a single override onto a `toml_edit` document while preserving
/// existing formatting/comments.
/// The key is expressed as explicit segments to correctly handle keys that
/// contain dots or spaces.
fn apply_toml_edit_override_segments(
doc: &mut DocumentMut,
segments: &[&str],
value: toml_edit::Item,
) {
use toml_edit::Item;
if segments.is_empty() {
return;
}
let mut current = doc.as_table_mut();
for seg in &segments[..segments.len() - 1] {
if !current.contains_key(seg) {
current[*seg] = Item::Table(toml_edit::Table::new());
if let Some(t) = current[*seg].as_table_mut() {
t.set_implicit(true);
}
}
let maybe_item = current.get_mut(seg);
let Some(item) = maybe_item else { return };
if !item.is_table() {
*item = Item::Table(toml_edit::Table::new());
if let Some(t) = item.as_table_mut() {
t.set_implicit(true);
}
}
let Some(tbl) = item.as_table_mut() else {
return;
};
current = tbl;
}
let last = segments[segments.len() - 1];
current[last] = value;
}
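// Sketch of why explicit segments matter (hypothetical values): a profile
// name containing a dot stays a single quoted key instead of being split,
// which a dotted-string API could not guarantee.
#[allow(dead_code)]
fn example_segments() -> DocumentMut {
    let mut doc = DocumentMut::new();
    apply_toml_edit_override_segments(
        &mut doc,
        &["profiles", "my.team name", "model"],
        toml_edit::value("o3"),
    );
    // Renders roughly as:
    //   [profiles."my.team name"]
    //   model = "o3"
    doc
}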
async fn persist_overrides_with_behavior(
codex_home: &Path,
profile: Option<&str>,
overrides: &[(&[&str], Option<&str>)],
none_behavior: NoneBehavior,
) -> Result<()> {
if overrides.is_empty() {
return Ok(());
}
let should_skip = match none_behavior {
NoneBehavior::Skip => overrides.iter().all(|(_, value)| value.is_none()),
NoneBehavior::Remove => false,
};
if should_skip {
return Ok(());
}
let config_path = codex_home.join(CONFIG_TOML_FILE);
let read_result = tokio::fs::read_to_string(&config_path).await;
let mut doc = match read_result {
Ok(contents) => contents.parse::<DocumentMut>()?,
Err(e) if e.kind() == std::io::ErrorKind::NotFound => {
if overrides
.iter()
.all(|(_, value)| value.is_none() && matches!(none_behavior, NoneBehavior::Remove))
{
return Ok(());
}
tokio::fs::create_dir_all(codex_home).await?;
DocumentMut::new()
}
Err(e) => return Err(e.into()),
};
let effective_profile = if let Some(p) = profile {
Some(p.to_owned())
} else {
doc.get("profile")
.and_then(|i| i.as_str())
.map(str::to_string)
};
let mut mutated = false;
for (segments, value) in overrides.iter().copied() {
let mut seg_buf: Vec<&str> = Vec::new();
let segments_to_apply: &[&str];
if let Some(ref name) = effective_profile {
if segments.first().copied() == Some("profiles") {
segments_to_apply = segments;
} else {
seg_buf.reserve(2 + segments.len());
seg_buf.push("profiles");
seg_buf.push(name.as_str());
seg_buf.extend_from_slice(segments);
segments_to_apply = seg_buf.as_slice();
}
} else {
segments_to_apply = segments;
}
match value {
Some(v) => {
let item_value = toml_edit::value(v);
apply_toml_edit_override_segments(&mut doc, segments_to_apply, item_value);
mutated = true;
}
None => {
if matches!(none_behavior, NoneBehavior::Remove)
&& remove_toml_edit_segments(&mut doc, segments_to_apply)
{
mutated = true;
}
}
}
}
if !mutated {
return Ok(());
}
let tmp_file = NamedTempFile::new_in(codex_home)?;
tokio::fs::write(tmp_file.path(), doc.to_string()).await?;
tmp_file.persist(config_path)?;
Ok(())
}
fn remove_toml_edit_segments(doc: &mut DocumentMut, segments: &[&str]) -> bool {
use toml_edit::Item;
if segments.is_empty() {
return false;
}
let mut current = doc.as_table_mut();
for seg in &segments[..segments.len() - 1] {
let Some(item) = current.get_mut(seg) else {
return false;
};
match item {
Item::Table(table) => {
current = table;
}
_ => {
return false;
}
}
}
current.remove(segments[segments.len() - 1]).is_some()
}
#[cfg(test)]
mod tests {
use super::*;
use pretty_assertions::assert_eq;
use tempfile::tempdir;
/// Verifies model and effort are written at top-level when no profile is set.
#[tokio::test]
async fn set_default_model_and_effort_top_level_when_no_profile() {
let tmpdir = tempdir().expect("tmp");
let codex_home = tmpdir.path();
persist_overrides(
codex_home,
None,
&[
(&[CONFIG_KEY_MODEL], "gpt-5-codex"),
(&[CONFIG_KEY_EFFORT], "high"),
],
)
.await
.expect("persist");
let contents = read_config(codex_home).await;
let expected = r#"model = "gpt-5-codex"
model_reasoning_effort = "high"
"#;
assert_eq!(contents, expected);
}
/// Verifies values are written under the active profile when `profile` is set.
#[tokio::test]
async fn set_defaults_update_profile_when_profile_set() {
let tmpdir = tempdir().expect("tmp");
let codex_home = tmpdir.path();
        // Seed config with a profile selection but without a profiles table
let seed = "profile = \"o3\"\n";
tokio::fs::write(codex_home.join(CONFIG_TOML_FILE), seed)
.await
.expect("seed write");
persist_overrides(
codex_home,
None,
&[
(&[CONFIG_KEY_MODEL], "o3"),
(&[CONFIG_KEY_EFFORT], "minimal"),
],
)
.await
.expect("persist");
let contents = read_config(codex_home).await;
let expected = r#"profile = "o3"
[profiles.o3]
model = "o3"
model_reasoning_effort = "minimal"
"#;
assert_eq!(contents, expected);
}
/// Verifies profile names with dots/spaces are preserved via explicit segments.
#[tokio::test]
async fn set_defaults_update_profile_with_dot_and_space() {
let tmpdir = tempdir().expect("tmp");
let codex_home = tmpdir.path();
// Seed config with a profile name that contains a dot and a space
let seed = "profile = \"my.team name\"\n";
tokio::fs::write(codex_home.join(CONFIG_TOML_FILE), seed)
.await
.expect("seed write");
persist_overrides(
codex_home,
None,
&[
(&[CONFIG_KEY_MODEL], "o3"),
(&[CONFIG_KEY_EFFORT], "minimal"),
],
)
.await
.expect("persist");
let contents = read_config(codex_home).await;
let expected = r#"profile = "my.team name"
[profiles."my.team name"]
model = "o3"
model_reasoning_effort = "minimal"
"#;
assert_eq!(contents, expected);
}
/// Verifies explicit profile override writes under that profile even without active profile.
#[tokio::test]
async fn set_defaults_update_when_profile_override_supplied() {
let tmpdir = tempdir().expect("tmp");
let codex_home = tmpdir.path();
// No profile key in config.toml
tokio::fs::write(codex_home.join(CONFIG_TOML_FILE), "")
.await
.expect("seed write");
// Persist with an explicit profile override
persist_overrides(
codex_home,
Some("o3"),
&[(&[CONFIG_KEY_MODEL], "o3"), (&[CONFIG_KEY_EFFORT], "high")],
)
.await
.expect("persist");
let contents = read_config(codex_home).await;
let expected = r#"[profiles.o3]
model = "o3"
model_reasoning_effort = "high"
"#;
assert_eq!(contents, expected);
}
/// Verifies nested tables are created as needed when applying overrides.
#[tokio::test]
async fn persist_overrides_creates_nested_tables() {
let tmpdir = tempdir().expect("tmp");
let codex_home = tmpdir.path();
persist_overrides(
codex_home,
None,
&[
(&["a", "b", "c"], "v"),
(&["x"], "y"),
(&["profiles", "p1", CONFIG_KEY_MODEL], "gpt-5-codex"),
],
)
.await
.expect("persist");
let contents = read_config(codex_home).await;
let expected = r#"x = "y"
[a.b]
c = "v"
[profiles.p1]
model = "gpt-5-codex"
"#;
assert_eq!(contents, expected);
}
/// Verifies a scalar key becomes a table when nested keys are written.
#[tokio::test]
async fn persist_overrides_replaces_scalar_with_table() {
let tmpdir = tempdir().expect("tmp");
let codex_home = tmpdir.path();
let seed = "foo = \"bar\"\n";
tokio::fs::write(codex_home.join(CONFIG_TOML_FILE), seed)
.await
.expect("seed write");
persist_overrides(codex_home, None, &[(&["foo", "bar", "baz"], "ok")])
.await
.expect("persist");
let contents = read_config(codex_home).await;
let expected = r#"[foo.bar]
baz = "ok"
"#;
assert_eq!(contents, expected);
}
/// Verifies comments and spacing are preserved when writing under active profile.
#[tokio::test]
async fn set_defaults_preserve_comments() {
let tmpdir = tempdir().expect("tmp");
let codex_home = tmpdir.path();
// Seed a config with comments and spacing we expect to preserve
let seed = r#"# Global comment
# Another line
profile = "o3"
# Profile settings
[profiles.o3]
# keep me
existing = "keep"
"#;
tokio::fs::write(codex_home.join(CONFIG_TOML_FILE), seed)
.await
.expect("seed write");
// Apply defaults; since profile is set, it should write under [profiles.o3]
persist_overrides(
codex_home,
None,
&[(&[CONFIG_KEY_MODEL], "o3"), (&[CONFIG_KEY_EFFORT], "high")],
)
.await
.expect("persist");
let contents = read_config(codex_home).await;
let expected = r#"# Global comment
# Another line
profile = "o3"
# Profile settings
[profiles.o3]
# keep me
existing = "keep"
model = "o3"
model_reasoning_effort = "high"
"#;
assert_eq!(contents, expected);
}
/// Verifies comments and spacing are preserved when writing at top level.
#[tokio::test]
async fn set_defaults_preserve_global_comments() {
let tmpdir = tempdir().expect("tmp");
let codex_home = tmpdir.path();
// Seed a config WITHOUT a profile, containing comments and spacing
let seed = r#"# Top-level comments
# should be preserved
existing = "keep"
"#;
tokio::fs::write(codex_home.join(CONFIG_TOML_FILE), seed)
.await
.expect("seed write");
// Since there is no profile, the defaults should be written at top-level
persist_overrides(
codex_home,
None,
&[
(&[CONFIG_KEY_MODEL], "gpt-5-codex"),
(&[CONFIG_KEY_EFFORT], "minimal"),
],
)
.await
.expect("persist");
let contents = read_config(codex_home).await;
let expected = r#"# Top-level comments
# should be preserved
existing = "keep"
model = "gpt-5-codex"
model_reasoning_effort = "minimal"
"#;
assert_eq!(contents, expected);
}
/// Verifies errors on invalid TOML propagate and file is not clobbered.
#[tokio::test]
async fn persist_overrides_errors_on_parse_failure() {
let tmpdir = tempdir().expect("tmp");
let codex_home = tmpdir.path();
// Write an intentionally invalid TOML file
let invalid = "invalid = [unclosed";
tokio::fs::write(codex_home.join(CONFIG_TOML_FILE), invalid)
.await
.expect("seed write");
// Attempting to persist should return an error and must not clobber the file.
let res = persist_overrides(codex_home, None, &[(&["x"], "y")]).await;
assert!(res.is_err(), "expected parse error to propagate");
// File should be unchanged
let contents = read_config(codex_home).await;
assert_eq!(contents, invalid);
}
/// Verifies changing model only preserves existing effort at top-level.
#[tokio::test]
async fn changing_only_model_preserves_existing_effort_top_level() {
let tmpdir = tempdir().expect("tmp");
let codex_home = tmpdir.path();
// Seed with an effort value only
let seed = "model_reasoning_effort = \"minimal\"\n";
tokio::fs::write(codex_home.join(CONFIG_TOML_FILE), seed)
.await
.expect("seed write");
// Change only the model
persist_overrides(codex_home, None, &[(&[CONFIG_KEY_MODEL], "o3")])
.await
.expect("persist");
let contents = read_config(codex_home).await;
let expected = r#"model_reasoning_effort = "minimal"
model = "o3"
"#;
assert_eq!(contents, expected);
}
/// Verifies changing effort only preserves existing model at top-level.
#[tokio::test]
async fn changing_only_effort_preserves_existing_model_top_level() {
let tmpdir = tempdir().expect("tmp");
let codex_home = tmpdir.path();
// Seed with a model value only
let seed = "model = \"gpt-5-codex\"\n";
tokio::fs::write(codex_home.join(CONFIG_TOML_FILE), seed)
.await
.expect("seed write");
// Change only the effort
persist_overrides(codex_home, None, &[(&[CONFIG_KEY_EFFORT], "high")])
.await
.expect("persist");
let contents = read_config(codex_home).await;
let expected = r#"model = "gpt-5-codex"
model_reasoning_effort = "high"
"#;
assert_eq!(contents, expected);
}
/// Verifies changing model only preserves existing effort in active profile.
#[tokio::test]
async fn changing_only_model_preserves_effort_in_active_profile() {
let tmpdir = tempdir().expect("tmp");
let codex_home = tmpdir.path();
// Seed with an active profile and an existing effort under that profile
let seed = r#"profile = "p1"
[profiles.p1]
model_reasoning_effort = "low"
"#;
tokio::fs::write(codex_home.join(CONFIG_TOML_FILE), seed)
.await
.expect("seed write");
persist_overrides(codex_home, None, &[(&[CONFIG_KEY_MODEL], "o4-mini")])
.await
.expect("persist");
let contents = read_config(codex_home).await;
let expected = r#"profile = "p1"
[profiles.p1]
model_reasoning_effort = "low"
model = "o4-mini"
"#;
assert_eq!(contents, expected);
}
/// Verifies changing effort only preserves existing model in a profile override.
#[tokio::test]
async fn changing_only_effort_preserves_model_in_profile_override() {
let tmpdir = tempdir().expect("tmp");
let codex_home = tmpdir.path();
// No active profile key; we'll target an explicit override
let seed = r#"[profiles.team]
model = "gpt-5-codex"
"#;
tokio::fs::write(codex_home.join(CONFIG_TOML_FILE), seed)
.await
.expect("seed write");
persist_overrides(
codex_home,
Some("team"),
&[(&[CONFIG_KEY_EFFORT], "minimal")],
)
.await
.expect("persist");
let contents = read_config(codex_home).await;
let expected = r#"[profiles.team]
model = "gpt-5-codex"
model_reasoning_effort = "minimal"
"#;
assert_eq!(contents, expected);
}
/// Verifies `persist_non_null_overrides` skips `None` entries and writes only present values at top-level.
#[tokio::test]
async fn persist_non_null_skips_none_top_level() {
let tmpdir = tempdir().expect("tmp");
let codex_home = tmpdir.path();
persist_non_null_overrides(
codex_home,
None,
&[
(&[CONFIG_KEY_MODEL], Some("gpt-5-codex")),
(&[CONFIG_KEY_EFFORT], None),
],
)
.await
.expect("persist");
let contents = read_config(codex_home).await;
let expected = "model = \"gpt-5-codex\"\n";
assert_eq!(contents, expected);
}
/// Verifies no-op behavior when all provided overrides are `None` (no file created/modified).
#[tokio::test]
async fn persist_non_null_noop_when_all_none() {
let tmpdir = tempdir().expect("tmp");
let codex_home = tmpdir.path();
persist_non_null_overrides(
codex_home,
None,
&[(&["a"], None), (&["profiles", "p", "x"], None)],
)
.await
.expect("persist");
// Should not create config.toml on a pure no-op
assert!(!codex_home.join(CONFIG_TOML_FILE).exists());
}
/// Verifies entries are written under the specified profile and `None` entries are skipped.
#[tokio::test]
async fn persist_non_null_respects_profile_override() {
let tmpdir = tempdir().expect("tmp");
let codex_home = tmpdir.path();
persist_non_null_overrides(
codex_home,
Some("team"),
&[
(&[CONFIG_KEY_MODEL], Some("o3")),
(&[CONFIG_KEY_EFFORT], None),
],
)
.await
.expect("persist");
let contents = read_config(codex_home).await;
let expected = r#"[profiles.team]
model = "o3"
"#;
assert_eq!(contents, expected);
}
#[tokio::test]
async fn persist_clear_none_removes_top_level_value() {
let tmpdir = tempdir().expect("tmp");
let codex_home = tmpdir.path();
let seed = r#"model = "gpt-5-codex"
model_reasoning_effort = "medium"
"#;
tokio::fs::write(codex_home.join(CONFIG_TOML_FILE), seed)
.await
.expect("seed write");
persist_overrides_and_clear_if_none(
codex_home,
None,
&[
(&[CONFIG_KEY_MODEL], None),
(&[CONFIG_KEY_EFFORT], Some("high")),
],
)
.await
.expect("persist");
let contents = read_config(codex_home).await;
let expected = "model_reasoning_effort = \"high\"\n";
assert_eq!(contents, expected);
}
#[tokio::test]
async fn persist_clear_none_respects_active_profile() {
let tmpdir = tempdir().expect("tmp");
let codex_home = tmpdir.path();
let seed = r#"profile = "team"
[profiles.team]
model = "gpt-4"
model_reasoning_effort = "minimal"
"#;
tokio::fs::write(codex_home.join(CONFIG_TOML_FILE), seed)
.await
.expect("seed write");
persist_overrides_and_clear_if_none(
codex_home,
None,
&[
(&[CONFIG_KEY_MODEL], None),
(&[CONFIG_KEY_EFFORT], Some("high")),
],
)
.await
.expect("persist");
let contents = read_config(codex_home).await;
let expected = r#"profile = "team"
[profiles.team]
model_reasoning_effort = "high"
"#;
assert_eq!(contents, expected);
}
#[tokio::test]
async fn persist_clear_none_noop_when_file_missing() {
let tmpdir = tempdir().expect("tmp");
let codex_home = tmpdir.path();
persist_overrides_and_clear_if_none(codex_home, None, &[(&[CONFIG_KEY_MODEL], None)])
.await
.expect("persist");
assert!(!codex_home.join(CONFIG_TOML_FILE).exists());
}
// Test helper moved to bottom per review guidance.
async fn read_config(codex_home: &Path) -> String {
let p = codex_home.join(CONFIG_TOML_FILE);
tokio::fs::read_to_string(p).await.unwrap_or_default()
}
}

View File

@@ -4,7 +4,6 @@ use std::path::PathBuf;
use crate::protocol::AskForApproval;
use codex_protocol::config_types::ReasoningEffort;
use codex_protocol::config_types::ReasoningSummary;
use codex_protocol::config_types::SandboxMode;
use codex_protocol::config_types::Verbosity;
/// Collection of common configuration options that a user can define as a unit
@@ -16,19 +15,18 @@ pub struct ConfigProfile {
/// [`ModelProviderInfo`] to use.
pub model_provider: Option<String>,
pub approval_policy: Option<AskForApproval>,
pub sandbox_mode: Option<SandboxMode>,
pub model_reasoning_effort: Option<ReasoningEffort>,
pub model_reasoning_summary: Option<ReasoningSummary>,
pub model_verbosity: Option<Verbosity>,
pub chatgpt_base_url: Option<String>,
pub experimental_instructions_file: Option<PathBuf>,
pub experimental_compact_prompt_file: Option<PathBuf>,
pub include_plan_tool: Option<bool>,
pub include_apply_patch_tool: Option<bool>,
pub include_view_image_tool: Option<bool>,
pub experimental_use_unified_exec_tool: Option<bool>,
pub experimental_use_exec_command_tool: Option<bool>,
pub experimental_use_rmcp_client: Option<bool>,
pub experimental_use_freeform_apply_patch: Option<bool>,
pub experimental_sandbox_command_assessment: Option<bool>,
pub tools_web_search: Option<bool>,
pub tools_view_image: Option<bool>,
/// Optional feature toggles scoped to this profile.

View File

@@ -35,14 +35,6 @@ pub struct McpServerConfig {
/// Default timeout for MCP tool calls initiated via this server.
#[serde(default, with = "option_duration_secs")]
pub tool_timeout_sec: Option<Duration>,
/// Explicit allow-list of tools exposed from this server. When set, only these tools will be registered.
#[serde(default, skip_serializing_if = "Option::is_none")]
pub enabled_tools: Option<Vec<String>>,
/// Explicit deny-list of tools. These tools will be removed after applying `enabled_tools`.
#[serde(default, skip_serializing_if = "Option::is_none")]
pub disabled_tools: Option<Vec<String>>,
}
impl<'de> Deserialize<'de> for McpServerConfig {
@@ -50,7 +42,7 @@ impl<'de> Deserialize<'de> for McpServerConfig {
where
D: Deserializer<'de>,
{
#[derive(Deserialize, Clone)]
#[derive(Deserialize)]
struct RawMcpServerConfig {
// stdio
command: Option<String>,
@@ -80,13 +72,9 @@ impl<'de> Deserialize<'de> for McpServerConfig {
tool_timeout_sec: Option<Duration>,
#[serde(default)]
enabled: Option<bool>,
#[serde(default)]
enabled_tools: Option<Vec<String>>,
#[serde(default)]
disabled_tools: Option<Vec<String>>,
}
let mut raw = RawMcpServerConfig::deserialize(deserializer)?;
let raw = RawMcpServerConfig::deserialize(deserializer)?;
let startup_timeout_sec = match (raw.startup_timeout_sec, raw.startup_timeout_ms) {
(Some(sec), _) => {
@@ -96,10 +84,6 @@ impl<'de> Deserialize<'de> for McpServerConfig {
(None, Some(ms)) => Some(Duration::from_millis(ms)),
(None, None) => None,
};
let tool_timeout_sec = raw.tool_timeout_sec;
let enabled = raw.enabled.unwrap_or_else(default_enabled);
let enabled_tools = raw.enabled_tools.clone();
let disabled_tools = raw.disabled_tools.clone();
fn throw_if_set<E, T>(transport: &str, field: &str, value: Option<&T>) -> Result<(), E>
where
@@ -113,46 +97,72 @@ impl<'de> Deserialize<'de> for McpServerConfig {
)))
}
let transport = if let Some(command) = raw.command.clone() {
throw_if_set("stdio", "url", raw.url.as_ref())?;
throw_if_set(
"stdio",
"bearer_token_env_var",
raw.bearer_token_env_var.as_ref(),
)?;
throw_if_set("stdio", "bearer_token", raw.bearer_token.as_ref())?;
throw_if_set("stdio", "http_headers", raw.http_headers.as_ref())?;
throw_if_set("stdio", "env_http_headers", raw.env_http_headers.as_ref())?;
McpServerTransportConfig::Stdio {
command,
args: raw.args.clone().unwrap_or_default(),
env: raw.env.clone(),
env_vars: raw.env_vars.clone().unwrap_or_default(),
cwd: raw.cwd.take(),
}
} else if let Some(url) = raw.url.clone() {
throw_if_set("streamable_http", "args", raw.args.as_ref())?;
throw_if_set("streamable_http", "env", raw.env.as_ref())?;
throw_if_set("streamable_http", "env_vars", raw.env_vars.as_ref())?;
throw_if_set("streamable_http", "cwd", raw.cwd.as_ref())?;
throw_if_set("streamable_http", "bearer_token", raw.bearer_token.as_ref())?;
McpServerTransportConfig::StreamableHttp {
let transport = match raw {
RawMcpServerConfig {
command: Some(command),
args,
env,
env_vars,
cwd,
url,
bearer_token_env_var: raw.bearer_token_env_var.clone(),
http_headers: raw.http_headers.clone(),
env_http_headers: raw.env_http_headers.take(),
bearer_token_env_var,
http_headers,
env_http_headers,
..
} => {
throw_if_set("stdio", "url", url.as_ref())?;
throw_if_set(
"stdio",
"bearer_token_env_var",
bearer_token_env_var.as_ref(),
)?;
throw_if_set("stdio", "http_headers", http_headers.as_ref())?;
throw_if_set("stdio", "env_http_headers", env_http_headers.as_ref())?;
McpServerTransportConfig::Stdio {
command,
args: args.unwrap_or_default(),
env,
env_vars: env_vars.unwrap_or_default(),
cwd,
}
}
} else {
return Err(SerdeError::custom("invalid transport"));
RawMcpServerConfig {
url: Some(url),
bearer_token,
bearer_token_env_var,
command,
args,
env,
env_vars,
cwd,
http_headers,
env_http_headers,
startup_timeout_sec: _,
tool_timeout_sec: _,
startup_timeout_ms: _,
enabled: _,
} => {
throw_if_set("streamable_http", "command", command.as_ref())?;
throw_if_set("streamable_http", "args", args.as_ref())?;
throw_if_set("streamable_http", "env", env.as_ref())?;
throw_if_set("streamable_http", "env_vars", env_vars.as_ref())?;
throw_if_set("streamable_http", "cwd", cwd.as_ref())?;
throw_if_set("streamable_http", "bearer_token", bearer_token.as_ref())?;
McpServerTransportConfig::StreamableHttp {
url,
bearer_token_env_var,
http_headers,
env_http_headers,
}
}
_ => return Err(SerdeError::custom("invalid transport")),
};
Ok(Self {
transport,
startup_timeout_sec,
tool_timeout_sec,
enabled,
enabled_tools,
disabled_tools,
tool_timeout_sec: raw.tool_timeout_sec,
enabled: raw.enabled.unwrap_or_else(default_enabled),
})
}
}
@@ -361,7 +371,7 @@ pub struct Notice {
}
impl Notice {
/// referenced by config_edit helpers when writing notice flags
/// used by set_hide_full_access_warning until we refactor config updates
pub(crate) const TABLE_KEY: &'static str = "notice";
}
@@ -517,8 +527,6 @@ mod tests {
}
);
assert!(cfg.enabled);
assert!(cfg.enabled_tools.is_none());
assert!(cfg.disabled_tools.is_none());
}
#[test]
@@ -693,21 +701,6 @@ mod tests {
);
}
#[test]
fn deserialize_server_config_with_tool_filters() {
let cfg: McpServerConfig = toml::from_str(
r#"
command = "echo"
enabled_tools = ["allowed"]
disabled_tools = ["blocked"]
"#,
)
.expect("should deserialize tool filters");
assert_eq!(cfg.enabled_tools, Some(vec!["allowed".to_string()]));
assert_eq!(cfg.disabled_tools, Some(vec!["blocked".to_string()]));
}
#[test]
fn deserialize_rejects_command_and_url() {
toml::from_str::<McpServerConfig>(

File diff suppressed because it is too large

View File

@@ -3,6 +3,8 @@ use crate::CodexAuth;
use crate::codex::Codex;
use crate::codex::CodexSpawnOk;
use crate::codex::INITIAL_SUBMIT_ID;
use crate::codex::compact::content_items_to_text;
use crate::codex::compact::is_session_prefix_message;
use crate::codex_conversation::CodexConversation;
use crate::config::Config;
use crate::error::CodexErr;
@@ -12,7 +14,6 @@ use crate::protocol::EventMsg;
use crate::protocol::SessionConfiguredEvent;
use crate::rollout::RolloutRecorder;
use codex_protocol::ConversationId;
use codex_protocol::items::TurnItem;
use codex_protocol::models::ResponseItem;
use codex_protocol::protocol::InitialHistory;
use codex_protocol::protocol::RolloutItem;
@@ -73,7 +74,7 @@ impl ConversationManager {
config,
auth_manager,
InitialHistory::New,
self.session_source.clone(),
self.session_source,
)
.await?;
self.finalize_spawn(codex, conversation_id).await
@@ -98,10 +99,7 @@ impl ConversationManager {
}
};
let conversation = Arc::new(CodexConversation::new(
codex,
session_configured.rollout_path.clone(),
));
let conversation = Arc::new(CodexConversation::new(codex));
self.conversations
.write()
.await
@@ -132,26 +130,10 @@ impl ConversationManager {
auth_manager: Arc<AuthManager>,
) -> CodexResult<NewConversation> {
let initial_history = RolloutRecorder::get_rollout_history(&rollout_path).await?;
self.resume_conversation_with_history(config, initial_history, auth_manager)
.await
}
pub async fn resume_conversation_with_history(
&self,
config: Config,
initial_history: InitialHistory,
auth_manager: Arc<AuthManager>,
) -> CodexResult<NewConversation> {
let CodexSpawnOk {
codex,
conversation_id,
} = Codex::spawn(
config,
auth_manager,
initial_history,
self.session_source.clone(),
)
.await?;
} = Codex::spawn(config, auth_manager, initial_history, self.session_source).await?;
self.finalize_spawn(codex, conversation_id).await
}
@@ -185,7 +167,7 @@ impl ConversationManager {
let CodexSpawnOk {
codex,
conversation_id,
} = Codex::spawn(config, auth_manager, history, self.session_source.clone()).await?;
} = Codex::spawn(config, auth_manager, history, self.session_source).await?;
self.finalize_spawn(codex, conversation_id).await
}
@@ -200,11 +182,9 @@ fn truncate_before_nth_user_message(history: InitialHistory, n: usize) -> Initia
// Find indices of user message inputs in rollout order.
let mut user_positions: Vec<usize> = Vec::new();
for (idx, item) in items.iter().enumerate() {
if let RolloutItem::ResponseItem(item @ ResponseItem::Message { .. }) = item
&& matches!(
crate::event_mapping::parse_turn_item(item),
Some(TurnItem::UserMessage(_))
)
if let RolloutItem::ResponseItem(ResponseItem::Message { role, content, .. }) = item
&& role == "user"
&& content_items_to_text(content).is_some_and(|text| !is_session_prefix_message(&text))
{
user_positions.push(idx);
}

View File

@@ -1,13 +1,5 @@
use crate::spawn::CODEX_SANDBOX_ENV_VAR;
use http::Error as HttpError;
use reqwest::IntoUrl;
use reqwest::Method;
use reqwest::Response;
use reqwest::header::HeaderName;
use reqwest::header::HeaderValue;
use serde::Serialize;
use std::collections::HashMap;
use std::fmt::Display;
use std::sync::LazyLock;
use std::sync::Mutex;
use std::sync::OnceLock;
@@ -30,130 +22,6 @@ use std::sync::OnceLock;
pub static USER_AGENT_SUFFIX: LazyLock<Mutex<Option<String>>> = LazyLock::new(|| Mutex::new(None));
pub const DEFAULT_ORIGINATOR: &str = "codex_cli_rs";
pub const CODEX_INTERNAL_ORIGINATOR_OVERRIDE_ENV_VAR: &str = "CODEX_INTERNAL_ORIGINATOR_OVERRIDE";
#[derive(Clone, Debug)]
pub struct CodexHttpClient {
inner: reqwest::Client,
}
impl CodexHttpClient {
fn new(inner: reqwest::Client) -> Self {
Self { inner }
}
pub fn get<U>(&self, url: U) -> CodexRequestBuilder
where
U: IntoUrl,
{
self.request(Method::GET, url)
}
pub fn post<U>(&self, url: U) -> CodexRequestBuilder
where
U: IntoUrl,
{
self.request(Method::POST, url)
}
pub fn request<U>(&self, method: Method, url: U) -> CodexRequestBuilder
where
U: IntoUrl,
{
let url_str = url.as_str().to_string();
CodexRequestBuilder::new(self.inner.request(method.clone(), url), method, url_str)
}
}
#[must_use = "requests are not sent unless `send` is awaited"]
#[derive(Debug)]
pub struct CodexRequestBuilder {
builder: reqwest::RequestBuilder,
method: Method,
url: String,
}
impl CodexRequestBuilder {
fn new(builder: reqwest::RequestBuilder, method: Method, url: String) -> Self {
Self {
builder,
method,
url,
}
}
fn map(self, f: impl FnOnce(reqwest::RequestBuilder) -> reqwest::RequestBuilder) -> Self {
Self {
builder: f(self.builder),
method: self.method,
url: self.url,
}
}
pub fn header<K, V>(self, key: K, value: V) -> Self
where
HeaderName: TryFrom<K>,
<HeaderName as TryFrom<K>>::Error: Into<HttpError>,
HeaderValue: TryFrom<V>,
<HeaderValue as TryFrom<V>>::Error: Into<HttpError>,
{
self.map(|builder| builder.header(key, value))
}
pub fn bearer_auth<T>(self, token: T) -> Self
where
T: Display,
{
self.map(|builder| builder.bearer_auth(token))
}
pub fn json<T>(self, value: &T) -> Self
where
T: ?Sized + Serialize,
{
self.map(|builder| builder.json(value))
}
pub async fn send(self) -> Result<Response, reqwest::Error> {
match self.builder.send().await {
Ok(response) => {
let request_ids = Self::extract_request_ids(&response);
tracing::debug!(
method = %self.method,
url = %self.url,
status = %response.status(),
request_ids = ?request_ids,
version = ?response.version(),
"Request completed"
);
Ok(response)
}
Err(error) => {
let status = error.status();
tracing::debug!(
method = %self.method,
url = %self.url,
status = status.map(|s| s.as_u16()),
error = %error,
"Request failed"
);
Err(error)
}
}
}
fn extract_request_ids(response: &Response) -> HashMap<String, String> {
["cf-ray", "x-request-id", "x-oai-request-id"]
.iter()
.filter_map(|&name| {
let header_name = HeaderName::from_static(name);
let value = response.headers().get(header_name)?;
let value = value.to_str().ok()?.to_owned();
Some((name.to_owned(), value))
})
.collect()
}
}
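A hypothetical call site, to show that the wrapper mirrors reqwest's fluent builder while adding the request-id logging inside `send` (URL, token, and payload are placeholders; assumes serde_json is available and an async context with `?`):

let client = create_client();
let response = client
    .post("https://example.com/v1/responses")
    .bearer_auth("sk-placeholder")
    .json(&serde_json::json!({ "input": "hi" }))
    .send()
    .await?; // logs method, URL, status, and any cf-ray / x-request-id headers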
#[derive(Debug, Clone)]
pub struct Originator {
pub value: String,
@@ -256,8 +124,8 @@ fn sanitize_user_agent(candidate: String, fallback: &str) -> String {
}
}
/// Create an HTTP client with default `originator` and `User-Agent` headers set.
pub fn create_client() -> CodexHttpClient {
/// Create a reqwest client with default `originator` and `User-Agent` headers set.
pub fn create_client() -> reqwest::Client {
use reqwest::header::HeaderMap;
let mut headers = HeaderMap::new();
@@ -272,8 +140,7 @@ pub fn create_client() -> CodexHttpClient {
builder = builder.no_proxy();
}
let inner = builder.build().unwrap_or_else(|_| reqwest::Client::new());
CodexHttpClient::new(inner)
builder.build().unwrap_or_else(|_| reqwest::Client::new())
}
fn is_sandboxed() -> bool {

View File

@@ -1,11 +1,8 @@
use crate::codex::ProcessedResponseItem;
use crate::exec::ExecToolCallOutput;
use crate::token_data::KnownPlan;
use crate::token_data::PlanType;
use crate::truncate::truncate_middle;
use chrono::DateTime;
use chrono::Datelike;
use chrono::Local;
use chrono::Utc;
use codex_async_utils::CancelErr;
use codex_protocol::ConversationId;
@@ -56,11 +53,8 @@ pub enum SandboxErr {
#[derive(Error, Debug)]
pub enum CodexErr {
// todo(aibrahim): get rid of this error carrying the dangling artifacts
#[error("turn aborted. Something went wrong? Hit `/feedback` to report the issue.")]
TurnAborted {
dangling_artifacts: Vec<ProcessedResponseItem>,
},
#[error("turn aborted")]
TurnAborted,
/// Returned by ResponsesClient when the SSE stream disconnects or errors out **after** the HTTP
/// handshake has succeeded but **before** it finished emitting `response.completed`.
@@ -93,7 +87,7 @@ pub enum CodexErr {
/// Returned by run_command_stream when the user presses Ctrl-C (SIGINT). Session uses this to
/// surface a polite FunctionCallOutput back to the model instead of crashing the CLI.
#[error("interrupted (Ctrl-C). Something went wrong? Hit `/feedback` to report the issue.")]
#[error("interrupted (Ctrl-C)")]
Interrupted,
/// Unexpected HTTP status code.
@@ -164,9 +158,7 @@ pub enum CodexErr {
impl From<CancelErr> for CodexErr {
fn from(_: CancelErr) -> Self {
CodexErr::TurnAborted {
dangling_artifacts: Vec::new(),
}
CodexErr::TurnAborted
}
}
@@ -255,7 +247,7 @@ impl std::fmt::Display for UsageLimitReachedError {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let message = match self.plan_type.as_ref() {
Some(PlanType::Known(KnownPlan::Plus)) => format!(
"You've hit your usage limit. Upgrade to Pro (https://openai.com/chatgpt/pricing), visit chatgpt.com/codex/settings/usage to purchase more credits{}",
"You've hit your usage limit. Upgrade to Pro (https://openai.com/chatgpt/pricing){}",
retry_suffix_after_or(self.resets_at.as_ref())
),
Some(PlanType::Known(KnownPlan::Team)) | Some(PlanType::Known(KnownPlan::Business)) => {
@@ -268,11 +260,8 @@ impl std::fmt::Display for UsageLimitReachedError {
"You've hit your usage limit. Upgrade to Plus to continue using Codex (https://openai.com/chatgpt/pricing)."
.to_string()
}
Some(PlanType::Known(KnownPlan::Pro)) => format!(
"You've hit your usage limit. Visit chatgpt.com/codex/settings/usage to purchase more credits{}",
retry_suffix_after_or(self.resets_at.as_ref())
),
Some(PlanType::Known(KnownPlan::Enterprise))
Some(PlanType::Known(KnownPlan::Pro))
| Some(PlanType::Known(KnownPlan::Enterprise))
| Some(PlanType::Known(KnownPlan::Edu)) => format!(
"You've hit your usage limit.{}",
retry_suffix(self.resets_at.as_ref())
@@ -288,46 +277,28 @@ impl std::fmt::Display for UsageLimitReachedError {
}
fn retry_suffix(resets_at: Option<&DateTime<Utc>>) -> String {
if let Some(resets_at) = resets_at {
let formatted = format_retry_timestamp(resets_at);
format!(" Try again at {formatted}.")
if let Some(secs) = remaining_seconds(resets_at) {
let reset_duration = format_reset_duration(secs);
format!(" Try again in {reset_duration}.")
} else {
" Try again later.".to_string()
}
}
fn retry_suffix_after_or(resets_at: Option<&DateTime<Utc>>) -> String {
if let Some(resets_at) = resets_at {
let formatted = format_retry_timestamp(resets_at);
format!(" or try again at {formatted}.")
if let Some(secs) = remaining_seconds(resets_at) {
let reset_duration = format_reset_duration(secs);
format!(" or try again in {reset_duration}.")
} else {
" or try again later.".to_string()
}
}
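A quick spot check of the fallback wording, read straight off the branches above (sketch, not part of the source):

// The two helpers differ only in the leading connective, so callers can
// splice them after different clauses of the usage-limit message.
assert_eq!(retry_suffix(None), " Try again later.");
assert_eq!(retry_suffix_after_or(None), " or try again later.");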
fn format_retry_timestamp(resets_at: &DateTime<Utc>) -> String {
let local_reset = resets_at.with_timezone(&Local);
let local_now = now_for_retry().with_timezone(&Local);
if local_reset.date_naive() == local_now.date_naive() {
local_reset.format("%-I:%M %p").to_string()
} else {
let suffix = day_suffix(local_reset.day());
local_reset
.format(&format!("%b %-d{suffix}, %Y %-I:%M %p"))
.to_string()
}
}
fn day_suffix(day: u32) -> &'static str {
match day {
11..=13 => "th",
_ => match day % 10 {
1 => "st",
2 => "nd", // codespell:ignore
3 => "rd",
_ => "th",
},
}
}
fn remaining_seconds(resets_at: Option<&DateTime<Utc>>) -> Option<u64> {
let resets_at = resets_at.cloned()?;
let now = now_for_retry();
let secs = resets_at.signed_duration_since(now).num_seconds();
Some(if secs <= 0 { 0 } else { secs as u64 })
}
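The clamping means callers never see a negative duration; the edge cases, as comments rather than runnable asserts:

// remaining_seconds(None)                    -> None (no reset time known)
// remaining_seconds(Some(&time_in_the_past)) -> Some(0), clamped at zero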
#[cfg(test)]
@@ -346,6 +317,36 @@ fn now_for_retry() -> DateTime<Utc> {
Utc::now()
}
fn format_reset_duration(total_secs: u64) -> String {
let days = total_secs / 86_400;
let hours = (total_secs % 86_400) / 3_600;
let minutes = (total_secs % 3_600) / 60;
let mut parts: Vec<String> = Vec::new();
if days > 0 {
let unit = if days == 1 { "day" } else { "days" };
parts.push(format!("{days} {unit}"));
}
if hours > 0 {
let unit = if hours == 1 { "hour" } else { "hours" };
parts.push(format!("{hours} {unit}"));
}
if minutes > 0 {
let unit = if minutes == 1 { "minute" } else { "minutes" };
parts.push(format!("{minutes} {unit}"));
}
if parts.is_empty() {
return "less than a minute".to_string();
}
match parts.len() {
1 => parts[0].clone(),
2 => format!("{} {}", parts[0], parts[1]),
_ => format!("{} {} {}", parts[0], parts[1], parts[2]),
}
}
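Spot checks of the bucketing, matching the unit tests later in this file (sketch; the function is private, so these would live alongside it):

assert_eq!(format_reset_duration(30), "less than a minute");
assert_eq!(format_reset_duration(5 * 60), "5 minutes");
assert_eq!(format_reset_duration(3 * 3_600 + 32 * 60), "3 hours 32 minutes");
assert_eq!(format_reset_duration(2 * 86_400 + 3 * 3_600 + 5 * 60), "2 days 3 hours 5 minutes");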
#[derive(Debug)]
pub struct EnvVarError {
/// Name of the environment variable that is missing.
@@ -460,7 +461,7 @@ mod tests {
};
assert_eq!(
err.to_string(),
"You've hit your usage limit. Upgrade to Pro (https://openai.com/chatgpt/pricing), visit chatgpt.com/codex/settings/usage to purchase more credits or try again later."
"You've hit your usage limit. Upgrade to Pro (https://openai.com/chatgpt/pricing) or try again later."
);
}
@@ -562,16 +563,15 @@ mod tests {
let base = Utc.with_ymd_and_hms(2024, 1, 1, 0, 0, 0).unwrap();
let resets_at = base + ChronoDuration::hours(1);
with_now_override(base, move || {
let expected_time = format_retry_timestamp(&resets_at);
let err = UsageLimitReachedError {
plan_type: Some(PlanType::Known(KnownPlan::Team)),
resets_at: Some(resets_at),
rate_limits: Some(rate_limit_snapshot()),
};
let expected = format!(
"You've hit your usage limit. To get more access now, send a request to your admin or try again at {expected_time}."
assert_eq!(
err.to_string(),
"You've hit your usage limit. To get more access now, send a request to your admin or try again in 1 hour."
);
assert_eq!(err.to_string(), expected);
});
}
@@ -591,7 +591,7 @@ mod tests {
#[test]
fn usage_limit_reached_error_formats_default_for_other_plans() {
let err = UsageLimitReachedError {
plan_type: Some(PlanType::Known(KnownPlan::Enterprise)),
plan_type: Some(PlanType::Known(KnownPlan::Pro)),
resets_at: None,
rate_limits: Some(rate_limit_snapshot()),
};
@@ -601,37 +601,20 @@ mod tests {
);
}
#[test]
fn usage_limit_reached_error_formats_pro_plan_with_reset() {
let base = Utc.with_ymd_and_hms(2024, 1, 1, 0, 0, 0).unwrap();
let resets_at = base + ChronoDuration::hours(1);
with_now_override(base, move || {
let expected_time = format_retry_timestamp(&resets_at);
let err = UsageLimitReachedError {
plan_type: Some(PlanType::Known(KnownPlan::Pro)),
resets_at: Some(resets_at),
rate_limits: Some(rate_limit_snapshot()),
};
let expected = format!(
"You've hit your usage limit. Visit chatgpt.com/codex/settings/usage to purchase more credits or try again at {expected_time}."
);
assert_eq!(err.to_string(), expected);
});
}
#[test]
fn usage_limit_reached_includes_minutes_when_available() {
let base = Utc.with_ymd_and_hms(2024, 1, 1, 0, 0, 0).unwrap();
let resets_at = base + ChronoDuration::minutes(5);
with_now_override(base, move || {
let expected_time = format_retry_timestamp(&resets_at);
let err = UsageLimitReachedError {
plan_type: None,
resets_at: Some(resets_at),
rate_limits: Some(rate_limit_snapshot()),
};
let expected = format!("You've hit your usage limit. Try again at {expected_time}.");
assert_eq!(err.to_string(), expected);
assert_eq!(
err.to_string(),
"You've hit your usage limit. Try again in 5 minutes."
);
});
}
@@ -640,16 +623,15 @@ mod tests {
let base = Utc.with_ymd_and_hms(2024, 1, 1, 0, 0, 0).unwrap();
let resets_at = base + ChronoDuration::hours(3) + ChronoDuration::minutes(32);
with_now_override(base, move || {
let expected_time = format_retry_timestamp(&resets_at);
let err = UsageLimitReachedError {
plan_type: Some(PlanType::Known(KnownPlan::Plus)),
resets_at: Some(resets_at),
rate_limits: Some(rate_limit_snapshot()),
};
let expected = format!(
"You've hit your usage limit. Upgrade to Pro (https://openai.com/chatgpt/pricing), visit chatgpt.com/codex/settings/usage to purchase more credits or try again at {expected_time}."
assert_eq!(
err.to_string(),
"You've hit your usage limit. Upgrade to Pro (https://openai.com/chatgpt/pricing) or try again in 3 hours 32 minutes."
);
assert_eq!(err.to_string(), expected);
});
}
@@ -659,14 +641,15 @@ mod tests {
let resets_at =
base + ChronoDuration::days(2) + ChronoDuration::hours(3) + ChronoDuration::minutes(5);
with_now_override(base, move || {
let expected_time = format_retry_timestamp(&resets_at);
let err = UsageLimitReachedError {
plan_type: None,
resets_at: Some(resets_at),
rate_limits: Some(rate_limit_snapshot()),
};
let expected = format!("You've hit your usage limit. Try again at {expected_time}.");
assert_eq!(err.to_string(), expected);
assert_eq!(
err.to_string(),
"You've hit your usage limit. Try again in 2 days 3 hours 5 minutes."
);
});
}
@@ -675,14 +658,15 @@ mod tests {
let base = Utc.with_ymd_and_hms(2024, 1, 1, 0, 0, 0).unwrap();
let resets_at = base + ChronoDuration::seconds(30);
with_now_override(base, move || {
let expected_time = format_retry_timestamp(&resets_at);
let err = UsageLimitReachedError {
plan_type: None,
resets_at: Some(resets_at),
rate_limits: Some(rate_limit_snapshot()),
};
let expected = format!("You've hit your usage limit. Try again at {expected_time}.");
assert_eq!(err.to_string(), expected);
assert_eq!(
err.to_string(),
"You've hit your usage limit. Try again in less than a minute."
);
});
}
}

Some files were not shown because too many files have changed in this diff.