Compare commits

1 Commit

Author: Ahmed Ibrahim
SHA1: c12b629417
Message: Remove Op::UserInput and defer user-turn defaults
Date: 2026-01-26 19:41:02 -08:00

386 changed files with 5152 additions and 26419 deletions

View File

@@ -1,4 +1,3 @@
# Without this, Bazel will consider BUILD.bazel files in
# .git/sl/origbackups (which can be populated by Sapling SCM).
.git
codex-rs/target

View File

@@ -1,19 +1,13 @@
common --repo_env=BAZEL_DO_NOT_DETECT_CPP_TOOLCHAIN=1
common --repo_env=BAZEL_NO_APPLE_CPP_TOOLCHAIN=1
# Dummy xcode config so we don't need to build xcode_locator in repo rule.
common --xcode_version_config=//:disable_xcode
common --disk_cache=~/.cache/bazel-disk-cache
common --repo_contents_cache=~/.cache/bazel-repo-contents-cache
common --repository_cache=~/.cache/bazel-repo-cache
common --remote_cache_compression
startup --experimental_remote_repo_contents_cache
common --experimental_platform_in_output_dir
# Runfiles strategy rationale: codex-rs/utils/cargo-bin/README.md
common --noenable_runfiles
common --enable_platform_specific_config
# TODO(zbarsky): We need to untangle these libc constraints to get linux remote builds working.
common:linux --host_platform=//:local
@@ -49,3 +43,4 @@ common --jobs=30
common:remote --extra_execution_platforms=//:rbe
common:remote --remote_executor=grpcs://remote.buildbuddy.io
common:remote --jobs=800

View File

@@ -1,6 +1,6 @@
[codespell]
# Ref: https://github.com/codespell-project/codespell#using-a-config-file
skip = .git*,vendor,*-lock.yaml,*.lock,.codespellrc,*test.ts,*.jsonl,frame*.txt,*.snap,*.snap.new
skip = .git*,vendor,*-lock.yaml,*.lock,.codespellrc,*test.ts,*.jsonl,frame*.txt
check-hidden = true
ignore-regex = ^\s*"image/\S+": ".*|\b(afterAll)\b
ignore-words-list = ratatui,ser,iTerm,iterm2,iterm

View File

@@ -59,7 +59,7 @@ jobs:
working-directory: codex-rs
steps:
- uses: actions/checkout@v6
- uses: dtolnay/rust-toolchain@1.93
- uses: dtolnay/rust-toolchain@1.92
with:
components: rustfmt
- name: cargo fmt
@@ -77,7 +77,7 @@ jobs:
working-directory: codex-rs
steps:
- uses: actions/checkout@v6
- uses: dtolnay/rust-toolchain@1.93
- uses: dtolnay/rust-toolchain@1.92
- uses: taiki-e/install-action@44c6d64aa62cd779e873306675c7a58e86d6d532 # v2
with:
tool: cargo-shear
@@ -177,31 +177,11 @@ jobs:
steps:
- uses: actions/checkout@v6
- name: Install UBSan runtime (musl)
if: ${{ matrix.target == 'x86_64-unknown-linux-musl' || matrix.target == 'aarch64-unknown-linux-musl' }}
shell: bash
run: |
set -euo pipefail
if command -v apt-get >/dev/null 2>&1; then
sudo apt-get update -y
sudo DEBIAN_FRONTEND=noninteractive apt-get install -y libubsan1
fi
- uses: dtolnay/rust-toolchain@1.93
- uses: dtolnay/rust-toolchain@1.92
with:
targets: ${{ matrix.target }}
components: clippy
- if: ${{ matrix.target == 'x86_64-unknown-linux-musl' || matrix.target == 'aarch64-unknown-linux-musl'}}
name: Use hermetic Cargo home (musl)
shell: bash
run: |
set -euo pipefail
cargo_home="${GITHUB_WORKSPACE}/.cargo-home"
mkdir -p "${cargo_home}/bin"
echo "CARGO_HOME=${cargo_home}" >> "$GITHUB_ENV"
echo "${cargo_home}/bin" >> "$GITHUB_PATH"
: > "${cargo_home}/config.toml"
- name: Compute lockfile hash
id: lockhash
working-directory: codex-rs
@@ -222,10 +202,6 @@ jobs:
~/.cargo/registry/index/
~/.cargo/registry/cache/
~/.cargo/git/db/
${{ github.workspace }}/.cargo-home/bin/
${{ github.workspace }}/.cargo-home/registry/index/
${{ github.workspace }}/.cargo-home/registry/cache/
${{ github.workspace }}/.cargo-home/git/db/
key: cargo-home-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-${{ steps.lockhash.outputs.hash }}-${{ steps.lockhash.outputs.toolchain_hash }}
restore-keys: |
cargo-home-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-
@@ -268,14 +244,6 @@ jobs:
sccache-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-${{ steps.lockhash.outputs.hash }}-
sccache-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-
- if: ${{ matrix.target == 'x86_64-unknown-linux-musl' || matrix.target == 'aarch64-unknown-linux-musl'}}
name: Disable sccache wrapper (musl)
shell: bash
run: |
set -euo pipefail
echo "RUSTC_WRAPPER=" >> "$GITHUB_ENV"
echo "RUSTC_WORKSPACE_WRAPPER=" >> "$GITHUB_ENV"
- if: ${{ matrix.target == 'x86_64-unknown-linux-musl' || matrix.target == 'aarch64-unknown-linux-musl'}}
name: Prepare APT cache directories (musl)
shell: bash
@@ -309,58 +277,6 @@ jobs:
shell: bash
run: bash "${GITHUB_WORKSPACE}/.github/scripts/install-musl-build-tools.sh"
- if: ${{ matrix.target == 'x86_64-unknown-linux-musl' || matrix.target == 'aarch64-unknown-linux-musl'}}
name: Configure rustc UBSan wrapper (musl host)
shell: bash
run: |
set -euo pipefail
ubsan=""
if command -v ldconfig >/dev/null 2>&1; then
ubsan="$(ldconfig -p | grep -m1 'libubsan\.so\.1' | sed -E 's/.*=> (.*)$/\1/')"
fi
wrapper_root="${RUNNER_TEMP:-/tmp}"
wrapper="${wrapper_root}/rustc-ubsan-wrapper"
cat > "${wrapper}" <<EOF
#!/usr/bin/env bash
set -euo pipefail
if [[ -n "${ubsan}" ]]; then
export LD_PRELOAD="${ubsan}\${LD_PRELOAD:+:\${LD_PRELOAD}}"
fi
exec "\$1" "\${@:2}"
EOF
chmod +x "${wrapper}"
echo "RUSTC_WRAPPER=${wrapper}" >> "$GITHUB_ENV"
echo "RUSTC_WORKSPACE_WRAPPER=" >> "$GITHUB_ENV"
- if: ${{ matrix.target == 'x86_64-unknown-linux-musl' || matrix.target == 'aarch64-unknown-linux-musl'}}
name: Clear sanitizer flags (musl)
shell: bash
run: |
set -euo pipefail
# Clear global Rust flags so host/proc-macro builds don't pull in UBSan.
echo "RUSTFLAGS=" >> "$GITHUB_ENV"
echo "CARGO_ENCODED_RUSTFLAGS=" >> "$GITHUB_ENV"
echo "RUSTDOCFLAGS=" >> "$GITHUB_ENV"
# Override any runner-level Cargo config rustflags as well.
echo "CARGO_BUILD_RUSTFLAGS=" >> "$GITHUB_ENV"
echo "CARGO_TARGET_X86_64_UNKNOWN_LINUX_GNU_RUSTFLAGS=" >> "$GITHUB_ENV"
echo "CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_RUSTFLAGS=" >> "$GITHUB_ENV"
echo "CARGO_TARGET_X86_64_UNKNOWN_LINUX_MUSL_RUSTFLAGS=" >> "$GITHUB_ENV"
echo "CARGO_TARGET_AARCH64_UNKNOWN_LINUX_MUSL_RUSTFLAGS=" >> "$GITHUB_ENV"
sanitize_flags() {
local input="$1"
input="${input//-fsanitize=undefined/}"
input="${input//-fno-sanitize-recover=undefined/}"
input="${input//-fno-sanitize-trap=undefined/}"
echo "$input"
}
cflags="$(sanitize_flags "${CFLAGS-}")"
cxxflags="$(sanitize_flags "${CXXFLAGS-}")"
echo "CFLAGS=${cflags}" >> "$GITHUB_ENV"
echo "CXXFLAGS=${cxxflags}" >> "$GITHUB_ENV"
- name: Install cargo-chef
if: ${{ matrix.profile == 'release' }}
uses: taiki-e/install-action@44c6d64aa62cd779e873306675c7a58e86d6d532 # v2
@@ -406,10 +322,6 @@ jobs:
~/.cargo/registry/index/
~/.cargo/registry/cache/
~/.cargo/git/db/
${{ github.workspace }}/.cargo-home/bin/
${{ github.workspace }}/.cargo-home/registry/index/
${{ github.workspace }}/.cargo-home/registry/cache/
${{ github.workspace }}/.cargo-home/git/db/
key: cargo-home-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-${{ steps.lockhash.outputs.hash }}-${{ steps.lockhash.outputs.toolchain_hash }}
- name: Save sccache cache (fallback)
@@ -510,7 +422,7 @@ jobs:
- name: Install DotSlash
uses: facebook/install-dotslash@v2
- uses: dtolnay/rust-toolchain@1.93
- uses: dtolnay/rust-toolchain@1.92
with:
targets: ${{ matrix.target }}

View File

@@ -21,6 +21,7 @@ jobs:
steps:
- uses: actions/checkout@v6
- uses: dtolnay/rust-toolchain@1.92
- name: Validate tag matches Cargo.toml version
shell: bash
run: |
@@ -89,30 +90,10 @@ jobs:
steps:
- uses: actions/checkout@v6
- name: Install UBSan runtime (musl)
if: ${{ matrix.target == 'x86_64-unknown-linux-musl' || matrix.target == 'aarch64-unknown-linux-musl' }}
shell: bash
run: |
set -euo pipefail
if command -v apt-get >/dev/null 2>&1; then
sudo apt-get update -y
sudo DEBIAN_FRONTEND=noninteractive apt-get install -y libubsan1
fi
- uses: dtolnay/rust-toolchain@1.93
- uses: dtolnay/rust-toolchain@1.92
with:
targets: ${{ matrix.target }}
- if: ${{ matrix.target == 'x86_64-unknown-linux-musl' || matrix.target == 'aarch64-unknown-linux-musl'}}
name: Use hermetic Cargo home (musl)
shell: bash
run: |
set -euo pipefail
cargo_home="${GITHUB_WORKSPACE}/.cargo-home"
mkdir -p "${cargo_home}/bin"
echo "CARGO_HOME=${cargo_home}" >> "$GITHUB_ENV"
echo "${cargo_home}/bin" >> "$GITHUB_PATH"
: > "${cargo_home}/config.toml"
- uses: actions/cache@v5
with:
path: |
@@ -120,10 +101,6 @@ jobs:
~/.cargo/registry/index/
~/.cargo/registry/cache/
~/.cargo/git/db/
${{ github.workspace }}/.cargo-home/bin/
${{ github.workspace }}/.cargo-home/registry/index/
${{ github.workspace }}/.cargo-home/registry/cache/
${{ github.workspace }}/.cargo-home/git/db/
${{ github.workspace }}/codex-rs/target/
key: cargo-${{ matrix.runner }}-${{ matrix.target }}-release-${{ hashFiles('**/Cargo.lock') }}
@@ -139,58 +116,6 @@ jobs:
TARGET: ${{ matrix.target }}
run: bash "${GITHUB_WORKSPACE}/.github/scripts/install-musl-build-tools.sh"
- if: ${{ matrix.target == 'x86_64-unknown-linux-musl' || matrix.target == 'aarch64-unknown-linux-musl'}}
name: Configure rustc UBSan wrapper (musl host)
shell: bash
run: |
set -euo pipefail
ubsan=""
if command -v ldconfig >/dev/null 2>&1; then
ubsan="$(ldconfig -p | grep -m1 'libubsan\.so\.1' | sed -E 's/.*=> (.*)$/\1/')"
fi
wrapper_root="${RUNNER_TEMP:-/tmp}"
wrapper="${wrapper_root}/rustc-ubsan-wrapper"
cat > "${wrapper}" <<EOF
#!/usr/bin/env bash
set -euo pipefail
if [[ -n "${ubsan}" ]]; then
export LD_PRELOAD="${ubsan}\${LD_PRELOAD:+:\${LD_PRELOAD}}"
fi
exec "\$1" "\${@:2}"
EOF
chmod +x "${wrapper}"
echo "RUSTC_WRAPPER=${wrapper}" >> "$GITHUB_ENV"
echo "RUSTC_WORKSPACE_WRAPPER=" >> "$GITHUB_ENV"
- if: ${{ matrix.target == 'x86_64-unknown-linux-musl' || matrix.target == 'aarch64-unknown-linux-musl'}}
name: Clear sanitizer flags (musl)
shell: bash
run: |
set -euo pipefail
# Clear global Rust flags so host/proc-macro builds don't pull in UBSan.
echo "RUSTFLAGS=" >> "$GITHUB_ENV"
echo "CARGO_ENCODED_RUSTFLAGS=" >> "$GITHUB_ENV"
echo "RUSTDOCFLAGS=" >> "$GITHUB_ENV"
# Override any runner-level Cargo config rustflags as well.
echo "CARGO_BUILD_RUSTFLAGS=" >> "$GITHUB_ENV"
echo "CARGO_TARGET_X86_64_UNKNOWN_LINUX_GNU_RUSTFLAGS=" >> "$GITHUB_ENV"
echo "CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_RUSTFLAGS=" >> "$GITHUB_ENV"
echo "CARGO_TARGET_X86_64_UNKNOWN_LINUX_MUSL_RUSTFLAGS=" >> "$GITHUB_ENV"
echo "CARGO_TARGET_AARCH64_UNKNOWN_LINUX_MUSL_RUSTFLAGS=" >> "$GITHUB_ENV"
sanitize_flags() {
local input="$1"
input="${input//-fsanitize=undefined/}"
input="${input//-fno-sanitize-recover=undefined/}"
input="${input//-fno-sanitize-trap=undefined/}"
echo "$input"
}
cflags="$(sanitize_flags "${CFLAGS-}")"
cxxflags="$(sanitize_flags "${CXXFLAGS-}")"
echo "CFLAGS=${cflags}" >> "$GITHUB_ENV"
echo "CXXFLAGS=${cxxflags}" >> "$GITHUB_ENV"
- name: Cargo build
shell: bash
run: |
@@ -327,7 +252,6 @@ jobs:
# Path that contains the uncompressed binaries for the current
# ${{ matrix.target }}
dest="dist/${{ matrix.target }}"
repo_root=$PWD
# We want to ship the raw Windows executables in the GitHub Release
# in addition to the compressed archives. Keep the originals for
@@ -381,7 +305,7 @@ jobs:
cp "$setup_src" "$bundle_dir/codex-windows-sandbox-setup.exe"
# Use an absolute path so bundle zips land in the real dist
# dir even when 7z runs from a temp directory.
(cd "$bundle_dir" && 7z a "$repo_root/$dest/${base}.zip" .)
(cd "$bundle_dir" && 7z a "$(pwd)/$dest/${base}.zip" .)
else
echo "warning: missing sandbox binaries; falling back to single-binary zip"
echo "warning: expected $runner_src and $setup_src"

View File

@@ -24,7 +24,7 @@ jobs:
node-version: 22
cache: pnpm
- uses: dtolnay/rust-toolchain@1.93
- uses: dtolnay/rust-toolchain@1.92
- name: build codex
run: cargo build --bin codex

View File

@@ -93,17 +93,7 @@ jobs:
- name: Checkout repository
uses: actions/checkout@v6
- name: Install UBSan runtime (musl)
if: ${{ matrix.install_musl }}
shell: bash
run: |
set -euo pipefail
if command -v apt-get >/dev/null 2>&1; then
sudo apt-get update -y
sudo DEBIAN_FRONTEND=noninteractive apt-get install -y libubsan1
fi
- uses: dtolnay/rust-toolchain@1.93
- uses: dtolnay/rust-toolchain@1.92
with:
targets: ${{ matrix.target }}
@@ -119,58 +109,6 @@ jobs:
TARGET: ${{ matrix.target }}
run: bash "${GITHUB_WORKSPACE}/.github/scripts/install-musl-build-tools.sh"
- if: ${{ matrix.install_musl }}
name: Configure rustc UBSan wrapper (musl host)
shell: bash
run: |
set -euo pipefail
ubsan=""
if command -v ldconfig >/dev/null 2>&1; then
ubsan="$(ldconfig -p | grep -m1 'libubsan\.so\.1' | sed -E 's/.*=> (.*)$/\1/')"
fi
wrapper_root="${RUNNER_TEMP:-/tmp}"
wrapper="${wrapper_root}/rustc-ubsan-wrapper"
cat > "${wrapper}" <<EOF
#!/usr/bin/env bash
set -euo pipefail
if [[ -n "${ubsan}" ]]; then
export LD_PRELOAD="${ubsan}\${LD_PRELOAD:+:\${LD_PRELOAD}}"
fi
exec "\$1" "\${@:2}"
EOF
chmod +x "${wrapper}"
echo "RUSTC_WRAPPER=${wrapper}" >> "$GITHUB_ENV"
echo "RUSTC_WORKSPACE_WRAPPER=" >> "$GITHUB_ENV"
- if: ${{ matrix.install_musl }}
name: Clear sanitizer flags (musl)
shell: bash
run: |
set -euo pipefail
# Clear global Rust flags so host/proc-macro builds don't pull in UBSan.
echo "RUSTFLAGS=" >> "$GITHUB_ENV"
echo "CARGO_ENCODED_RUSTFLAGS=" >> "$GITHUB_ENV"
echo "RUSTDOCFLAGS=" >> "$GITHUB_ENV"
# Override any runner-level Cargo config rustflags as well.
echo "CARGO_BUILD_RUSTFLAGS=" >> "$GITHUB_ENV"
echo "CARGO_TARGET_X86_64_UNKNOWN_LINUX_GNU_RUSTFLAGS=" >> "$GITHUB_ENV"
echo "CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_RUSTFLAGS=" >> "$GITHUB_ENV"
echo "CARGO_TARGET_X86_64_UNKNOWN_LINUX_MUSL_RUSTFLAGS=" >> "$GITHUB_ENV"
echo "CARGO_TARGET_AARCH64_UNKNOWN_LINUX_MUSL_RUSTFLAGS=" >> "$GITHUB_ENV"
sanitize_flags() {
local input="$1"
input="${input//-fsanitize=undefined/}"
input="${input//-fno-sanitize-recover=undefined/}"
input="${input//-fno-sanitize-trap=undefined/}"
echo "$input"
}
cflags="$(sanitize_flags "${CFLAGS-}")"
cxxflags="$(sanitize_flags "${CXXFLAGS-}")"
echo "CFLAGS=${cflags}" >> "$GITHUB_ENV"
echo "CXXFLAGS=${cxxflags}" >> "$GITHUB_ENV"
- name: Build exec server binaries
run: cargo build --release --target ${{ matrix.target }} --bin codex-exec-mcp-server --bin codex-execve-wrapper
@@ -344,6 +282,7 @@ jobs:
- name: Setup pnpm
uses: pnpm/action-setup@v4
with:
version: 10.8.1
run_install: false
- name: Setup Node.js
@@ -436,6 +375,12 @@ jobs:
id-token: write
contents: read
steps:
- name: Setup pnpm
uses: pnpm/action-setup@v4
with:
version: 10.8.1
run_install: false
- name: Setup Node.js
uses: actions/setup-node@v6
with:
@@ -443,7 +388,6 @@ jobs:
registry-url: https://registry.npmjs.org
scope: "@openai"
# Trusted publishing requires npm CLI version 11.5.1 or later.
- name: Update npm
run: npm install -g npm@latest

View File

@@ -11,7 +11,6 @@ In the codex-rs folder where the rust code lives:
- Always collapse if statements per https://rust-lang.github.io/rust-clippy/master/index.html#collapsible_if
- Always inline format! args when possible per https://rust-lang.github.io/rust-clippy/master/index.html#uninlined_format_args
- Use method references over closures when possible per https://rust-lang.github.io/rust-clippy/master/index.html#redundant_closure_for_method_calls
- When possible, make `match` statements exhaustive and avoid wildcard arms.
- When writing tests, prefer comparing the equality of entire objects over fields one by one.
- When making a change that adds or changes an API, ensure that the documentation in the `docs/` folder is up to date if applicable.
- If you change `ConfigToml` or nested config types, run `just write-config-schema` to update `codex-rs/core/config.schema.json`.
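As an aside, a minimal sketch of the exhaustive-`match` and whole-object-comparison guidelines above (hypothetical types, not from this repo; assumes the `pretty_assertions` dev-dependency the workspace already uses):

```rust
use pretty_assertions::assert_eq;

#[derive(Debug, PartialEq)]
enum AuthKind {
    ApiKey,
    ChatGpt,
}

#[derive(Debug, PartialEq)]
struct Account {
    kind: AuthKind,
    email: Option<String>,
}

fn label(kind: &AuthKind) -> &'static str {
    // Exhaustive match: no `_` wildcard arm, so adding a new variant
    // becomes a compile error at every call site until it is handled.
    match kind {
        AuthKind::ApiKey => "apikey",
        AuthKind::ChatGpt => "chatgpt",
    }
}

#[test]
fn account_roundtrip() {
    let account = Account {
        kind: AuthKind::ChatGpt,
        email: Some("user@example.com".to_string()),
    };
    assert_eq!("chatgpt", label(&account.kind));
    // Compare the whole object instead of asserting field by field.
    assert_eq!(
        Account {
            kind: AuthKind::ChatGpt,
            email: Some("user@example.com".to_string()),
        },
        account
    );
}
```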

View File

@@ -1,7 +1,3 @@
load("@apple_support//xcode:xcode_config.bzl", "xcode_config")
xcode_config(name = "disable_xcode")
# We mark the local platform as glibc-compatible so that rust can grab a toolchain for us.
# TODO(zbarsky): Upstream a better libc constraint into rules_rust.
# We only enable this on linux though for sanity, and because it breaks remote execution.

View File

@@ -27,8 +27,6 @@ register_toolchains(
"@toolchains_llvm_bootstrapped//toolchain:all",
)
# Needed to disable xcode...
bazel_dep(name = "apple_support", version = "2.1.0")
bazel_dep(name = "rules_cc", version = "0.2.16")
bazel_dep(name = "rules_platform", version = "0.1.0")
bazel_dep(name = "rules_rust", version = "0.68.1")
@@ -55,7 +53,7 @@ rust = use_extension("@rules_rust//rust:extensions.bzl", "rust")
rust.toolchain(
edition = "2024",
extra_target_triples = RUST_TRIPLES,
versions = ["1.93.0"],
versions = ["1.90.0"],
)
use_repo(rust, "rust_toolchains")
@@ -69,11 +67,6 @@ crate.from_cargo(
cargo_toml = "//codex-rs:Cargo.toml",
platform_triples = RUST_TRIPLES,
)
crate.annotation(
crate = "nucleo-matcher",
strip_prefix = "matcher",
version = "0.3.1",
)
bazel_dep(name = "openssl", version = "3.5.4.bcr.0")
@@ -92,11 +85,6 @@ crate.annotation(
inject_repo(crate, "openssl")
crate.annotation(
crate = "runfiles",
workspace_cargo_toml = "rust/runfiles/Cargo.toml",
)
# Fix readme inclusions
crate.annotation(
crate = "windows-link",

MODULE.bazel.lock (generated, 119 changed lines)

File diff suppressed because one or more lines are too long

PNPM.md (new file, 70 lines)
View File

@@ -0,0 +1,70 @@
# Migration to pnpm
This project has been migrated from npm to pnpm to improve dependency management and developer experience.
## Why pnpm?
- **Faster installation**: pnpm is significantly faster than npm and yarn
- **Disk space savings**: pnpm uses a content-addressable store to avoid duplication
- **Phantom dependency prevention**: pnpm creates a strict node_modules structure
- **Native workspaces support**: simplified monorepo management
## How to use pnpm
### Installation
```bash
# Global installation of pnpm
npm install -g pnpm@10.8.1
# Or with corepack (available with Node.js 22+)
corepack enable
corepack prepare pnpm@10.8.1 --activate
```
### Common commands
| npm command | pnpm equivalent |
| --------------- | ---------------- |
| `npm install` | `pnpm install` |
| `npm run build` | `pnpm run build` |
| `npm test` | `pnpm test` |
| `npm run lint` | `pnpm run lint` |
### Workspace-specific commands
| Action | Command |
| ------------------------------------------ | ---------------------------------------- |
| Run a command in a specific package | `pnpm --filter @openai/codex run build` |
| Install a dependency in a specific package | `pnpm --filter @openai/codex add lodash` |
| Run a command in all packages | `pnpm -r run test` |
## Monorepo structure
```
codex/
├── pnpm-workspace.yaml # Workspace configuration
├── .npmrc # pnpm configuration
├── package.json # Root dependencies and scripts
├── codex-cli/ # Main package
│ └── package.json # codex-cli specific dependencies
└── docs/ # Documentation (future package)
```
## Configuration files
- **pnpm-workspace.yaml**: Defines the packages included in the monorepo
- **.npmrc**: Configures pnpm behavior
- **Root package.json**: Contains shared scripts and dependencies
## CI/CD
CI/CD workflows have been updated to use pnpm instead of npm. Make sure your CI environments use pnpm 10.8.1 or higher.
## Known issues
If you encounter issues with pnpm, try the following solutions:
1. Remove the `node_modules` folder and `pnpm-lock.yaml` file, then run `pnpm install`
2. Make sure you're using pnpm 10.8.1 or higher
3. Verify that Node.js 22 or higher is installed

codex-cli/package-lock.json (generated, new file, 18 lines)
View File

@@ -0,0 +1,18 @@
{
"name": "@openai/codex",
"version": "0.0.0-dev",
"lockfileVersion": 3,
"packages": {
"": {
"name": "@openai/codex",
"version": "0.0.0-dev",
"license": "Apache-2.0",
"bin": {
"codex": "bin/codex.js"
},
"engines": {
"node": ">=16"
}
}
}
}

View File

@@ -17,6 +17,5 @@
"type": "git",
"url": "git+https://github.com/openai/codex.git",
"directory": "codex-cli"
},
"packageManager": "pnpm@10.28.2+sha512.41872f037ad22f7348e3b1debbaf7e867cfd448f2726d9cf74c08f19507c31d2c8e7a11525b983febc2df640b5438dee6023ebb1f84ed43cc2d654d2bc326264"
}
}

codex-rs/Cargo.lock (generated, 624 changed lines)

File diff suppressed because it is too large

View File

@@ -11,7 +11,6 @@ members = [
"arg0",
"feedback",
"codex-backend-openapi-models",
"cloud-requirements",
"cloud-tasks",
"cloud-tasks-client",
"cli",
@@ -48,12 +47,11 @@ members = [
"utils/string",
"codex-client",
"codex-api",
"state",
]
resolver = "2"
[workspace.package]
version = "0.93.0-alpha.21"
version = "0.0.0"
# Track the edition for all workspace crates in one place. Individual
# crates can still override this value, but keeping it here means new
# crates created with `cargo new -w ...` automatically inherit the 2024
@@ -72,7 +70,6 @@ codex-apply-patch = { path = "apply-patch" }
codex-arg0 = { path = "arg0" }
codex-async-utils = { path = "async-utils" }
codex-backend-client = { path = "backend-client" }
codex-cloud-requirements = { path = "cloud-requirements" }
codex-chatgpt = { path = "chatgpt" }
codex-cli = { path = "cli"}
codex-client = { path = "codex-client" }
@@ -94,7 +91,6 @@ codex-process-hardening = { path = "process-hardening" }
codex-protocol = { path = "protocol" }
codex-responses-api-proxy = { path = "responses-api-proxy" }
codex-rmcp-client = { path = "rmcp-client" }
codex-state = { path = "state" }
codex-stdio-to-uds = { path = "stdio-to-uds" }
codex-tui = { path = "tui" }
codex-utils-absolute-path = { path = "utils/absolute-path" }
@@ -130,7 +126,6 @@ clap = "4"
clap_complete = "4"
color-eyre = "0.6.3"
crossterm = "0.28.1"
crossbeam-channel = "0.5.15"
ctor = "0.6.3"
derive_more = "2"
diffy = "0.4.2"
@@ -164,7 +159,7 @@ maplit = "1.0.2"
mime_guess = "2.0.5"
multimap = "0.10.0"
notify = "8.2.0"
nucleo = { git = "https://github.com/helix-editor/nucleo.git", rev = "4253de9faabb4e5c6d81d946a5e35a90f87347ee" }
nucleo-matcher = "0.3.1"
once_cell = "1.20.2"
openssl-sys = "*"
opentelemetry = "0.31.0"
@@ -188,7 +183,6 @@ regex = "1.12.2"
regex-lite = "0.1.8"
reqwest = "0.12"
rmcp = { version = "0.12.0", default-features = false }
runfiles = { git = "https://github.com/dzbarsky/rules_rust", rev = "b56cbaa8465e74127f1ea216f813cd377295ad81" }
schemars = "0.8.22"
seccompiler = "0.5.0"
sentry = "0.46.0"
@@ -204,7 +198,6 @@ semver = "1.0"
shlex = "1.3.0"
similar = "2.7.0"
socket2 = "0.6.1"
sqlx = { version = "0.8.6", default-features = false, features = ["chrono", "json", "macros", "migrate", "runtime-tokio-rustls", "sqlite", "time", "uuid"] }
starlark = "0.13.0"
strum = "0.27.2"
strum_macros = "0.27.2"
@@ -223,7 +216,7 @@ tokio-tungstenite = { version = "0.28.0", features = ["proxy", "rustls-tls-nativ
tokio-util = "0.7.18"
toml = "0.9.5"
toml_edit = "0.24.0"
tracing = "0.1.44"
tracing = "0.1.43"
tracing-appender = "0.2.3"
tracing-subscriber = "0.3.22"
tracing-test = "0.2.5"

View File

@@ -23,22 +23,11 @@ impl GitSha {
}
}
/// Authentication mode for OpenAI-backed providers.
#[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq, Eq, Display, JsonSchema, TS)]
#[serde(rename_all = "lowercase")]
pub enum AuthMode {
/// OpenAI API key provided by the caller and stored by Codex.
ApiKey,
/// ChatGPT OAuth managed by Codex (tokens persisted and refreshed by Codex).
ChatGPT,
/// [UNSTABLE] FOR OPENAI INTERNAL USE ONLY - DO NOT USE.
///
/// ChatGPT auth tokens are supplied by an external host app and are only
/// stored in memory. Token refresh must be handled by the external host app.
#[serde(rename = "chatgptAuthTokens")]
#[ts(rename = "chatgptAuthTokens")]
#[strum(serialize = "chatgptAuthTokens")]
ChatgptAuthTokens,
}
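A side note on the wire format of the enum above (the pre-change version, including the variant this commit removes): `rename_all = "lowercase"` simply lowercases the variant name, while a per-variant `rename` overrides it. A minimal standalone sketch, not the crate's own code:

```rust
use serde::Serialize;

// Simplified mirror of AuthMode; the real type also derives
// Deserialize, Display, JsonSchema, and TS.
#[derive(Serialize)]
#[serde(rename_all = "lowercase")]
enum AuthMode {
    ApiKey,
    ChatGPT,
    #[serde(rename = "chatgptAuthTokens")]
    ChatgptAuthTokens,
}

fn main() -> serde_json::Result<()> {
    // `lowercase` lowercases the variant name verbatim...
    assert_eq!(serde_json::to_string(&AuthMode::ApiKey)?, "\"apikey\"");
    assert_eq!(serde_json::to_string(&AuthMode::ChatGPT)?, "\"chatgpt\"");
    // ...while the per-variant rename takes precedence.
    assert_eq!(
        serde_json::to_string(&AuthMode::ChatgptAuthTokens)?,
        "\"chatgptAuthTokens\""
    );
    Ok(())
}
```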
/// Generates an `enum ClientRequest` where each variant is a request that the
@@ -128,10 +117,6 @@ client_request_definitions! {
params: v2::ThreadArchiveParams,
response: v2::ThreadArchiveResponse,
},
ThreadSetName => "thread/name/set" {
params: v2::ThreadSetNameParams,
response: v2::ThreadSetNameResponse,
},
ThreadUnarchive => "thread/unarchive" {
params: v2::ThreadUnarchiveParams,
response: v2::ThreadUnarchiveResponse,
@@ -549,11 +534,6 @@ server_request_definitions! {
response: v2::DynamicToolCallResponse,
},
ChatgptAuthTokensRefresh => "account/chatgptAuthTokens/refresh" {
params: v2::ChatgptAuthTokensRefreshParams,
response: v2::ChatgptAuthTokensRefreshResponse,
},
/// DEPRECATED APIs below
/// Request to approve a patch.
/// This request is used for Turns started via the legacy APIs (i.e. SendUserTurn, SendUserMessage).
@@ -598,7 +578,6 @@ server_notification_definitions! {
/// NEW NOTIFICATIONS
Error => "error" (v2::ErrorNotification),
ThreadStarted => "thread/started" (v2::ThreadStartedNotification),
ThreadNameUpdated => "thread/name/updated" (v2::ThreadNameUpdatedNotification),
ThreadTokenUsageUpdated => "thread/tokenUsage/updated" (v2::ThreadTokenUsageUpdatedNotification),
TurnStarted => "turn/started" (v2::TurnStartedNotification),
TurnCompleted => "turn/completed" (v2::TurnCompletedNotification),
@@ -619,7 +598,6 @@ server_notification_definitions! {
ReasoningSummaryTextDelta => "item/reasoning/summaryTextDelta" (v2::ReasoningSummaryTextDeltaNotification),
ReasoningSummaryPartAdded => "item/reasoning/summaryPartAdded" (v2::ReasoningSummaryPartAddedNotification),
ReasoningTextDelta => "item/reasoning/textDelta" (v2::ReasoningTextDeltaNotification),
/// Deprecated: Use `ContextCompaction` item type instead.
ContextCompacted => "thread/compacted" (v2::ContextCompactedNotification),
DeprecationNotice => "deprecationNotice" (v2::DeprecationNoticeNotification),
ConfigWarning => "configWarning" (v2::ConfigWarningNotification),
@@ -774,29 +752,6 @@ mod tests {
Ok(())
}
#[test]
fn serialize_chatgpt_auth_tokens_refresh_request() -> Result<()> {
let request = ServerRequest::ChatgptAuthTokensRefresh {
request_id: RequestId::Integer(8),
params: v2::ChatgptAuthTokensRefreshParams {
reason: v2::ChatgptAuthTokensRefreshReason::Unauthorized,
previous_account_id: Some("org-123".to_string()),
},
};
assert_eq!(
json!({
"method": "account/chatgptAuthTokens/refresh",
"id": 8,
"params": {
"reason": "unauthorized",
"previousAccountId": "org-123"
}
}),
serde_json::to_value(&request)?,
);
Ok(())
}
#[test]
fn serialize_get_account_rate_limits() -> Result<()> {
let request = ClientRequest::GetAccountRateLimits {
@@ -886,34 +841,10 @@ mod tests {
Ok(())
}
#[test]
fn serialize_account_login_chatgpt_auth_tokens() -> Result<()> {
let request = ClientRequest::LoginAccount {
request_id: RequestId::Integer(5),
params: v2::LoginAccountParams::ChatgptAuthTokens {
access_token: "access-token".to_string(),
id_token: "id-token".to_string(),
},
};
assert_eq!(
json!({
"method": "account/login/start",
"id": 5,
"params": {
"type": "chatgptAuthTokens",
"accessToken": "access-token",
"idToken": "id-token"
}
}),
serde_json::to_value(&request)?,
);
Ok(())
}
#[test]
fn serialize_get_account() -> Result<()> {
let request = ClientRequest::GetAccount {
request_id: RequestId::Integer(6),
request_id: RequestId::Integer(5),
params: v2::GetAccountParams {
refresh_token: false,
},
@@ -921,7 +852,7 @@ mod tests {
assert_eq!(
json!({
"method": "account/read",
"id": 6,
"id": 5,
"params": {
"refreshToken": false
}

View File

@@ -27,12 +27,10 @@ use codex_protocol::protocol::NetworkAccess as CoreNetworkAccess;
use codex_protocol::protocol::RateLimitSnapshot as CoreRateLimitSnapshot;
use codex_protocol::protocol::RateLimitWindow as CoreRateLimitWindow;
use codex_protocol::protocol::SessionSource as CoreSessionSource;
use codex_protocol::protocol::SkillDependencies as CoreSkillDependencies;
use codex_protocol::protocol::SkillErrorInfo as CoreSkillErrorInfo;
use codex_protocol::protocol::SkillInterface as CoreSkillInterface;
use codex_protocol::protocol::SkillMetadata as CoreSkillMetadata;
use codex_protocol::protocol::SkillScope as CoreSkillScope;
use codex_protocol::protocol::SkillToolDependency as CoreSkillToolDependency;
use codex_protocol::protocol::SubAgentSource as CoreSubAgentSource;
use codex_protocol::protocol::TokenUsage as CoreTokenUsage;
use codex_protocol::protocol::TokenUsageInfo as CoreTokenUsageInfo;
@@ -86,10 +84,6 @@ macro_rules! v2_enum_from_core {
pub enum CodexErrorInfo {
ContextWindowExceeded,
UsageLimitExceeded,
ModelCap {
model: String,
reset_after_seconds: Option<u64>,
},
HttpConnectionFailed {
#[serde(rename = "httpStatusCode")]
#[ts(rename = "httpStatusCode")]
@@ -126,13 +120,6 @@ impl From<CoreCodexErrorInfo> for CodexErrorInfo {
match value {
CoreCodexErrorInfo::ContextWindowExceeded => CodexErrorInfo::ContextWindowExceeded,
CoreCodexErrorInfo::UsageLimitExceeded => CodexErrorInfo::UsageLimitExceeded,
CoreCodexErrorInfo::ModelCap {
model,
reset_after_seconds,
} => CodexErrorInfo::ModelCap {
model,
reset_after_seconds,
},
CoreCodexErrorInfo::HttpConnectionFailed { http_status_code } => {
CodexErrorInfo::HttpConnectionFailed { http_status_code }
}
@@ -835,24 +822,6 @@ pub enum LoginAccountParams {
#[serde(rename = "chatgpt")]
#[ts(rename = "chatgpt")]
Chatgpt,
/// [UNSTABLE] FOR OPENAI INTERNAL USE ONLY - DO NOT USE.
/// The access token must contain the same scopes that Codex-managed ChatGPT auth tokens have.
#[serde(rename = "chatgptAuthTokens")]
#[ts(rename = "chatgptAuthTokens")]
ChatgptAuthTokens {
/// ID token (JWT) supplied by the client.
///
/// This token is used for identity and account metadata (email, plan type,
/// workspace id).
#[serde(rename = "idToken")]
#[ts(rename = "idToken")]
id_token: String,
/// Access token (JWT) supplied by the client.
/// This token is used for backend API requests.
#[serde(rename = "accessToken")]
#[ts(rename = "accessToken")]
access_token: String,
},
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
@@ -872,9 +841,6 @@ pub enum LoginAccountResponse {
/// URL the client should open in a browser to initiate the OAuth flow.
auth_url: String,
},
#[serde(rename = "chatgptAuthTokens", rename_all = "camelCase")]
#[ts(rename = "chatgptAuthTokens", rename_all = "camelCase")]
ChatgptAuthTokens {},
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
@@ -905,37 +871,6 @@ pub struct CancelLoginAccountResponse {
#[ts(export_to = "v2/")]
pub struct LogoutAccountResponse {}
#[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq, Eq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub enum ChatgptAuthTokensRefreshReason {
/// Codex attempted a backend request and received `401 Unauthorized`.
Unauthorized,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct ChatgptAuthTokensRefreshParams {
pub reason: ChatgptAuthTokensRefreshReason,
/// Workspace/account identifier that Codex was previously using.
///
/// Clients that manage multiple accounts/workspaces can use this as a hint
/// to refresh the token for the correct workspace.
///
/// This may be `null` when the prior ID token did not include a workspace
/// identifier (`chatgpt_account_id`) or when the token could not be parsed.
pub previous_account_id: Option<String>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct ChatgptAuthTokensRefreshResponse {
pub id_token: String,
pub access_token: String,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
@@ -947,11 +882,6 @@ pub struct GetAccountRateLimitsResponse {
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct GetAccountParams {
/// When `true`, requests a proactive token refresh before returning.
///
/// In managed auth mode this triggers the normal refresh-token flow. In
/// external auth mode this flag is ignored. Clients should refresh tokens
/// themselves and call `account/login/start` with `chatgptAuthTokens`.
#[serde(default)]
pub refresh_token: bool,
}
@@ -1071,8 +1001,6 @@ pub struct AppInfo {
pub name: String,
pub description: Option<String>,
pub logo_url: Option<String>,
pub logo_url_dark: Option<String>,
pub distribution_channel: Option<String>,
pub install_url: Option<String>,
#[serde(default)]
pub is_accessible: bool,
@@ -1295,14 +1223,6 @@ pub struct ThreadArchiveParams {
#[ts(export_to = "v2/")]
pub struct ThreadArchiveResponse {}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct ThreadSetNameParams {
pub thread_id: String,
pub name: String,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
@@ -1310,11 +1230,6 @@ pub struct ThreadUnarchiveParams {
pub thread_id: String,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct ThreadSetNameResponse {}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
@@ -1480,14 +1395,11 @@ pub struct SkillMetadata {
pub description: String,
#[serde(default, skip_serializing_if = "Option::is_none")]
#[ts(optional)]
/// Legacy short_description from SKILL.md. Prefer SKILL.json interface.short_description.
/// Legacy short_description from SKILL.md. Prefer SKILL.toml interface.short_description.
pub short_description: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
#[ts(optional)]
pub interface: Option<SkillInterface>,
#[serde(default, skip_serializing_if = "Option::is_none")]
#[ts(optional)]
pub dependencies: Option<SkillDependencies>,
pub path: PathBuf,
pub scope: SkillScope,
pub enabled: bool,
@@ -1511,35 +1423,6 @@ pub struct SkillInterface {
pub default_prompt: Option<String>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct SkillDependencies {
pub tools: Vec<SkillToolDependency>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct SkillToolDependency {
#[serde(rename = "type")]
#[ts(rename = "type")]
pub r#type: String,
pub value: String,
#[serde(default, skip_serializing_if = "Option::is_none")]
#[ts(optional)]
pub description: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
#[ts(optional)]
pub transport: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
#[ts(optional)]
pub command: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
#[ts(optional)]
pub url: Option<String>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
@@ -1579,7 +1462,6 @@ impl From<CoreSkillMetadata> for SkillMetadata {
description: value.description,
short_description: value.short_description,
interface: value.interface.map(SkillInterface::from),
dependencies: value.dependencies.map(SkillDependencies::from),
path: value.path,
scope: value.scope.into(),
enabled: true,
@@ -1600,31 +1482,6 @@ impl From<CoreSkillInterface> for SkillInterface {
}
}
impl From<CoreSkillDependencies> for SkillDependencies {
fn from(value: CoreSkillDependencies) -> Self {
Self {
tools: value
.tools
.into_iter()
.map(SkillToolDependency::from)
.collect(),
}
}
}
impl From<CoreSkillToolDependency> for SkillToolDependency {
fn from(value: CoreSkillToolDependency) -> Self {
Self {
r#type: value.r#type,
value: value.value,
description: value.description,
transport: value.transport,
command: value.command,
url: value.url,
}
}
}
impl From<CoreSkillScope> for SkillScope {
fn from(value: CoreSkillScope) -> Self {
match value {
@@ -1980,10 +1837,6 @@ pub enum UserInput {
name: String,
path: PathBuf,
},
Mention {
name: String,
path: String,
},
}
impl UserInput {
@@ -1999,7 +1852,6 @@ impl UserInput {
UserInput::Image { url } => CoreUserInput::Image { image_url: url },
UserInput::LocalImage { path } => CoreUserInput::LocalImage { path },
UserInput::Skill { name, path } => CoreUserInput::Skill { name, path },
UserInput::Mention { name, path } => CoreUserInput::Mention { name, path },
}
}
}
@@ -2017,7 +1869,6 @@ impl From<CoreUserInput> for UserInput {
CoreUserInput::Image { image_url } => UserInput::Image { url: image_url },
CoreUserInput::LocalImage { path } => UserInput::LocalImage { path },
CoreUserInput::Skill { name, path } => UserInput::Skill { name, path },
CoreUserInput::Mention { name, path } => UserInput::Mention { name, path },
_ => unreachable!("unsupported user input variant"),
}
}
@@ -2118,9 +1969,6 @@ pub enum ThreadItem {
#[serde(rename_all = "camelCase")]
#[ts(rename_all = "camelCase")]
ExitedReviewMode { id: String, review: String },
#[serde(rename_all = "camelCase")]
#[ts(rename_all = "camelCase")]
ContextCompaction { id: String },
}
impl From<CoreTurnItem> for ThreadItem {
@@ -2149,9 +1997,6 @@ impl From<CoreTurnItem> for ThreadItem {
id: search.id,
query: search.query,
},
CoreTurnItem::ContextCompaction(compaction) => {
ThreadItem::ContextCompaction { id: compaction.id }
}
}
}
}
@@ -2298,16 +2143,6 @@ pub struct ThreadStartedNotification {
pub thread: Thread,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct ThreadNameUpdatedNotification {
pub thread_id: String,
#[serde(default, skip_serializing_if = "Option::is_none")]
#[ts(optional)]
pub thread_name: Option<String>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
@@ -2524,7 +2359,6 @@ pub struct WindowsWorldWritableWarningNotification {
pub failed_scan: bool,
}
/// Deprecated: Use `ContextCompaction` item type instead.
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
@@ -2616,15 +2450,13 @@ pub struct ToolRequestUserInputOption {
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
/// EXPERIMENTAL. Represents one request_user_input question and its required options.
/// EXPERIMENTAL. Represents one request_user_input question and its optional options.
pub struct ToolRequestUserInputQuestion {
pub id: String,
pub header: String,
pub question: String,
#[serde(default)]
pub is_other: bool,
#[serde(default)]
pub is_secret: bool,
pub options: Option<Vec<ToolRequestUserInputOption>>,
}
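A note on the `#[serde(default)]` fields above: clients may omit them from the JSON payload, and deserialization falls back to `false` rather than failing. A minimal standalone sketch with a simplified mirror of the struct (hypothetical field subset, not the crate's own test):

```rust
use serde::Deserialize;

// Simplified mirror of ToolRequestUserInputQuestion, keeping only
// enough fields to show the `#[serde(default)]` behavior.
#[derive(Deserialize, Debug, PartialEq)]
#[serde(rename_all = "camelCase")]
struct Question {
    id: String,
    question: String,
    #[serde(default)]
    is_other: bool,
}

fn main() -> serde_json::Result<()> {
    // "isOther" is absent from the payload, so it falls back to
    // bool::default() == false instead of being a hard error.
    let q: Question = serde_json::from_str(r#"{"id": "q1", "question": "Proceed?"}"#)?;
    assert_eq!(
        Question { id: "q1".into(), question: "Proceed?".into(), is_other: false },
        q
    );
    Ok(())
}
```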
@@ -2789,7 +2621,6 @@ mod tests {
use codex_protocol::items::TurnItem;
use codex_protocol::items::UserMessageItem;
use codex_protocol::items::WebSearchItem;
use codex_protocol::models::WebSearchAction;
use codex_protocol::protocol::NetworkAccess as CoreNetworkAccess;
use codex_protocol::user_input::UserInput as CoreUserInput;
use pretty_assertions::assert_eq;
@@ -2833,10 +2664,6 @@ mod tests {
name: "skill-creator".to_string(),
path: PathBuf::from("/repo/.codex/skills/skill-creator/SKILL.md"),
},
CoreUserInput::Mention {
name: "Demo App".to_string(),
path: "app://demo-app".to_string(),
},
],
});
@@ -2859,10 +2686,6 @@ mod tests {
name: "skill-creator".to_string(),
path: PathBuf::from("/repo/.codex/skills/skill-creator/SKILL.md"),
},
UserInput::Mention {
name: "Demo App".to_string(),
path: "app://demo-app".to_string(),
},
],
}
);
@@ -2905,9 +2728,6 @@ mod tests {
let search_item = TurnItem::WebSearch(WebSearchItem {
id: "search-1".to_string(),
query: "docs".to_string(),
action: WebSearchAction::Search {
query: Some("docs".to_string()),
},
});
assert_eq!(

View File

@@ -17,7 +17,6 @@ workspace = true
[dependencies]
anyhow = { workspace = true }
async-trait = { workspace = true }
codex-arg0 = { workspace = true }
codex-common = { workspace = true, features = ["cli"] }
codex-core = { workspace = true }
@@ -57,7 +56,6 @@ axum = { workspace = true, default-features = false, features = [
"tokio",
] }
base64 = { workspace = true }
codex-execpolicy = { workspace = true }
core_test_support = { workspace = true }
mcp-types = { workspace = true }
os_info = { workspace = true }

View File

@@ -13,7 +13,6 @@
- [Events](#events)
- [Approvals](#approvals)
- [Skills](#skills)
- [Apps](#apps)
- [Auth endpoints](#auth-endpoints)
## Protocol
@@ -82,7 +81,6 @@ Example (from OpenAI's official VSCode extension):
- `thread/loaded/list` — list the thread ids currently loaded in memory.
- `thread/read` — read a stored thread by id without resuming it; optionally include turns via `includeTurns`.
- `thread/archive` — move a thread's rollout file into the archived directory; returns `{}` on success.
- `thread/name/set` — set or update a thread's user-facing name; returns `{}` on success. Thread names are not required to be unique; name lookups resolve to the most recently updated thread.
- `thread/unarchive` — move an archived rollout file back into the sessions directory; returns the restored `thread` on success.
- `thread/rollback` — drop the last N turns from the agent's in-memory context and persist a rollback marker in the rollout so future resumes see the pruned history; returns the updated `thread` (with `turns` populated) on success.
- `turn/start` — add user input to a thread and begin Codex generation; responds with the initial `turn` object and streams `turn/started`, `item/*`, and `turn/completed` notifications.
@@ -298,26 +296,6 @@ Invoke a skill explicitly by including `$<skill-name>` in the text input and add
} } }
```
### Example: Start a turn (invoke an app)
Invoke an app by including `$<app-slug>` in the text input and adding a `mention` input item with the app id in `app://<connector-id>` form.
```json
{ "method": "turn/start", "id": 34, "params": {
"threadId": "thr_123",
"input": [
{ "type": "text", "text": "$demo-app Summarize the latest updates." },
{ "type": "mention", "name": "Demo App", "path": "app://demo-app" }
]
} }
{ "id": 34, "result": { "turn": {
"id": "turn_458",
"status": "inProgress",
"items": [],
"error": null
} } }
```
### Example: Interrupt an active turn
You can cancel a running Turn with `turn/interrupt`.
@@ -453,8 +431,7 @@ Today both notifications carry an empty `items` array even when item events were
- `imageView` — `{id, path}` emitted when the agent invokes the image viewer tool.
- `enteredReviewMode` — `{id, review}` sent when the reviewer starts; `review` is a short user-facing label such as `"current changes"` or the requested target description.
- `exitedReviewMode` — `{id, review}` emitted when the reviewer finishes; `review` is the full plain-text review (usually, overall notes plus bullet point findings).
- `contextCompaction` — `{id}` emitted when codex compacts the conversation history. This can happen automatically.
- `compacted` - `{threadId, turnId}` when codex compacts the conversation history. This can happen automatically. **Deprecated:** Use `contextCompaction` instead.
- `compacted` - `{threadId, turnId}` when codex compacts the conversation history. This can happen automatically.
All items emit two shared lifecycle events:
@@ -605,72 +582,14 @@ To enable or disable a skill by path:
}
```
## Apps
Use `app/list` to fetch available apps (connectors). Each entry includes metadata like the app `id`, display `name`, `installUrl`, and whether it is currently accessible.
```json
{ "method": "app/list", "id": 50, "params": {
"cursor": null,
"limit": 50
} }
{ "id": 50, "result": {
"data": [
{
"id": "demo-app",
"name": "Demo App",
"description": "Example connector for documentation.",
"logoUrl": "https://example.com/demo-app.png",
"logoUrlDark": null,
"distributionChannel": null,
"installUrl": "https://chatgpt.com/apps/demo-app/demo-app",
"isAccessible": true
}
],
"nextCursor": null
} }
```
Invoke an app by inserting `$<app-slug>` in the text input. The slug is derived from the app name and lowercased with non-alphanumeric characters replaced by `-` (for example, "Demo App" becomes `$demo-app`). Add a `mention` input item (recommended) so the server uses the exact `app://<connector-id>` path rather than guessing by name.
Example:
```
$demo-app Pull the latest updates from the team.
```
```json
{
"method": "turn/start",
"id": 51,
"params": {
"threadId": "thread-1",
"input": [
{
"type": "text",
"text": "$demo-app Pull the latest updates from the team."
},
{ "type": "mention", "name": "Demo App", "path": "app://demo-app" }
]
}
}
```
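The slug derivation described above (lowercase the app name, replace non-alphanumeric characters with `-`) could look like the following hypothetical helper; the server's actual implementation is not shown here and may handle edge cases differently:

```rust
/// Hypothetical sketch of the slug rule: lowercase the app name and
/// replace non-alphanumeric characters with `-`. The real derivation
/// may differ in edge cases (e.g. collapsing consecutive dashes).
fn app_slug(name: &str) -> String {
    name.to_lowercase()
        .chars()
        .map(|c| if c.is_ascii_alphanumeric() { c } else { '-' })
        .collect()
}

fn main() {
    // "Demo App" becomes the `$demo-app` mention slug.
    assert_eq!("demo-app", app_slug("Demo App"));
}
```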
## Auth endpoints
The JSON-RPC auth/account surface exposes request/response methods plus server-initiated notifications (no `id`). Use these to determine auth state, start or cancel logins, logout, and inspect ChatGPT rate limits.
### Authentication modes
Codex supports these authentication modes. The current mode is surfaced in `account/updated` (`authMode`) and can be inferred from `account/read`.
- **API key (`apiKey`)**: Caller supplies an OpenAI API key via `account/login/start` with `type: "apiKey"`. The API key is saved and used for API requests.
- **ChatGPT managed (`chatgpt`)** (recommended): Codex owns the ChatGPT OAuth flow and refresh tokens. Start via `account/login/start` with `type: "chatgpt"`; Codex persists tokens to disk and refreshes them automatically.
### API Overview
- `account/read` — fetch current account info; optionally refresh tokens.
- `account/login/start` — begin login (`apiKey`, `chatgpt`).
- `account/login/start` — begin login (`apiKey` or `chatgpt`).
- `account/login/completed` (notify) — emitted when a login attempt finishes (success or error).
- `account/login/cancel` — cancel a pending ChatGPT login by `loginId`.
- `account/logout` — sign out; triggers `account/updated`.

View File

@@ -52,7 +52,6 @@ use codex_app_server_protocol::ServerNotification;
use codex_app_server_protocol::ServerRequestPayload;
use codex_app_server_protocol::TerminalInteractionNotification;
use codex_app_server_protocol::ThreadItem;
use codex_app_server_protocol::ThreadNameUpdatedNotification;
use codex_app_server_protocol::ThreadRollbackResponse;
use codex_app_server_protocol::ThreadTokenUsage;
use codex_app_server_protocol::ThreadTokenUsageUpdatedNotification;
@@ -280,7 +279,6 @@ pub(crate) async fn apply_bespoke_event_handling(
header: question.header,
question: question.question,
is_other: question.is_other,
is_secret: question.is_secret,
options: question.options.map(|options| {
options
.into_iter()
@@ -1098,17 +1096,6 @@ pub(crate) async fn apply_bespoke_event_handling(
outgoing.send_response(request_id, response).await;
}
}
EventMsg::ThreadNameUpdated(thread_name_event) => {
if let ApiVersion::V2 = api_version {
let notification = ThreadNameUpdatedNotification {
thread_id: thread_name_event.thread_id.to_string(),
thread_name: thread_name_event.thread_name,
};
outgoing
.send_server_notification(ServerNotification::ThreadNameUpdated(notification))
.await;
}
}
EventMsg::TurnDiff(turn_diff_event) => {
handle_turn_diff(
conversation_id,

View File

@@ -13,6 +13,7 @@ use codex_app_server_protocol::AccountLoginCompletedNotification;
use codex_app_server_protocol::AccountUpdatedNotification;
use codex_app_server_protocol::AddConversationListenerParams;
use codex_app_server_protocol::AddConversationSubscriptionResponse;
use codex_app_server_protocol::AppInfo as ApiAppInfo;
use codex_app_server_protocol::AppsListParams;
use codex_app_server_protocol::AppsListResponse;
use codex_app_server_protocol::ArchiveConversationParams;
@@ -57,7 +58,6 @@ use codex_app_server_protocol::ListConversationsResponse;
use codex_app_server_protocol::ListMcpServerStatusParams;
use codex_app_server_protocol::ListMcpServerStatusResponse;
use codex_app_server_protocol::LoginAccountParams;
use codex_app_server_protocol::LoginAccountResponse;
use codex_app_server_protocol::LoginApiKeyParams;
use codex_app_server_protocol::LoginApiKeyResponse;
use codex_app_server_protocol::LoginChatGptCompleteNotification;
@@ -110,8 +110,6 @@ use codex_app_server_protocol::ThreadReadResponse;
use codex_app_server_protocol::ThreadResumeParams;
use codex_app_server_protocol::ThreadResumeResponse;
use codex_app_server_protocol::ThreadRollbackParams;
use codex_app_server_protocol::ThreadSetNameParams;
use codex_app_server_protocol::ThreadSetNameResponse;
use codex_app_server_protocol::ThreadSortKey;
use codex_app_server_protocol::ThreadSourceKind;
use codex_app_server_protocol::ThreadStartParams;
@@ -133,7 +131,6 @@ use codex_app_server_protocol::build_turns_from_event_msgs;
use codex_backend_client::Client as BackendClient;
use codex_chatgpt::connectors;
use codex_core::AuthManager;
use codex_core::CodexAuth;
use codex_core::CodexThread;
use codex_core::Cursor as RolloutCursor;
use codex_core::InitialHistory;
@@ -145,7 +142,6 @@ use codex_core::ThreadManager;
use codex_core::ThreadSortKey as CoreThreadSortKey;
use codex_core::auth::CLIENT_ID;
use codex_core::auth::login_with_api_key;
use codex_core::auth::login_with_chatgpt_auth_tokens;
use codex_core::config::Config;
use codex_core::config::ConfigOverrides;
use codex_core::config::ConfigService;
@@ -173,9 +169,6 @@ use codex_core::read_head_for_summary;
use codex_core::read_session_meta_line;
use codex_core::rollout_date_parts;
use codex_core::sandboxing::SandboxPermissions;
use codex_core::state_db::get_state_db;
use codex_core::token_data::parse_id_token;
use codex_core::windows_sandbox::WindowsSandboxLevelExt;
use codex_feedback::CodexFeedback;
use codex_login::ServerOptions as LoginServerOptions;
use codex_login::ShutdownHandle;
@@ -183,7 +176,6 @@ use codex_login::run_login_server;
use codex_protocol::ThreadId;
use codex_protocol::config_types::ForcedLoginMethod;
use codex_protocol::config_types::Personality;
use codex_protocol::config_types::WindowsSandboxLevel;
use codex_protocol::dynamic_tools::DynamicToolSpec as CoreDynamicToolSpec;
use codex_protocol::items::TurnItem;
use codex_protocol::models::ResponseItem;
@@ -419,9 +411,6 @@ impl CodexMessageProcessor {
ClientRequest::ThreadArchive { request_id, params } => {
self.thread_archive(request_id, params).await;
}
ClientRequest::ThreadSetName { request_id, params } => {
self.thread_set_name(request_id, params).await;
}
ClientRequest::ThreadUnarchive { request_id, params } => {
self.thread_unarchive(request_id, params).await;
}
@@ -616,22 +605,6 @@ impl CodexMessageProcessor {
LoginAccountParams::Chatgpt => {
self.login_chatgpt_v2(request_id).await;
}
LoginAccountParams::ChatgptAuthTokens {
id_token,
access_token,
} => {
self.login_chatgpt_auth_tokens(request_id, id_token, access_token)
.await;
}
}
}
fn external_auth_active_error(&self) -> JSONRPCErrorError {
JSONRPCErrorError {
code: INVALID_REQUEST_ERROR_CODE,
message: "External auth is active. Use account/login/start (chatgptAuthTokens) to update it or account/logout to clear it."
.to_string(),
data: None,
}
}
@@ -639,10 +612,6 @@ impl CodexMessageProcessor {
&mut self,
params: &LoginApiKeyParams,
) -> std::result::Result<(), JSONRPCErrorError> {
if self.auth_manager.is_external_auth_active() {
return Err(self.external_auth_active_error());
}
if matches!(
self.config.forced_login_method,
Some(ForcedLoginMethod::Chatgpt)
@@ -687,11 +656,7 @@ impl CodexMessageProcessor {
.await;
let payload = AuthStatusChangeNotification {
auth_method: self
.auth_manager
.auth_cached()
.as_ref()
.map(CodexAuth::api_auth_mode),
auth_method: self.auth_manager.auth_cached().map(|auth| auth.mode),
};
self.outgoing
.send_server_notification(ServerNotification::AuthStatusChange(payload))
@@ -721,11 +686,7 @@ impl CodexMessageProcessor {
.await;
let payload_v2 = AccountUpdatedNotification {
auth_mode: self
.auth_manager
.auth_cached()
.as_ref()
.map(CodexAuth::api_auth_mode),
auth_mode: self.auth_manager.auth_cached().map(|auth| auth.mode),
};
self.outgoing
.send_server_notification(ServerNotification::AccountUpdated(payload_v2))
@@ -743,10 +704,6 @@ impl CodexMessageProcessor {
) -> std::result::Result<LoginServerOptions, JSONRPCErrorError> {
let config = self.config.as_ref();
if self.auth_manager.is_external_auth_active() {
return Err(self.external_auth_active_error());
}
if matches!(config.forced_login_method, Some(ForcedLoginMethod::Api)) {
return Err(JSONRPCErrorError {
code: INVALID_REQUEST_ERROR_CODE,
@@ -821,10 +778,7 @@ impl CodexMessageProcessor {
auth_manager.reload();
// Notify clients with the actual current auth mode.
let current_auth_method = auth_manager
.auth_cached()
.as_ref()
.map(CodexAuth::api_auth_mode);
let current_auth_method = auth_manager.auth_cached().map(|a| a.mode);
let payload = AuthStatusChangeNotification {
auth_method: current_auth_method,
};
@@ -914,10 +868,7 @@ impl CodexMessageProcessor {
auth_manager.reload();
// Notify clients with the actual current auth mode.
let current_auth_method = auth_manager
.auth_cached()
.as_ref()
.map(CodexAuth::api_auth_mode);
let current_auth_method = auth_manager.auth_cached().map(|a| a.mode);
let payload_v2 = AccountUpdatedNotification {
auth_mode: current_auth_method,
};
@@ -1011,98 +962,6 @@ impl CodexMessageProcessor {
}
}
async fn login_chatgpt_auth_tokens(
&mut self,
request_id: RequestId,
id_token: String,
access_token: String,
) {
if matches!(
self.config.forced_login_method,
Some(ForcedLoginMethod::Api)
) {
let error = JSONRPCErrorError {
code: INVALID_REQUEST_ERROR_CODE,
message: "External ChatGPT auth is disabled. Use API key login instead."
.to_string(),
data: None,
};
self.outgoing.send_error(request_id, error).await;
return;
}
// Cancel any active login attempt to avoid persisting managed auth state.
{
let mut guard = self.active_login.lock().await;
if let Some(active) = guard.take() {
drop(active);
}
}
let id_token_info = match parse_id_token(&id_token) {
Ok(info) => info,
Err(err) => {
let error = JSONRPCErrorError {
code: INVALID_REQUEST_ERROR_CODE,
message: format!("invalid id token: {err}"),
data: None,
};
self.outgoing.send_error(request_id, error).await;
return;
}
};
if let Some(expected_workspace) = self.config.forced_chatgpt_workspace_id.as_deref()
&& id_token_info.chatgpt_account_id.as_deref() != Some(expected_workspace)
{
let account_id = id_token_info.chatgpt_account_id;
let error = JSONRPCErrorError {
code: INVALID_REQUEST_ERROR_CODE,
message: format!(
"External auth must use workspace {expected_workspace}, but received {account_id:?}."
),
data: None,
};
self.outgoing.send_error(request_id, error).await;
return;
}
if let Err(err) =
login_with_chatgpt_auth_tokens(&self.config.codex_home, &id_token, &access_token)
{
let error = JSONRPCErrorError {
code: INTERNAL_ERROR_CODE,
message: format!("failed to set external auth: {err}"),
data: None,
};
self.outgoing.send_error(request_id, error).await;
return;
}
self.auth_manager.reload();
self.outgoing
.send_response(request_id, LoginAccountResponse::ChatgptAuthTokens {})
.await;
let payload_login_completed = AccountLoginCompletedNotification {
login_id: None,
success: true,
error: None,
};
self.outgoing
.send_server_notification(ServerNotification::AccountLoginCompleted(
payload_login_completed,
))
.await;
let payload_v2 = AccountUpdatedNotification {
auth_mode: self.auth_manager.get_auth_mode(),
};
self.outgoing
.send_server_notification(ServerNotification::AccountUpdated(payload_v2))
.await;
}
async fn logout_common(&mut self) -> std::result::Result<Option<AuthMode>, JSONRPCErrorError> {
// Cancel any active login attempt.
{
@@ -1121,11 +980,7 @@ impl CodexMessageProcessor {
}
// Reflect the current auth method after logout (likely None).
Ok(self
.auth_manager
.auth_cached()
.as_ref()
.map(CodexAuth::api_auth_mode))
Ok(self.auth_manager.auth_cached().map(|auth| auth.mode))
}
async fn logout_v1(&mut self, request_id: RequestId) {
@@ -1169,9 +1024,6 @@ impl CodexMessageProcessor {
}
async fn refresh_token_if_requested(&self, do_refresh: bool) {
if self.auth_manager.is_external_auth_active() {
return;
}
if do_refresh && let Err(err) = self.auth_manager.refresh_token().await {
tracing::warn!("failed to refresh token while getting account: {err}");
}
@@ -1197,7 +1049,7 @@ impl CodexMessageProcessor {
} else {
match self.auth_manager.auth().await {
Some(auth) => {
let auth_mode = auth.api_auth_mode();
let auth_mode = auth.mode;
let (reported_auth_method, token_opt) = match auth.get_token() {
Ok(token) if !token.is_empty() => {
let tok = if include_token { Some(token) } else { None };
@@ -1244,9 +1096,9 @@ impl CodexMessageProcessor {
}
let account = match self.auth_manager.auth_cached() {
Some(auth) => Some(match auth {
CodexAuth::ApiKey(_) => Account::ApiKey {},
CodexAuth::ChatGpt(_) | CodexAuth::ChatGptAuthTokens(_) => {
Some(auth) => Some(match auth.mode {
AuthMode::ApiKey => Account::ApiKey {},
AuthMode::ChatGPT => {
let email = auth.get_account_email();
let plan_type = auth.account_plan_type();
@@ -1305,7 +1157,7 @@ impl CodexMessageProcessor {
});
};
if !auth.is_chatgpt_auth() {
if auth.mode != AuthMode::ChatGPT {
return Err(JSONRPCErrorError {
code: INVALID_REQUEST_ERROR_CODE,
message: "chatgpt authentication required to read rate limits".to_string(),
@@ -1407,14 +1259,12 @@ impl CodexMessageProcessor {
let timeout_ms = params
.timeout_ms
.and_then(|timeout_ms| u64::try_from(timeout_ms).ok());
let windows_sandbox_level = WindowsSandboxLevel::from_config(&self.config);
let exec_params = ExecParams {
command: params.command,
cwd,
expiration: timeout_ms.into(),
env,
sandbox_permissions: SandboxPermissions::UseDefault,
windows_sandbox_level,
justification: None,
arg0: None,
};
@@ -1755,7 +1605,6 @@ impl CodexMessageProcessor {
}
async fn thread_archive(&mut self, request_id: RequestId, params: ThreadArchiveParams) {
// TODO(jif) mostly rewrite this using sqlite after phase 1
let thread_id = match ThreadId::from_string(&params.thread_id) {
Ok(id) => id,
Err(err) => {
@@ -1804,38 +1653,7 @@ impl CodexMessageProcessor {
}
}
async fn thread_set_name(&self, request_id: RequestId, params: ThreadSetNameParams) {
let ThreadSetNameParams { thread_id, name } = params;
let Some(name) = codex_core::util::normalize_thread_name(&name) else {
self.send_invalid_request_error(
request_id,
"thread name must not be empty".to_string(),
)
.await;
return;
};
let (_, thread) = match self.load_thread(&thread_id).await {
Ok(v) => v,
Err(error) => {
self.outgoing.send_error(request_id, error).await;
return;
}
};
if let Err(err) = thread.submit(Op::SetThreadName { name }).await {
self.send_internal_error(request_id, format!("failed to set thread name: {err}"))
.await;
return;
}
self.outgoing
.send_response(request_id, ThreadSetNameResponse {})
.await;
}
async fn thread_unarchive(&mut self, request_id: RequestId, params: ThreadUnarchiveParams) {
// TODO(jif) mostly rewrite this using sqlite after phase 1
let thread_id = match ThreadId::from_string(&params.thread_id) {
Ok(id) => id,
Err(err) => {
@@ -1878,7 +1696,6 @@ impl CodexMessageProcessor {
let rollout_path_display = archived_path.display().to_string();
let fallback_provider = self.config.model_provider_id.clone();
let state_db_ctx = get_state_db(&self.config, None).await;
let archived_folder = self
.config
.codex_home
@@ -1957,11 +1774,6 @@ impl CodexMessageProcessor {
message: format!("failed to unarchive thread: {err}"),
data: None,
})?;
if let Some(ctx) = state_db_ctx {
let _ = ctx
.mark_unarchived(thread_id, restored_path.as_path())
.await;
}
let summary =
read_summary_from_rollout(restored_path.as_path(), fallback_provider.as_str())
.await
@@ -2691,6 +2503,7 @@ impl CodexMessageProcessor {
};
let fallback_provider = self.config.model_provider_id.as_str();
match read_summary_from_rollout(&path, fallback_provider).await {
Ok(summary) => {
let response = GetConversationSummaryResponse { summary };
@@ -3713,13 +3526,8 @@ impl CodexMessageProcessor {
});
}
let mut state_db_ctx = None;
// If the thread is active, request shutdown and wait briefly.
if let Some(conversation) = self.thread_manager.remove_thread(&thread_id).await {
if let Some(ctx) = conversation.state_db() {
state_db_ctx = Some(ctx);
}
info!("thread {thread_id} was active; shutting down");
// Request shutdown.
match conversation.submit(Op::Shutdown).await {
@@ -3746,24 +3554,14 @@ impl CodexMessageProcessor {
}
}
if state_db_ctx.is_none() {
state_db_ctx = get_state_db(&self.config, None).await;
}
// Move the rollout file to archived.
let result: std::io::Result<()> = async move {
let result: std::io::Result<()> = async {
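// Plain `async` rather than `async move`: with the state-db handle gone, the
// block only needs to borrow `self` and the surrounding locals.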
let archive_folder = self
.config
.codex_home
.join(codex_core::ARCHIVED_SESSIONS_SUBDIR);
tokio::fs::create_dir_all(&archive_folder).await?;
let archived_path = archive_folder.join(&file_name);
tokio::fs::rename(&canonical_rollout_path, &archived_path).await?;
if let Some(ctx) = state_db_ctx {
let _ = ctx
.mark_archived(thread_id, archived_path.as_path(), Utc::now())
.await;
}
tokio::fs::rename(&canonical_rollout_path, &archive_folder.join(&file_name)).await?;
Ok(())
}
.await;
@@ -3807,10 +3605,7 @@ impl CodexMessageProcessor {
// Submit user input to the conversation.
let _ = conversation
.submit(Op::UserInput {
items: mapped_items,
final_output_json_schema: None,
})
.submit_user_turn_with_defaults(mapped_items, None)
.await;
// Acknowledge with an empty result.
@@ -3859,6 +3654,7 @@ impl CodexMessageProcessor {
let _ = conversation
.submit(Op::UserTurn {
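// `use_thread_defaults` is false here: every per-turn value is provided
// explicitly by the caller.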
use_thread_defaults: false,
items: mapped_items,
cwd,
approval_policy,
@@ -3887,7 +3683,7 @@ impl CodexMessageProcessor {
}
};
if !config.features.enabled(Feature::Apps) {
if !config.features.enabled(Feature::Connectors) {
self.outgoing
.send_response(
request_id,
@@ -3950,7 +3746,18 @@ impl CodexMessageProcessor {
}
let end = start.saturating_add(effective_limit).min(total);
let data = connectors[start..end].to_vec();
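// Project the page of internal ConnectorInfo records into the API-facing
// ApiAppInfo shape.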
let data = connectors[start..end]
.iter()
.cloned()
.map(|connector| ApiAppInfo {
id: connector.connector_id,
name: connector.connector_name,
description: connector.connector_description,
logo_url: connector.logo_url,
install_url: connector.install_url,
is_accessible: connector.is_accessible,
})
.collect();
let next_cursor = if end < total {
Some(end.to_string())
@@ -4062,37 +3869,35 @@ impl CodexMessageProcessor {
.map(V2UserInput::into_core)
.collect();
let has_any_overrides = params.cwd.is_some()
|| params.approval_policy.is_some()
|| params.sandbox_policy.is_some()
|| params.model.is_some()
|| params.effort.is_some()
|| params.summary.is_some()
|| params.collaboration_mode.is_some()
|| params.personality.is_some();
// If any overrides are provided, update the session turn context first.
if has_any_overrides {
let _ = thread
.submit(Op::OverrideTurnContext {
cwd: params.cwd,
approval_policy: params.approval_policy.map(AskForApproval::to_core),
sandbox_policy: params.sandbox_policy.map(|p| p.to_core()),
windows_sandbox_level: None,
model: params.model,
effort: params.effort.map(Some),
summary: params.summary,
collaboration_mode: params.collaboration_mode,
personality: params.personality,
})
.await;
}
// Start the turn by submitting the user input. Return its submission id as turn_id.
// Start the turn by submitting the user input with the thread's current defaults,
// overridden by any per-turn parameters.
let snapshot = thread.config_snapshot().await;
let cwd = params.cwd.unwrap_or(snapshot.cwd);
let approval_policy = params
.approval_policy
.map(AskForApproval::to_core)
.unwrap_or(snapshot.approval_policy);
let sandbox_policy = params
.sandbox_policy
.map(|policy| policy.to_core())
.unwrap_or(snapshot.sandbox_policy);
let model = params.model.unwrap_or(snapshot.model);
let effort = params.effort.or(snapshot.reasoning_effort);
let summary = params.summary.unwrap_or(snapshot.reasoning_summary);
let personality = params.personality.or(snapshot.personality);
let turn_id = thread
.submit(Op::UserInput {
.submit(Op::UserTurn {
use_thread_defaults: false,
items: mapped_items,
cwd,
approval_policy,
sandbox_policy,
model,
effort,
summary,
final_output_json_schema: params.output_schema,
collaboration_mode: params.collaboration_mode,
personality,
})
.await;
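// A minimal sketch of the merge rule applied above, assuming a hypothetical
// helper (`merge_param` is illustrative, not part of this change): a per-turn
// parameter wins when present, otherwise the thread's snapshot default is used.
fn merge_param<T>(per_turn: Option<T>, snapshot_default: T) -> T {
    // The explicit override takes precedence; fall back to the snapshot value.
    per_turn.unwrap_or(snapshot_default)
}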
@@ -4709,22 +4514,6 @@ fn skills_to_info(
default_prompt: interface.default_prompt,
}
}),
dependencies: skill.dependencies.clone().map(|dependencies| {
codex_app_server_protocol::SkillDependencies {
tools: dependencies
.tools
.into_iter()
.map(|tool| codex_app_server_protocol::SkillToolDependency {
r#type: tool.r#type,
value: tool.value,
description: tool.description,
transport: tool.transport,
command: tool.command,
url: tool.url,
})
.collect(),
}
}),
path: skill.path.clone(),
scope: skill.scope.into(),
enabled,


@@ -312,7 +312,7 @@ pub async fn run_main(
JSONRPCMessage::Request(r) => processor.process_request(r).await,
JSONRPCMessage::Response(r) => processor.process_response(r).await,
JSONRPCMessage::Notification(n) => processor.process_notification(n).await,
JSONRPCMessage::Error(e) => processor.process_error(e).await,
JSONRPCMessage::Error(e) => processor.process_error(e),
}
}
created = thread_created_rx.recv(), if listen_for_threads => {


@@ -5,10 +5,6 @@ use crate::codex_message_processor::CodexMessageProcessor;
use crate::config_api::ConfigApi;
use crate::error_code::INVALID_REQUEST_ERROR_CODE;
use crate::outgoing_message::OutgoingMessageSender;
use async_trait::async_trait;
use codex_app_server_protocol::ChatgptAuthTokensRefreshParams;
use codex_app_server_protocol::ChatgptAuthTokensRefreshReason;
use codex_app_server_protocol::ChatgptAuthTokensRefreshResponse;
use codex_app_server_protocol::ClientInfo;
use codex_app_server_protocol::ClientRequest;
use codex_app_server_protocol::ConfigBatchWriteParams;
@@ -23,13 +19,8 @@ use codex_app_server_protocol::JSONRPCRequest;
use codex_app_server_protocol::JSONRPCResponse;
use codex_app_server_protocol::RequestId;
use codex_app_server_protocol::ServerNotification;
use codex_app_server_protocol::ServerRequestPayload;
use codex_core::AuthManager;
use codex_core::ThreadManager;
use codex_core::auth::ExternalAuthRefreshContext;
use codex_core::auth::ExternalAuthRefreshReason;
use codex_core::auth::ExternalAuthRefresher;
use codex_core::auth::ExternalAuthTokens;
use codex_core::config::Config;
use codex_core::config_loader::LoaderOverrides;
use codex_core::default_client::SetOriginatorError;
@@ -40,64 +31,8 @@ use codex_feedback::CodexFeedback;
use codex_protocol::ThreadId;
use codex_protocol::protocol::SessionSource;
use tokio::sync::broadcast;
use tokio::time::Duration;
use tokio::time::timeout;
use toml::Value as TomlValue;
const EXTERNAL_AUTH_REFRESH_TIMEOUT: Duration = Duration::from_secs(10);
#[derive(Clone)]
struct ExternalAuthRefreshBridge {
outgoing: Arc<OutgoingMessageSender>,
}
impl ExternalAuthRefreshBridge {
fn map_reason(reason: ExternalAuthRefreshReason) -> ChatgptAuthTokensRefreshReason {
match reason {
ExternalAuthRefreshReason::Unauthorized => ChatgptAuthTokensRefreshReason::Unauthorized,
}
}
}
#[async_trait]
impl ExternalAuthRefresher for ExternalAuthRefreshBridge {
async fn refresh(
&self,
context: ExternalAuthRefreshContext,
) -> std::io::Result<ExternalAuthTokens> {
let params = ChatgptAuthTokensRefreshParams {
reason: Self::map_reason(context.reason),
previous_account_id: context.previous_account_id,
};
let (request_id, rx) = self
.outgoing
.send_request_with_id(ServerRequestPayload::ChatgptAuthTokensRefresh(params))
.await;
let result = match timeout(EXTERNAL_AUTH_REFRESH_TIMEOUT, rx).await {
Ok(result) => result.map_err(|err| {
std::io::Error::other(format!("auth refresh request canceled: {err}"))
})?,
Err(_) => {
let _canceled = self.outgoing.cancel_request(&request_id).await;
return Err(std::io::Error::other(format!(
"auth refresh request timed out after {}s",
EXTERNAL_AUTH_REFRESH_TIMEOUT.as_secs()
)));
}
};
let response: ChatgptAuthTokensRefreshResponse =
serde_json::from_value(result).map_err(std::io::Error::other)?;
Ok(ExternalAuthTokens {
access_token: response.access_token,
id_token: response.id_token,
})
}
}
pub(crate) struct MessageProcessor {
outgoing: Arc<OutgoingMessageSender>,
codex_message_processor: CodexMessageProcessor,
@@ -124,10 +59,6 @@ impl MessageProcessor {
false,
config.cli_auth_credentials_store_mode,
);
auth_manager.set_forced_chatgpt_workspace_id(config.forced_chatgpt_workspace_id.clone());
auth_manager.set_external_auth_refresher(Arc::new(ExternalAuthRefreshBridge {
outgoing: outgoing.clone(),
}));
let thread_manager = Arc::new(ThreadManager::new(
config.codex_home.clone(),
auth_manager.clone(),
@@ -305,9 +236,8 @@ impl MessageProcessor {
}
/// Handle an error object received from the peer.
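/// Logging only; the error is not routed back to a pending request callback.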
pub(crate) async fn process_error(&mut self, err: JSONRPCError) {
pub(crate) fn process_error(&mut self, err: JSONRPCError) {
tracing::error!("<- error: {:?}", err);
self.outgoing.notify_client_error(err.id, err.error).await;
}
async fn handle_config_read(&self, request_id: RequestId, params: ConfigReadParams) {


@@ -39,14 +39,6 @@ impl OutgoingMessageSender {
&self,
request: ServerRequestPayload,
) -> oneshot::Receiver<Result> {
let (_id, rx) = self.send_request_with_id(request).await;
rx
}
pub(crate) async fn send_request_with_id(
&self,
request: ServerRequestPayload,
) -> (RequestId, oneshot::Receiver<Result>) {
let id = RequestId::Integer(self.next_request_id.fetch_add(1, Ordering::Relaxed));
let outgoing_message_id = id.clone();
let (tx_approve, rx_approve) = oneshot::channel();
@@ -62,7 +54,7 @@ impl OutgoingMessageSender {
let mut request_id_to_callback = self.request_id_to_callback.lock().await;
request_id_to_callback.remove(&outgoing_message_id);
}
(outgoing_message_id, rx_approve)
rx_approve
}
pub(crate) async fn notify_client_response(&self, id: RequestId, result: Result) {
@@ -83,30 +75,6 @@ impl OutgoingMessageSender {
}
}
pub(crate) async fn notify_client_error(&self, id: RequestId, error: JSONRPCErrorError) {
let entry = {
let mut request_id_to_callback = self.request_id_to_callback.lock().await;
request_id_to_callback.remove_entry(&id)
};
match entry {
Some((id, _sender)) => {
warn!("client responded with error for {id:?}: {error:?}");
}
None => {
warn!("could not find callback for {id:?}");
}
}
}
pub(crate) async fn cancel_request(&self, id: &RequestId) -> bool {
let entry = {
let mut request_id_to_callback = self.request_id_to_callback.lock().await;
request_id_to_callback.remove_entry(id)
};
entry.is_some()
}
pub(crate) async fn send_response<T: Serialize>(&self, id: RequestId, response: T) {
match serde_json::to_value(response) {
Ok(result) => {


@@ -6,7 +6,6 @@ use base64::Engine;
use base64::engine::general_purpose::URL_SAFE_NO_PAD;
use chrono::DateTime;
use chrono::Utc;
use codex_app_server_protocol::AuthMode;
use codex_core::auth::AuthCredentialsStoreMode;
use codex_core::auth::AuthDotJson;
use codex_core::auth::save_auth;
@@ -159,7 +158,6 @@ pub fn write_chatgpt_auth(
let last_refresh = fixture.last_refresh.unwrap_or_else(|| Some(Utc::now()));
let auth = AuthDotJson {
auth_mode: Some(AuthMode::ChatGPT),
openai_api_key: None,
tokens: Some(tokens),
last_refresh,


@@ -1,72 +0,0 @@
use codex_core::features::FEATURES;
use codex_core::features::Feature;
use std::collections::BTreeMap;
use std::path::Path;
pub fn write_mock_responses_config_toml(
codex_home: &Path,
server_uri: &str,
feature_flags: &BTreeMap<Feature, bool>,
auto_compact_limit: i64,
requires_openai_auth: Option<bool>,
model_provider_id: &str,
compact_prompt: &str,
) -> std::io::Result<()> {
// Phase 1: build the features block for config.toml.
let mut features = BTreeMap::from([(Feature::RemoteModels, false)]);
for (feature, enabled) in feature_flags {
features.insert(*feature, *enabled);
}
let feature_entries = features
.into_iter()
.map(|(feature, enabled)| {
let key = FEATURES
.iter()
.find(|spec| spec.id == feature)
.map(|spec| spec.key)
.unwrap_or_else(|| panic!("missing feature key for {feature:?}"));
format!("{key} = {enabled}")
})
.collect::<Vec<_>>()
.join("\n");
// Phase 2: build provider-specific config bits.
let requires_line = match requires_openai_auth {
Some(true) => "requires_openai_auth = true\n".to_string(),
Some(false) | None => String::new(),
};
let provider_block = if model_provider_id == "openai" {
String::new()
} else {
format!(
r#"
[model_providers.mock_provider]
name = "Mock provider for test"
base_url = "{server_uri}/v1"
wire_api = "responses"
request_max_retries = 0
stream_max_retries = 0
{requires_line}
"#
)
};
// Phase 3: write the final config file.
let config_toml = codex_home.join("config.toml");
std::fs::write(
config_toml,
format!(
r#"
model = "mock-model"
approval_policy = "never"
sandbox_mode = "read-only"
compact_prompt = "{compact_prompt}"
model_auto_compact_token_limit = {auto_compact_limit}
model_provider = "{model_provider_id}"
[features]
{feature_entries}
{provider_block}
"#
),
)
}


@@ -1,5 +1,4 @@
mod auth_fixtures;
mod config;
mod mcp_process;
mod mock_model_server;
mod models_cache;
@@ -11,7 +10,6 @@ pub use auth_fixtures::ChatGptIdTokenClaims;
pub use auth_fixtures::encode_id_token;
pub use auth_fixtures::write_chatgpt_auth;
use codex_app_server_protocol::JSONRPCResponse;
pub use config::write_mock_responses_config_toml;
pub use core_test_support::format_with_current_shell;
pub use core_test_support::format_with_current_shell_display;
pub use core_test_support::format_with_current_shell_display_non_login;


@@ -29,13 +29,11 @@ use codex_app_server_protocol::GetAuthStatusParams;
use codex_app_server_protocol::InitializeParams;
use codex_app_server_protocol::InterruptConversationParams;
use codex_app_server_protocol::JSONRPCError;
use codex_app_server_protocol::JSONRPCErrorError;
use codex_app_server_protocol::JSONRPCMessage;
use codex_app_server_protocol::JSONRPCNotification;
use codex_app_server_protocol::JSONRPCRequest;
use codex_app_server_protocol::JSONRPCResponse;
use codex_app_server_protocol::ListConversationsParams;
use codex_app_server_protocol::LoginAccountParams;
use codex_app_server_protocol::LoginApiKeyParams;
use codex_app_server_protocol::ModelListParams;
use codex_app_server_protocol::NewConversationParams;
@@ -300,20 +298,6 @@ impl McpProcess {
self.send_request("account/read", params).await
}
/// Send an `account/login/start` JSON-RPC request with ChatGPT auth tokens.
pub async fn send_chatgpt_auth_tokens_login_request(
&mut self,
id_token: String,
access_token: String,
) -> anyhow::Result<i64> {
let params = LoginAccountParams::ChatgptAuthTokens {
id_token,
access_token,
};
let params = Some(serde_json::to_value(params)?);
self.send_request("account/login/start", params).await
}
/// Send a `feedback/upload` JSON-RPC request.
pub async fn send_feedback_upload_request(
&mut self,
@@ -624,15 +608,6 @@ impl McpProcess {
.await
}
pub async fn send_error(
&mut self,
id: RequestId,
error: JSONRPCErrorError,
) -> anyhow::Result<()> {
self.send_jsonrpc_message(JSONRPCMessage::Error(JSONRPCError { id, error }))
.await
}
pub async fn send_notification(
&mut self,
notification: ClientNotification,


@@ -27,7 +27,7 @@ fn preset_to_info(preset: &ModelPreset, priority: i32) -> ModelInfo {
priority,
upgrade: preset.upgrade.as_ref().map(|u| u.into()),
base_instructions: "base instructions".to_string(),
model_messages: None,
model_instructions_template: None,
supports_reasoning_summaries: false,
support_verbosity: false,
default_verbosity: None,


@@ -67,6 +67,7 @@ pub fn create_request_user_input_sse_response(call_id: &str) -> anyhow::Result<S
"id": "confirm_path",
"header": "Confirm",
"question": "Proceed with the plan?",
"isOther": false,
"options": [{
"label": "Yes (Recommended)",
"description": "Continue the current plan."


@@ -81,7 +81,6 @@ pub fn create_fake_rollout_with_source(
source,
model_provider: model_provider.map(str::to_string),
base_instructions: None,
dynamic_tools: None,
};
let payload = serde_json::to_value(SessionMetaLine {
meta,
@@ -160,7 +159,6 @@ pub fn create_fake_rollout_with_text_elements(
source: SessionSource::Cli,
model_provider: model_provider.map(str::to_string),
base_instructions: None,
dynamic_tools: None,
};
let payload = serde_json::to_value(SessionMetaLine {
meta,


@@ -48,7 +48,8 @@ async fn test_fuzzy_file_search_sorts_and_includes_indices() -> Result<()> {
.await??;
let value = resp.result;
let expected_score = 72;
// The path separator on Windows affects the score.
let expected_score = if cfg!(windows) { 69 } else { 72 };
assert_eq!(
value,
@@ -58,9 +59,16 @@ async fn test_fuzzy_file_search_sorts_and_includes_indices() -> Result<()> {
"root": root_path.clone(),
"path": "abexy",
"file_name": "abexy",
"score": 84,
"score": 88,
"indices": [0, 1, 2],
},
{
"root": root_path.clone(),
"path": "abcde",
"file_name": "abcde",
"score": 74,
"indices": [0, 1, 4],
},
{
"root": root_path.clone(),
"path": sub_abce_rel,
@@ -68,13 +76,6 @@ async fn test_fuzzy_file_search_sorts_and_includes_indices() -> Result<()> {
"score": expected_score,
"indices": [4, 5, 7],
},
{
"root": root_path.clone(),
"path": "abcde",
"file_name": "abcde",
"score": 71,
"indices": [0, 1, 4],
},
]
})
);


@@ -11,7 +11,6 @@ use codex_app_server_protocol::NewConversationResponse;
use codex_app_server_protocol::RequestId;
use codex_app_server_protocol::SendUserMessageParams;
use codex_app_server_protocol::SendUserMessageResponse;
use codex_execpolicy::Policy;
use codex_protocol::ThreadId;
use codex_protocol::models::ContentItem;
use codex_protocol::models::DeveloperInstructions;
@@ -359,8 +358,6 @@ fn assert_permissions_message(item: &ResponseItem) {
let expected = DeveloperInstructions::from_policy(
&SandboxPolicy::DangerFullAccess,
AskForApproval::Never,
&Policy::empty(),
false,
&PathBuf::from("/tmp"),
)
.into_text();


@@ -4,43 +4,28 @@ use app_test_support::McpProcess;
use app_test_support::to_response;
use app_test_support::ChatGptAuthFixture;
use app_test_support::ChatGptIdTokenClaims;
use app_test_support::encode_id_token;
use app_test_support::write_chatgpt_auth;
use app_test_support::write_models_cache;
use codex_app_server_protocol::Account;
use codex_app_server_protocol::AuthMode;
use codex_app_server_protocol::CancelLoginAccountParams;
use codex_app_server_protocol::CancelLoginAccountResponse;
use codex_app_server_protocol::CancelLoginAccountStatus;
use codex_app_server_protocol::ChatgptAuthTokensRefreshReason;
use codex_app_server_protocol::ChatgptAuthTokensRefreshResponse;
use codex_app_server_protocol::GetAccountParams;
use codex_app_server_protocol::GetAccountResponse;
use codex_app_server_protocol::JSONRPCError;
use codex_app_server_protocol::JSONRPCErrorError;
use codex_app_server_protocol::JSONRPCNotification;
use codex_app_server_protocol::JSONRPCResponse;
use codex_app_server_protocol::LoginAccountResponse;
use codex_app_server_protocol::LogoutAccountResponse;
use codex_app_server_protocol::RequestId;
use codex_app_server_protocol::ServerNotification;
use codex_app_server_protocol::ServerRequest;
use codex_app_server_protocol::TurnCompletedNotification;
use codex_app_server_protocol::TurnStatus;
use codex_core::auth::AuthCredentialsStoreMode;
use codex_login::login_with_api_key;
use codex_protocol::account::PlanType as AccountPlanType;
use core_test_support::responses;
use pretty_assertions::assert_eq;
use serde_json::json;
use serial_test::serial;
use std::path::Path;
use std::time::Duration;
use tempfile::TempDir;
use tokio::time::timeout;
use wiremock::MockServer;
use wiremock::ResponseTemplate;
const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10);
@@ -50,14 +35,10 @@ struct CreateConfigTomlParams {
forced_method: Option<String>,
forced_workspace_id: Option<String>,
requires_openai_auth: Option<bool>,
base_url: Option<String>,
}
fn create_config_toml(codex_home: &Path, params: CreateConfigTomlParams) -> std::io::Result<()> {
let config_toml = codex_home.join("config.toml");
let base_url = params
.base_url
.unwrap_or_else(|| "http://127.0.0.1:0/v1".to_string());
let forced_line = if let Some(method) = params.forced_method {
format!("forced_login_method = \"{method}\"\n")
} else {
@@ -85,7 +66,7 @@ model_provider = "mock_provider"
[model_providers.mock_provider]
name = "Mock provider for test"
base_url = "{base_url}"
base_url = "http://127.0.0.1:0/v1"
wire_api = "responses"
request_max_retries = 0
stream_max_retries = 0
@@ -152,627 +133,6 @@ async fn logout_account_removes_auth_and_notifies() -> Result<()> {
Ok(())
}
#[tokio::test]
async fn set_auth_token_updates_account_and_notifies() -> Result<()> {
let codex_home = TempDir::new()?;
let mock_server = MockServer::start().await;
create_config_toml(
codex_home.path(),
CreateConfigTomlParams {
requires_openai_auth: Some(true),
base_url: Some(format!("{}/v1", mock_server.uri())),
..Default::default()
},
)?;
write_models_cache(codex_home.path())?;
let id_token = encode_id_token(
&ChatGptIdTokenClaims::new()
.email("embedded@example.com")
.plan_type("pro")
.chatgpt_account_id("org-embedded"),
)?;
let access_token = "access-embedded".to_string();
let mut mcp = McpProcess::new_with_env(codex_home.path(), &[("OPENAI_API_KEY", None)]).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let set_id = mcp
.send_chatgpt_auth_tokens_login_request(id_token.clone(), access_token)
.await?;
let set_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(set_id)),
)
.await??;
let response: LoginAccountResponse = to_response(set_resp)?;
assert_eq!(response, LoginAccountResponse::ChatgptAuthTokens {});
let note = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("account/updated"),
)
.await??;
let parsed: ServerNotification = note.try_into()?;
let ServerNotification::AccountUpdated(payload) = parsed else {
bail!("unexpected notification: {parsed:?}");
};
assert_eq!(payload.auth_mode, Some(AuthMode::ChatgptAuthTokens));
let get_id = mcp
.send_get_account_request(GetAccountParams {
refresh_token: false,
})
.await?;
let get_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(get_id)),
)
.await??;
let account: GetAccountResponse = to_response(get_resp)?;
assert_eq!(
account,
GetAccountResponse {
account: Some(Account::Chatgpt {
email: "embedded@example.com".to_string(),
plan_type: AccountPlanType::Pro,
}),
requires_openai_auth: true,
}
);
Ok(())
}
#[tokio::test]
async fn account_read_refresh_token_is_noop_in_external_mode() -> Result<()> {
let codex_home = TempDir::new()?;
create_config_toml(
codex_home.path(),
CreateConfigTomlParams {
requires_openai_auth: Some(true),
..Default::default()
},
)?;
write_models_cache(codex_home.path())?;
let id_token = encode_id_token(
&ChatGptIdTokenClaims::new()
.email("embedded@example.com")
.plan_type("pro")
.chatgpt_account_id("org-embedded"),
)?;
let mut mcp = McpProcess::new_with_env(codex_home.path(), &[("OPENAI_API_KEY", None)]).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let set_id = mcp
.send_chatgpt_auth_tokens_login_request(id_token, "access-embedded".to_string())
.await?;
let set_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(set_id)),
)
.await??;
let response: LoginAccountResponse = to_response(set_resp)?;
assert_eq!(response, LoginAccountResponse::ChatgptAuthTokens {});
let _updated = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("account/updated"),
)
.await??;
let get_id = mcp
.send_get_account_request(GetAccountParams {
refresh_token: true,
})
.await?;
let get_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(get_id)),
)
.await??;
let account: GetAccountResponse = to_response(get_resp)?;
assert_eq!(
account,
GetAccountResponse {
account: Some(Account::Chatgpt {
email: "embedded@example.com".to_string(),
plan_type: AccountPlanType::Pro,
}),
requires_openai_auth: true,
}
);
let refresh_request = timeout(
Duration::from_millis(250),
mcp.read_stream_until_request_message(),
)
.await;
assert!(
refresh_request.is_err(),
"external mode should not emit account/chatgptAuthTokens/refresh for refreshToken=true"
);
Ok(())
}
async fn respond_to_refresh_request(
mcp: &mut McpProcess,
access_token: &str,
id_token: &str,
) -> Result<()> {
let refresh_req: ServerRequest = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_request_message(),
)
.await??;
let ServerRequest::ChatgptAuthTokensRefresh { request_id, params } = refresh_req else {
bail!("expected account/chatgptAuthTokens/refresh request, got {refresh_req:?}");
};
assert_eq!(params.reason, ChatgptAuthTokensRefreshReason::Unauthorized);
let response = ChatgptAuthTokensRefreshResponse {
access_token: access_token.to_string(),
id_token: id_token.to_string(),
};
mcp.send_response(request_id, serde_json::to_value(response)?)
.await?;
Ok(())
}
#[tokio::test]
// A 401 response triggers account/chatgptAuthTokens/refresh, and the request is retried with the new tokens.
async fn external_auth_refreshes_on_unauthorized() -> Result<()> {
let codex_home = TempDir::new()?;
let mock_server = MockServer::start().await;
create_config_toml(
codex_home.path(),
CreateConfigTomlParams {
requires_openai_auth: Some(true),
base_url: Some(format!("{}/v1", mock_server.uri())),
..Default::default()
},
)?;
write_models_cache(codex_home.path())?;
let success_sse = responses::sse(vec![
responses::ev_response_created("resp-turn"),
responses::ev_assistant_message("msg-turn", "turn ok"),
responses::ev_completed("resp-turn"),
]);
let unauthorized = ResponseTemplate::new(401).set_body_json(json!({
"error": { "message": "unauthorized" }
}));
let responses_mock = responses::mount_response_sequence(
&mock_server,
vec![unauthorized, responses::sse_response(success_sse)],
)
.await;
let initial_id_token = encode_id_token(
&ChatGptIdTokenClaims::new()
.email("initial@example.com")
.plan_type("pro")
.chatgpt_account_id("org-initial"),
)?;
let refreshed_id_token = encode_id_token(
&ChatGptIdTokenClaims::new()
.email("refreshed@example.com")
.plan_type("pro")
.chatgpt_account_id("org-refreshed"),
)?;
let initial_access_token = "access-initial".to_string();
let refreshed_access_token = "access-refreshed".to_string();
let mut mcp = McpProcess::new_with_env(codex_home.path(), &[("OPENAI_API_KEY", None)]).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let set_id = mcp
.send_chatgpt_auth_tokens_login_request(
initial_id_token.clone(),
initial_access_token.clone(),
)
.await?;
let set_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(set_id)),
)
.await??;
let response: LoginAccountResponse = to_response(set_resp)?;
assert_eq!(response, LoginAccountResponse::ChatgptAuthTokens {});
let _updated = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("account/updated"),
)
.await??;
let thread_req = mcp
.send_thread_start_request(codex_app_server_protocol::ThreadStartParams {
model: Some("mock-model".to_string()),
..Default::default()
})
.await?;
let thread_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(thread_req)),
)
.await??;
let thread = to_response::<codex_app_server_protocol::ThreadStartResponse>(thread_resp)?;
let turn_req = mcp
.send_turn_start_request(codex_app_server_protocol::TurnStartParams {
thread_id: thread.thread.id,
input: vec![codex_app_server_protocol::UserInput::Text {
text: "Hello".to_string(),
text_elements: Vec::new(),
}],
..Default::default()
})
.await?;
respond_to_refresh_request(&mut mcp, &refreshed_access_token, &refreshed_id_token).await?;
let _turn_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(turn_req)),
)
.await??;
let _turn_completed = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("turn/completed"),
)
.await??;
let requests = responses_mock.requests();
assert_eq!(requests.len(), 2);
assert_eq!(
requests[0].header("authorization"),
Some(format!("Bearer {initial_access_token}"))
);
assert_eq!(
requests[1].header("authorization"),
Some(format!("Bearer {refreshed_access_token}"))
);
Ok(())
}
#[tokio::test]
// The client answers the refresh request with a JSON-RPC error; the turn fails.
async fn external_auth_refresh_error_fails_turn() -> Result<()> {
let codex_home = TempDir::new()?;
let mock_server = MockServer::start().await;
create_config_toml(
codex_home.path(),
CreateConfigTomlParams {
requires_openai_auth: Some(true),
base_url: Some(format!("{}/v1", mock_server.uri())),
..Default::default()
},
)?;
write_models_cache(codex_home.path())?;
let unauthorized = ResponseTemplate::new(401).set_body_json(json!({
"error": { "message": "unauthorized" }
}));
let _responses_mock =
responses::mount_response_sequence(&mock_server, vec![unauthorized]).await;
let initial_id_token = encode_id_token(
&ChatGptIdTokenClaims::new()
.email("initial@example.com")
.plan_type("pro")
.chatgpt_account_id("org-initial"),
)?;
let mut mcp = McpProcess::new_with_env(codex_home.path(), &[("OPENAI_API_KEY", None)]).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let set_id = mcp
.send_chatgpt_auth_tokens_login_request(initial_id_token, "access-initial".to_string())
.await?;
let set_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(set_id)),
)
.await??;
let response: LoginAccountResponse = to_response(set_resp)?;
assert_eq!(response, LoginAccountResponse::ChatgptAuthTokens {});
let _updated = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("account/updated"),
)
.await??;
let thread_req = mcp
.send_thread_start_request(codex_app_server_protocol::ThreadStartParams {
model: Some("mock-model".to_string()),
..Default::default()
})
.await?;
let thread_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(thread_req)),
)
.await??;
let thread = to_response::<codex_app_server_protocol::ThreadStartResponse>(thread_resp)?;
let turn_req = mcp
.send_turn_start_request(codex_app_server_protocol::TurnStartParams {
thread_id: thread.thread.id.clone(),
input: vec![codex_app_server_protocol::UserInput::Text {
text: "Hello".to_string(),
text_elements: Vec::new(),
}],
..Default::default()
})
.await?;
let refresh_req: ServerRequest = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_request_message(),
)
.await??;
let ServerRequest::ChatgptAuthTokensRefresh { request_id, .. } = refresh_req else {
bail!("expected account/chatgptAuthTokens/refresh request, got {refresh_req:?}");
};
mcp.send_error(
request_id,
JSONRPCErrorError {
code: -32_000,
message: "refresh failed".to_string(),
data: None,
},
)
.await?;
let _turn_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(turn_req)),
)
.await??;
let completed_notif: JSONRPCNotification = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("turn/completed"),
)
.await??;
let completed: TurnCompletedNotification = serde_json::from_value(
completed_notif
.params
.expect("turn/completed params must be present"),
)?;
assert_eq!(completed.turn.status, TurnStatus::Failed);
assert!(completed.turn.error.is_some());
Ok(())
}
#[tokio::test]
// Refresh returns tokens for the wrong workspace; turn fails.
async fn external_auth_refresh_mismatched_workspace_fails_turn() -> Result<()> {
let codex_home = TempDir::new()?;
let mock_server = MockServer::start().await;
create_config_toml(
codex_home.path(),
CreateConfigTomlParams {
forced_workspace_id: Some("org-expected".to_string()),
requires_openai_auth: Some(true),
base_url: Some(format!("{}/v1", mock_server.uri())),
..Default::default()
},
)?;
write_models_cache(codex_home.path())?;
let unauthorized = ResponseTemplate::new(401).set_body_json(json!({
"error": { "message": "unauthorized" }
}));
let _responses_mock =
responses::mount_response_sequence(&mock_server, vec![unauthorized]).await;
let initial_id_token = encode_id_token(
&ChatGptIdTokenClaims::new()
.email("initial@example.com")
.plan_type("pro")
.chatgpt_account_id("org-expected"),
)?;
let refreshed_id_token = encode_id_token(
&ChatGptIdTokenClaims::new()
.email("refreshed@example.com")
.plan_type("pro")
.chatgpt_account_id("org-other"),
)?;
let mut mcp = McpProcess::new_with_env(codex_home.path(), &[("OPENAI_API_KEY", None)]).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let set_id = mcp
.send_chatgpt_auth_tokens_login_request(initial_id_token, "access-initial".to_string())
.await?;
let set_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(set_id)),
)
.await??;
let response: LoginAccountResponse = to_response(set_resp)?;
assert_eq!(response, LoginAccountResponse::ChatgptAuthTokens {});
let _updated = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("account/updated"),
)
.await??;
let thread_req = mcp
.send_thread_start_request(codex_app_server_protocol::ThreadStartParams {
model: Some("mock-model".to_string()),
..Default::default()
})
.await?;
let thread_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(thread_req)),
)
.await??;
let thread = to_response::<codex_app_server_protocol::ThreadStartResponse>(thread_resp)?;
let turn_req = mcp
.send_turn_start_request(codex_app_server_protocol::TurnStartParams {
thread_id: thread.thread.id.clone(),
input: vec![codex_app_server_protocol::UserInput::Text {
text: "Hello".to_string(),
text_elements: Vec::new(),
}],
..Default::default()
})
.await?;
let refresh_req: ServerRequest = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_request_message(),
)
.await??;
let ServerRequest::ChatgptAuthTokensRefresh { request_id, .. } = refresh_req else {
bail!("expected account/chatgptAuthTokens/refresh request, got {refresh_req:?}");
};
mcp.send_response(
request_id,
serde_json::to_value(ChatgptAuthTokensRefreshResponse {
access_token: "access-refreshed".to_string(),
id_token: refreshed_id_token,
})?,
)
.await?;
let _turn_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(turn_req)),
)
.await??;
let completed_notif: JSONRPCNotification = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("turn/completed"),
)
.await??;
let completed: TurnCompletedNotification = serde_json::from_value(
completed_notif
.params
.expect("turn/completed params must be present"),
)?;
assert_eq!(completed.turn.status, TurnStatus::Failed);
assert!(completed.turn.error.is_some());
Ok(())
}
#[tokio::test]
// Refresh returns a malformed id_token; turn fails.
async fn external_auth_refresh_invalid_id_token_fails_turn() -> Result<()> {
let codex_home = TempDir::new()?;
let mock_server = MockServer::start().await;
create_config_toml(
codex_home.path(),
CreateConfigTomlParams {
requires_openai_auth: Some(true),
base_url: Some(format!("{}/v1", mock_server.uri())),
..Default::default()
},
)?;
write_models_cache(codex_home.path())?;
let unauthorized = ResponseTemplate::new(401).set_body_json(json!({
"error": { "message": "unauthorized" }
}));
let _responses_mock =
responses::mount_response_sequence(&mock_server, vec![unauthorized]).await;
let initial_id_token = encode_id_token(
&ChatGptIdTokenClaims::new()
.email("initial@example.com")
.plan_type("pro")
.chatgpt_account_id("org-initial"),
)?;
let mut mcp = McpProcess::new_with_env(codex_home.path(), &[("OPENAI_API_KEY", None)]).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let set_id = mcp
.send_chatgpt_auth_tokens_login_request(initial_id_token, "access-initial".to_string())
.await?;
let set_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(set_id)),
)
.await??;
let response: LoginAccountResponse = to_response(set_resp)?;
assert_eq!(response, LoginAccountResponse::ChatgptAuthTokens {});
let _updated = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("account/updated"),
)
.await??;
let thread_req = mcp
.send_thread_start_request(codex_app_server_protocol::ThreadStartParams {
model: Some("mock-model".to_string()),
..Default::default()
})
.await?;
let thread_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(thread_req)),
)
.await??;
let thread = to_response::<codex_app_server_protocol::ThreadStartResponse>(thread_resp)?;
let turn_req = mcp
.send_turn_start_request(codex_app_server_protocol::TurnStartParams {
thread_id: thread.thread.id.clone(),
input: vec![codex_app_server_protocol::UserInput::Text {
text: "Hello".to_string(),
text_elements: Vec::new(),
}],
..Default::default()
})
.await?;
let refresh_req: ServerRequest = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_request_message(),
)
.await??;
let ServerRequest::ChatgptAuthTokensRefresh { request_id, .. } = refresh_req else {
bail!("expected account/chatgptAuthTokens/refresh request, got {refresh_req:?}");
};
mcp.send_response(
request_id,
serde_json::to_value(ChatgptAuthTokensRefreshResponse {
access_token: "access-refreshed".to_string(),
id_token: "not-a-jwt".to_string(),
})?,
)
.await?;
let _turn_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(turn_req)),
)
.await??;
let completed_notif: JSONRPCNotification = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("turn/completed"),
)
.await??;
let completed: TurnCompletedNotification = serde_json::from_value(
completed_notif
.params
.expect("turn/completed params must be present"),
)?;
assert_eq!(completed.turn.status, TurnStatus::Failed);
assert!(completed.turn.error.is_some());
Ok(())
}
#[tokio::test]
async fn login_account_api_key_succeeds_and_notifies() -> Result<()> {
let codex_home = TempDir::new()?;
@@ -944,71 +304,6 @@ async fn login_account_chatgpt_start_can_be_cancelled() -> Result<()> {
Ok(())
}
#[tokio::test]
// Serialize tests that launch the login server since it binds to a fixed port.
#[serial(login_port)]
async fn set_auth_token_cancels_active_chatgpt_login() -> Result<()> {
let codex_home = TempDir::new()?;
create_config_toml(codex_home.path(), CreateConfigTomlParams::default())?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
// Initiate the ChatGPT login flow
let request_id = mcp.send_login_account_chatgpt_request().await?;
let resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(request_id)),
)
.await??;
let login: LoginAccountResponse = to_response(resp)?;
let LoginAccountResponse::Chatgpt { login_id, .. } = login else {
bail!("unexpected login response: {login:?}");
};
let id_token = encode_id_token(
&ChatGptIdTokenClaims::new()
.email("embedded@example.com")
.plan_type("pro")
.chatgpt_account_id("org-embedded"),
)?;
// Set an external auth token instead of completing the ChatGPT login flow.
// This should cancel the active login attempt.
let set_id = mcp
.send_chatgpt_auth_tokens_login_request(id_token, "access-embedded".to_string())
.await?;
let set_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(set_id)),
)
.await??;
let response: LoginAccountResponse = to_response(set_resp)?;
assert_eq!(response, LoginAccountResponse::ChatgptAuthTokens {});
let _updated = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("account/updated"),
)
.await??;
// Verify that the active login attempt was cancelled.
// We check this by trying to cancel it and expecting a not found error.
let cancel_id = mcp
.send_cancel_login_account_request(CancelLoginAccountParams {
login_id: login_id.clone(),
})
.await?;
let cancel_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(cancel_id)),
)
.await??;
let cancel: CancelLoginAccountResponse = to_response(cancel_resp)?;
assert_eq!(cancel.status, CancelLoginAccountStatus::NotFound);
Ok(())
}
#[tokio::test]
// Serialize tests that launch the login server since it binds to a fixed port.
#[serial(login_port)]


@@ -13,13 +13,14 @@ use axum::extract::State;
use axum::http::HeaderMap;
use axum::http::StatusCode;
use axum::http::header::AUTHORIZATION;
use axum::routing::get;
use axum::routing::post;
use codex_app_server_protocol::AppInfo;
use codex_app_server_protocol::AppsListParams;
use codex_app_server_protocol::AppsListResponse;
use codex_app_server_protocol::JSONRPCResponse;
use codex_app_server_protocol::RequestId;
use codex_core::auth::AuthCredentialsStoreMode;
use codex_core::connectors::ConnectorInfo;
use pretty_assertions::assert_eq;
use rmcp::handler::server::ServerHandler;
use rmcp::model::JsonObject;
@@ -70,23 +71,19 @@ async fn list_apps_returns_empty_when_connectors_disabled() -> Result<()> {
#[tokio::test]
async fn list_apps_returns_connectors_with_accessible_flags() -> Result<()> {
let connectors = vec![
AppInfo {
id: "alpha".to_string(),
name: "Alpha".to_string(),
description: Some("Alpha connector".to_string()),
ConnectorInfo {
connector_id: "alpha".to_string(),
connector_name: "Alpha".to_string(),
connector_description: Some("Alpha connector".to_string()),
logo_url: Some("https://example.com/alpha.png".to_string()),
logo_url_dark: None,
distribution_channel: None,
install_url: None,
is_accessible: false,
},
AppInfo {
id: "beta".to_string(),
name: "beta".to_string(),
description: None,
ConnectorInfo {
connector_id: "beta".to_string(),
connector_name: "beta".to_string(),
connector_description: None,
logo_url: None,
logo_url_dark: None,
distribution_channel: None,
install_url: None,
is_accessible: false,
},
@@ -130,8 +127,6 @@ async fn list_apps_returns_connectors_with_accessible_flags() -> Result<()> {
name: "Beta App".to_string(),
description: None,
logo_url: None,
logo_url_dark: None,
distribution_channel: None,
install_url: Some("https://chatgpt.com/apps/beta/beta".to_string()),
is_accessible: true,
},
@@ -140,8 +135,6 @@ async fn list_apps_returns_connectors_with_accessible_flags() -> Result<()> {
name: "Alpha".to_string(),
description: Some("Alpha connector".to_string()),
logo_url: Some("https://example.com/alpha.png".to_string()),
logo_url_dark: None,
distribution_channel: None,
install_url: Some("https://chatgpt.com/apps/alpha/alpha".to_string()),
is_accessible: false,
},
@@ -157,23 +150,19 @@ async fn list_apps_returns_connectors_with_accessible_flags() -> Result<()> {
#[tokio::test]
async fn list_apps_paginates_results() -> Result<()> {
let connectors = vec![
AppInfo {
id: "alpha".to_string(),
name: "Alpha".to_string(),
description: Some("Alpha connector".to_string()),
ConnectorInfo {
connector_id: "alpha".to_string(),
connector_name: "Alpha".to_string(),
connector_description: Some("Alpha connector".to_string()),
logo_url: None,
logo_url_dark: None,
distribution_channel: None,
install_url: None,
is_accessible: false,
},
AppInfo {
id: "beta".to_string(),
name: "beta".to_string(),
description: None,
ConnectorInfo {
connector_id: "beta".to_string(),
connector_name: "beta".to_string(),
connector_description: None,
logo_url: None,
logo_url_dark: None,
distribution_channel: None,
install_url: None,
is_accessible: false,
},
@@ -217,8 +206,6 @@ async fn list_apps_paginates_results() -> Result<()> {
name: "Beta App".to_string(),
description: None,
logo_url: None,
logo_url_dark: None,
distribution_channel: None,
install_url: Some("https://chatgpt.com/apps/beta/beta".to_string()),
is_accessible: true,
}];
@@ -247,8 +234,6 @@ async fn list_apps_paginates_results() -> Result<()> {
name: "Alpha".to_string(),
description: Some("Alpha connector".to_string()),
logo_url: None,
logo_url_dark: None,
distribution_channel: None,
install_url: Some("https://chatgpt.com/apps/alpha/alpha".to_string()),
is_accessible: false,
}];
@@ -304,13 +289,13 @@ impl ServerHandler for AppListMcpServer {
}
async fn start_apps_server(
connectors: Vec<AppInfo>,
connectors: Vec<ConnectorInfo>,
tools: Vec<Tool>,
) -> Result<(String, JoinHandle<()>)> {
let state = AppsServerState {
expected_bearer: "Bearer chatgpt-token".to_string(),
expected_account_id: "account-123".to_string(),
response: json!({ "apps": connectors, "next_token": null }),
response: json!({ "connectors": connectors }),
};
let state = Arc::new(state);
let tools = Arc::new(tools);
@@ -328,11 +313,7 @@ async fn start_apps_server(
);
let router = Router::new()
.route("/connectors/directory/list", get(list_directory_connectors))
.route(
"/connectors/directory/list_workspace",
get(list_directory_connectors),
)
.route("/aip/connectors/list_accessible", post(list_connectors))
.with_state(state)
.nest_service("/api/codex/apps", mcp_service);
@@ -343,7 +324,7 @@ async fn start_apps_server(
Ok((format!("http://{addr}"), handle))
}
async fn list_directory_connectors(
async fn list_connectors(
State(state): State<Arc<AppsServerState>>,
headers: HeaderMap,
) -> Result<impl axum::response::IntoResponse, StatusCode> {


@@ -1,282 +0,0 @@
//! End-to-end compaction flow tests.
//!
//! Phases:
//! 1) Arrange: mock responses/compact endpoints + config.
//! 2) Act: start a thread and submit multiple turns to trigger auto-compaction.
//! 3) Assert: verify item/started + item/completed notifications for context compaction.
#![expect(clippy::expect_used)]
use anyhow::Result;
use app_test_support::ChatGptAuthFixture;
use app_test_support::McpProcess;
use app_test_support::to_response;
use app_test_support::write_chatgpt_auth;
use app_test_support::write_mock_responses_config_toml;
use codex_app_server_protocol::ItemCompletedNotification;
use codex_app_server_protocol::ItemStartedNotification;
use codex_app_server_protocol::JSONRPCNotification;
use codex_app_server_protocol::JSONRPCResponse;
use codex_app_server_protocol::RequestId;
use codex_app_server_protocol::ThreadItem;
use codex_app_server_protocol::ThreadStartParams;
use codex_app_server_protocol::ThreadStartResponse;
use codex_app_server_protocol::TurnCompletedNotification;
use codex_app_server_protocol::TurnStartParams;
use codex_app_server_protocol::TurnStartResponse;
use codex_app_server_protocol::UserInput as V2UserInput;
use codex_core::auth::AuthCredentialsStoreMode;
use codex_core::features::Feature;
use codex_protocol::models::ContentItem;
use codex_protocol::models::ResponseItem;
use core_test_support::responses;
use core_test_support::skip_if_no_network;
use pretty_assertions::assert_eq;
use std::collections::BTreeMap;
use tempfile::TempDir;
use tokio::time::timeout;
const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10);
const AUTO_COMPACT_LIMIT: i64 = 1_000;
const COMPACT_PROMPT: &str = "Summarize the conversation.";
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn auto_compaction_local_emits_started_and_completed_items() -> Result<()> {
skip_if_no_network!(Ok(()));
let server = responses::start_mock_server().await;
let sse1 = responses::sse(vec![
responses::ev_assistant_message("m1", "FIRST_REPLY"),
responses::ev_completed_with_tokens("r1", 70_000),
]);
let sse2 = responses::sse(vec![
responses::ev_assistant_message("m2", "SECOND_REPLY"),
responses::ev_completed_with_tokens("r2", 330_000),
]);
let sse3 = responses::sse(vec![
responses::ev_assistant_message("m3", "LOCAL_SUMMARY"),
responses::ev_completed_with_tokens("r3", 200),
]);
let sse4 = responses::sse(vec![
responses::ev_assistant_message("m4", "FINAL_REPLY"),
responses::ev_completed_with_tokens("r4", 120),
]);
responses::mount_sse_sequence(&server, vec![sse1, sse2, sse3, sse4]).await;
let codex_home = TempDir::new()?;
write_mock_responses_config_toml(
codex_home.path(),
&server.uri(),
&BTreeMap::default(),
AUTO_COMPACT_LIMIT,
None,
"mock_provider",
COMPACT_PROMPT,
)?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let thread_id = start_thread(&mut mcp).await?;
for message in ["first", "second", "third"] {
send_turn_and_wait(&mut mcp, &thread_id, message).await?;
}
let started = wait_for_context_compaction_started(&mut mcp).await?;
let completed = wait_for_context_compaction_completed(&mut mcp).await?;
let ThreadItem::ContextCompaction { id: started_id } = started.item else {
unreachable!("started item should be context compaction");
};
let ThreadItem::ContextCompaction { id: completed_id } = completed.item else {
unreachable!("completed item should be context compaction");
};
assert_eq!(started.thread_id, thread_id);
assert_eq!(completed.thread_id, thread_id);
assert_eq!(started_id, completed_id);
Ok(())
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn auto_compaction_remote_emits_started_and_completed_items() -> Result<()> {
skip_if_no_network!(Ok(()));
let server = responses::start_mock_server().await;
let sse1 = responses::sse(vec![
responses::ev_assistant_message("m1", "FIRST_REPLY"),
responses::ev_completed_with_tokens("r1", 70_000),
]);
let sse2 = responses::sse(vec![
responses::ev_assistant_message("m2", "SECOND_REPLY"),
responses::ev_completed_with_tokens("r2", 330_000),
]);
let sse3 = responses::sse(vec![
responses::ev_assistant_message("m3", "FINAL_REPLY"),
responses::ev_completed_with_tokens("r3", 120),
]);
let responses_log = responses::mount_sse_sequence(&server, vec![sse1, sse2, sse3]).await;
let compacted_history = vec![
ResponseItem::Message {
id: None,
role: "assistant".to_string(),
content: vec![ContentItem::OutputText {
text: "REMOTE_COMPACT_SUMMARY".to_string(),
}],
end_turn: None,
},
ResponseItem::Compaction {
encrypted_content: "ENCRYPTED_COMPACTION_SUMMARY".to_string(),
},
];
let compact_mock = responses::mount_compact_json_once(
&server,
serde_json::json!({ "output": compacted_history }),
)
.await;
let codex_home = TempDir::new()?;
let mut features = BTreeMap::default();
features.insert(Feature::RemoteCompaction, true);
write_mock_responses_config_toml(
codex_home.path(),
&server.uri(),
&features,
AUTO_COMPACT_LIMIT,
Some(true),
"openai",
COMPACT_PROMPT,
)?;
write_chatgpt_auth(
codex_home.path(),
ChatGptAuthFixture::new("access-chatgpt").plan_type("pro"),
AuthCredentialsStoreMode::File,
)?;
let server_base_url = format!("{}/v1", server.uri());
let mut mcp = McpProcess::new_with_env(
codex_home.path(),
&[
("OPENAI_BASE_URL", Some(server_base_url.as_str())),
("OPENAI_API_KEY", None),
],
)
.await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let thread_id = start_thread(&mut mcp).await?;
for message in ["first", "second", "third"] {
send_turn_and_wait(&mut mcp, &thread_id, message).await?;
}
let started = wait_for_context_compaction_started(&mut mcp).await?;
let completed = wait_for_context_compaction_completed(&mut mcp).await?;
let ThreadItem::ContextCompaction { id: started_id } = started.item else {
unreachable!("started item should be context compaction");
};
let ThreadItem::ContextCompaction { id: completed_id } = completed.item else {
unreachable!("completed item should be context compaction");
};
assert_eq!(started.thread_id, thread_id);
assert_eq!(completed.thread_id, thread_id);
assert_eq!(started_id, completed_id);
let compact_requests = compact_mock.requests();
assert_eq!(compact_requests.len(), 1);
assert_eq!(compact_requests[0].path(), "/v1/responses/compact");
let response_requests = responses_log.requests();
assert_eq!(response_requests.len(), 3);
Ok(())
}
async fn start_thread(mcp: &mut McpProcess) -> Result<String> {
let thread_id = mcp
.send_thread_start_request(ThreadStartParams {
model: Some("mock-model".to_string()),
..Default::default()
})
.await?;
let thread_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(thread_id)),
)
.await??;
let ThreadStartResponse { thread, .. } = to_response::<ThreadStartResponse>(thread_resp)?;
Ok(thread.id)
}
async fn send_turn_and_wait(mcp: &mut McpProcess, thread_id: &str, text: &str) -> Result<String> {
let turn_id = mcp
.send_turn_start_request(TurnStartParams {
thread_id: thread_id.to_string(),
input: vec![V2UserInput::Text {
text: text.to_string(),
text_elements: Vec::new(),
}],
..Default::default()
})
.await?;
let turn_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(turn_id)),
)
.await??;
let TurnStartResponse { turn } = to_response::<TurnStartResponse>(turn_resp)?;
wait_for_turn_completed(mcp, &turn.id).await?;
Ok(turn.id)
}
async fn wait_for_turn_completed(mcp: &mut McpProcess, turn_id: &str) -> Result<()> {
loop {
let notification: JSONRPCNotification = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("turn/completed"),
)
.await??;
let completed: TurnCompletedNotification =
serde_json::from_value(notification.params.clone().expect("turn/completed params"))?;
if completed.turn.id == turn_id {
return Ok(());
}
}
}
async fn wait_for_context_compaction_started(
mcp: &mut McpProcess,
) -> Result<ItemStartedNotification> {
loop {
let notification: JSONRPCNotification = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("item/started"),
)
.await??;
let started: ItemStartedNotification =
serde_json::from_value(notification.params.clone().expect("item/started params"))?;
if let ThreadItem::ContextCompaction { .. } = started.item {
return Ok(started);
}
}
}
async fn wait_for_context_compaction_completed(
mcp: &mut McpProcess,
) -> Result<ItemCompletedNotification> {
loop {
let notification: JSONRPCNotification = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("item/completed"),
)
.await??;
let completed: ItemCompletedNotification =
serde_json::from_value(notification.params.clone().expect("item/completed params"))?;
if let ThreadItem::ContextCompaction { .. } = completed.item {
return Ok(completed);
}
}
}


@@ -2,7 +2,6 @@ mod account;
mod analytics;
mod app_list;
mod collaboration_mode_list;
mod compaction;
mod config_rpc;
mod dynamic_tools;
mod initialize;


@@ -2,9 +2,7 @@ use anyhow::Result;
use app_test_support::McpProcess;
use app_test_support::create_fake_rollout_with_text_elements;
use app_test_support::create_mock_responses_server_repeating_assistant;
use app_test_support::rollout_path;
use app_test_support::to_response;
use chrono::Utc;
use codex_app_server_protocol::JSONRPCResponse;
use codex_app_server_protocol::RequestId;
use codex_app_server_protocol::SessionSource;
@@ -24,14 +22,12 @@ use codex_protocol::user_input::TextElement;
use core_test_support::responses;
use core_test_support::skip_if_no_network;
use pretty_assertions::assert_eq;
use std::fs::FileTimes;
use std::path::Path;
use std::path::PathBuf;
use tempfile::TempDir;
use tokio::time::timeout;
const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10);
const CODEX_5_2_INSTRUCTIONS_TEMPLATE_DEFAULT: &str = "You are Codex, a coding agent based on GPT-5. You and the user share the same workspace and collaborate to achieve the user's goals.";
const DEFAULT_BASE_INSTRUCTIONS: &str = "You are Codex, based on GPT-5. You are running as a coding agent in the Codex CLI on a user's computer.";
#[tokio::test]
async fn thread_resume_returns_original_thread() -> Result<()> {
@@ -151,116 +147,6 @@ async fn thread_resume_returns_rollout_history() -> Result<()> {
Ok(())
}
#[tokio::test]
async fn thread_resume_without_overrides_does_not_change_updated_at_or_mtime() -> Result<()> {
let server = create_mock_responses_server_repeating_assistant("Done").await;
let codex_home = TempDir::new()?;
let rollout = setup_rollout_fixture(codex_home.path(), &server.uri())?;
let thread_id = rollout.conversation_id.clone();
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let resume_id = mcp
.send_thread_resume_request(ThreadResumeParams {
thread_id: thread_id.clone(),
..Default::default()
})
.await?;
let resume_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(resume_id)),
)
.await??;
let ThreadResumeResponse { thread, .. } = to_response::<ThreadResumeResponse>(resume_resp)?;
assert_eq!(thread.updated_at, rollout.expected_updated_at);
let after_modified = std::fs::metadata(&rollout.rollout_file_path)?.modified()?;
assert_eq!(after_modified, rollout.before_modified);
let turn_id = mcp
.send_turn_start_request(TurnStartParams {
thread_id,
input: vec![UserInput::Text {
text: "Hello".to_string(),
text_elements: Vec::new(),
}],
..Default::default()
})
.await?;
timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(turn_id)),
)
.await??;
timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("turn/completed"),
)
.await??;
let after_turn_modified = std::fs::metadata(&rollout.rollout_file_path)?.modified()?;
assert!(after_turn_modified > rollout.before_modified);
Ok(())
}
#[tokio::test]
async fn thread_resume_with_overrides_defers_updated_at_until_turn_start() -> Result<()> {
let server = create_mock_responses_server_repeating_assistant("Done").await;
let codex_home = TempDir::new()?;
let rollout = setup_rollout_fixture(codex_home.path(), &server.uri())?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let resume_id = mcp
.send_thread_resume_request(ThreadResumeParams {
thread_id: rollout.conversation_id.clone(),
model: Some("mock-model".to_string()),
..Default::default()
})
.await?;
let resume_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(resume_id)),
)
.await??;
let ThreadResumeResponse { thread, .. } = to_response::<ThreadResumeResponse>(resume_resp)?;
assert_eq!(thread.updated_at, rollout.expected_updated_at);
let after_resume_modified = std::fs::metadata(&rollout.rollout_file_path)?.modified()?;
assert_eq!(after_resume_modified, rollout.before_modified);
let turn_id = mcp
.send_turn_start_request(TurnStartParams {
thread_id: rollout.conversation_id,
input: vec![UserInput::Text {
text: "Hello".to_string(),
text_elements: Vec::new(),
}],
..Default::default()
})
.await?;
timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(turn_id)),
)
.await??;
timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("turn/completed"),
)
.await??;
let after_turn_modified = std::fs::metadata(&rollout.rollout_file_path)?.modified()?;
assert!(after_turn_modified > rollout.before_modified);
Ok(())
}
#[tokio::test]
async fn thread_resume_prefers_path_over_thread_id() -> Result<()> {
let server = create_mock_responses_server_repeating_assistant("Done").await;
@@ -368,7 +254,7 @@ async fn thread_resume_supports_history_and_overrides() -> Result<()> {
}
#[tokio::test]
async fn thread_resume_accepts_personality_override() -> Result<()> {
async fn thread_resume_accepts_personality_override_v2() -> Result<()> {
skip_if_no_network!(Ok(()));
let server = responses::start_mock_server().await;
@@ -438,14 +324,14 @@ async fn thread_resume_accepts_personality_override() -> Result<()> {
let request = response_mock.single_request();
let developer_texts = request.message_input_texts("developer");
assert!(
developer_texts
!developer_texts
.iter()
.any(|text| text.contains("<personality_spec>")),
"expected a personality update message in developer input, got {developer_texts:?}"
"did not expect a personality update message in developer input, got {developer_texts:?}"
);
let instructions_text = request.instructions_text();
assert!(
instructions_text.contains(CODEX_5_2_INSTRUCTIONS_TEMPLATE_DEFAULT),
instructions_text.contains(DEFAULT_BASE_INSTRUCTIONS),
"expected default base instructions from history, got {instructions_text:?}"
);
@@ -459,7 +345,7 @@ fn create_config_toml(codex_home: &std::path::Path, server_uri: &str) -> std::io
config_toml,
format!(
r#"
model = "gpt-5.2-codex"
model = "mock-model"
approval_policy = "never"
sandbox_mode = "read-only"
@@ -467,7 +353,6 @@ model_provider = "mock_provider"
[features]
remote_models = false
personality = true
[model_providers.mock_provider]
name = "Mock provider for test"
@@ -479,51 +364,3 @@ stream_max_retries = 0
),
)
}
fn set_rollout_mtime(path: &Path, updated_at_rfc3339: &str) -> Result<()> {
let parsed = chrono::DateTime::parse_from_rfc3339(updated_at_rfc3339)?.with_timezone(&Utc);
let times = FileTimes::new().set_modified(parsed.into());
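// Open in append mode purely to obtain a writable handle for set_times;
// append avoids truncating the rollout contents.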
std::fs::OpenOptions::new()
.append(true)
.open(path)?
.set_times(times)?;
Ok(())
}
struct RolloutFixture {
conversation_id: String,
rollout_file_path: PathBuf,
before_modified: std::time::SystemTime,
expected_updated_at: i64,
}
fn setup_rollout_fixture(codex_home: &Path, server_uri: &str) -> Result<RolloutFixture> {
create_config_toml(codex_home, server_uri)?;
let preview = "Saved user message";
let filename_ts = "2025-01-05T12-00-00";
let meta_rfc3339 = "2025-01-05T12:00:00Z";
let expected_updated_at_rfc3339 = "2025-01-07T00:00:00Z";
let conversation_id = create_fake_rollout_with_text_elements(
codex_home,
filename_ts,
meta_rfc3339,
preview,
Vec::new(),
Some("mock_provider"),
None,
)?;
let rollout_file_path = rollout_path(codex_home, filename_ts, &conversation_id);
set_rollout_mtime(rollout_file_path.as_path(), expected_updated_at_rfc3339)?;
let before_modified = std::fs::metadata(&rollout_file_path)?.modified()?;
let expected_updated_at = chrono::DateTime::parse_from_rfc3339(expected_updated_at_rfc3339)?
.with_timezone(&Utc)
.timestamp();
Ok(RolloutFixture {
conversation_id,
rollout_file_path,
before_modified,
expected_updated_at,
})
}

View File

@@ -63,7 +63,7 @@ async fn turn_start_sends_originator_header() -> Result<()> {
codex_home.path(),
&server.uri(),
"never",
&BTreeMap::from([(Feature::Personality, true)]),
&BTreeMap::default(),
)?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
@@ -138,7 +138,7 @@ async fn turn_start_emits_user_message_item_with_text_elements() -> Result<()> {
codex_home.path(),
&server.uri(),
"never",
&BTreeMap::from([(Feature::Personality, true)]),
&BTreeMap::default(),
)?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
@@ -230,7 +230,7 @@ async fn turn_start_emits_notifications_and_accepts_model_override() -> Result<(
codex_home.path(),
&server.uri(),
"never",
&BTreeMap::from([(Feature::Personality, true)]),
&BTreeMap::default(),
)?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
@@ -425,7 +425,7 @@ async fn turn_start_accepts_personality_override_v2() -> Result<()> {
codex_home.path(),
&server.uri(),
"never",
&BTreeMap::from([(Feature::Personality, true)]),
&BTreeMap::default(),
)?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
@@ -473,7 +473,6 @@ async fn turn_start_accepts_personality_override_v2() -> Result<()> {
if developer_texts.is_empty() {
eprintln!("request body: {}", request.body_json());
}
assert!(
developer_texts
.iter()

View File

@@ -1,5 +1,4 @@
use crate::types::CodeTaskDetailsResponse;
use crate::types::ConfigFileResponse;
use crate::types::CreditStatusDetails;
use crate::types::PaginatedListTaskListItem;
use crate::types::RateLimitStatusPayload;
@@ -245,20 +244,6 @@ impl Client {
self.decode_json::<TurnAttemptsSiblingTurnsResponse>(&url, &ct, &body)
}
/// Fetch the managed requirements file from codex-backend.
///
/// `GET /api/codex/config/requirements` (Codex API style) or
/// `GET /wham/config/requirements` (ChatGPT backend-api style).
pub async fn get_config_requirements_file(&self) -> Result<ConfigFileResponse> {
let url = match self.path_style {
PathStyle::CodexApi => format!("{}/api/codex/config/requirements", self.base_url),
PathStyle::ChatGptApi => format!("{}/wham/config/requirements", self.base_url),
};
let req = self.http.get(&url).headers(self.headers());
let (body, ct) = self.exec_request(req, "GET", &url).await?;
self.decode_json::<ConfigFileResponse>(&url, &ct, &body)
}
/// Create a new task (user turn) by POSTing to the appropriate backend path
/// based on `path_style`. Returns the created task id.
pub async fn create_task(&self, request_body: serde_json::Value) -> Result<String> {
@@ -351,7 +336,6 @@ impl Client {
fn map_plan_type(plan_type: crate::types::PlanType) -> AccountPlanType {
match plan_type {
crate::types::PlanType::Free => AccountPlanType::Free,
crate::types::PlanType::Go => AccountPlanType::Go,
crate::types::PlanType::Plus => AccountPlanType::Plus,
crate::types::PlanType::Pro => AccountPlanType::Pro,
crate::types::PlanType::Team => AccountPlanType::Team,
@@ -359,6 +343,7 @@ impl Client {
crate::types::PlanType::Enterprise => AccountPlanType::Enterprise,
crate::types::PlanType::Edu | crate::types::PlanType::Education => AccountPlanType::Edu,
crate::types::PlanType::Guest
| crate::types::PlanType::Go
| crate::types::PlanType::FreeWorkspace
| crate::types::PlanType::Quorum
| crate::types::PlanType::K12 => AccountPlanType::Unknown,

View File

@@ -4,7 +4,6 @@ pub mod types;
pub use client::Client;
pub use types::CodeTaskDetailsResponse;
pub use types::CodeTaskDetailsResponseExt;
pub use types::ConfigFileResponse;
pub use types::PaginatedListTaskListItem;
pub use types::TaskListItem;
pub use types::TurnAttemptsSiblingTurnsResponse;

View File

@@ -1,4 +1,3 @@
pub use codex_backend_openapi_models::models::ConfigFileResponse;
pub use codex_backend_openapi_models::models::CreditStatusDetails;
pub use codex_backend_openapi_models::models::PaginatedListTaskListItem;
pub use codex_backend_openapi_models::models::PlanType;

View File

@@ -17,8 +17,6 @@ serde = { workspace = true, features = ["derive"] }
serde_json = { workspace = true }
tokio = { workspace = true, features = ["full"] }
codex-git = { workspace = true }
urlencoding = { workspace = true }
[dev-dependencies]
pretty_assertions = { workspace = true }
tempfile = { workspace = true }

View File

@@ -5,21 +5,13 @@ use crate::chatgpt_token::get_chatgpt_token_data;
use crate::chatgpt_token::init_chatgpt_token_from_auth;
use anyhow::Context;
use serde::Serialize;
use serde::de::DeserializeOwned;
use std::time::Duration;
/// Make a GET request to the ChatGPT backend API.
pub(crate) async fn chatgpt_get_request<T: DeserializeOwned>(
config: &Config,
path: String,
) -> anyhow::Result<T> {
chatgpt_get_request_with_timeout(config, path, None).await
}
pub(crate) async fn chatgpt_get_request_with_timeout<T: DeserializeOwned>(
config: &Config,
path: String,
timeout: Option<Duration>,
) -> anyhow::Result<T> {
let chatgpt_base_url = &config.chatgpt_base_url;
init_chatgpt_token_from_auth(&config.codex_home, config.cli_auth_credentials_store_mode)
@@ -36,17 +28,48 @@ pub(crate) async fn chatgpt_get_request_with_timeout<T: DeserializeOwned>(
anyhow::anyhow!("ChatGPT account ID not available, please re-run `codex login`")
});
let mut request = client
let response = client
.get(&url)
.bearer_auth(&token.access_token)
.header("chatgpt-account-id", account_id?)
.header("Content-Type", "application/json");
if let Some(timeout) = timeout {
request = request.timeout(timeout);
}
let response = request.send().await.context("Failed to send request")?;
.header("Content-Type", "application/json")
.send()
.await
.context("Failed to send request")?;
if response.status().is_success() {
let result: T = response
.json()
.await
.context("Failed to parse JSON response")?;
Ok(result)
} else {
let status = response.status();
let body = response.text().await.unwrap_or_default();
anyhow::bail!("Request failed with status {status}: {body}")
}
}
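// For orientation, a minimal caller sketch of the GET helper above. The
// `StatusResponse` shape and the "/wham/status" path are hypothetical,
// included for illustration only.
#[allow(dead_code)]
#[derive(serde::Deserialize)]
struct StatusResponse {
    ok: bool,
}

#[allow(dead_code)]
async fn fetch_status(config: &Config) -> anyhow::Result<StatusResponse> {
    chatgpt_get_request(config, "/wham/status".to_string()).await
}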
pub(crate) async fn chatgpt_post_request<T: DeserializeOwned, P: Serialize>(
config: &Config,
access_token: &str,
account_id: &str,
path: &str,
payload: &P,
) -> anyhow::Result<T> {
let chatgpt_base_url = &config.chatgpt_base_url;
let client = create_client();
let url = format!("{chatgpt_base_url}{path}");
let response = client
.post(&url)
.bearer_auth(access_token)
.header("chatgpt-account-id", account_id)
.header("Content-Type", "application/json")
.json(payload)
.send()
.await
.context("Failed to send request")?;
if response.status().is_success() {
let result: T = response

View File

@@ -1,45 +1,43 @@
use std::collections::HashMap;
use codex_core::config::Config;
use codex_core::features::Feature;
use serde::Deserialize;
use std::time::Duration;
use serde::Serialize;
use crate::chatgpt_client::chatgpt_get_request_with_timeout;
use crate::chatgpt_client::chatgpt_post_request;
use crate::chatgpt_token::get_chatgpt_token_data;
use crate::chatgpt_token::init_chatgpt_token_from_auth;
pub use codex_core::connectors::AppInfo;
pub use codex_core::connectors::ConnectorInfo;
pub use codex_core::connectors::connector_display_label;
use codex_core::connectors::connector_install_url;
pub use codex_core::connectors::list_accessible_connectors_from_mcp_tools;
use codex_core::connectors::merge_connectors;
#[derive(Debug, Deserialize)]
struct DirectoryListResponse {
apps: Vec<DirectoryApp>,
#[serde(alias = "nextToken")]
next_token: Option<String>,
#[derive(Debug, Serialize)]
struct ListConnectorsRequest {
principals: Vec<Principal>,
}
#[derive(Debug, Deserialize, Clone)]
struct DirectoryApp {
#[derive(Debug, Serialize)]
struct Principal {
#[serde(rename = "type")]
principal_type: PrincipalType,
id: String,
name: String,
description: Option<String>,
#[serde(alias = "logoUrl")]
logo_url: Option<String>,
#[serde(alias = "logoUrlDark")]
logo_url_dark: Option<String>,
#[serde(alias = "distributionChannel")]
distribution_channel: Option<String>,
visibility: Option<String>,
}
const DIRECTORY_CONNECTORS_TIMEOUT: Duration = Duration::from_secs(60);
#[derive(Debug, Serialize)]
#[serde(rename_all = "SCREAMING_SNAKE_CASE")]
enum PrincipalType {
User,
}
pub async fn list_connectors(config: &Config) -> anyhow::Result<Vec<AppInfo>> {
if !config.features.enabled(Feature::Apps) {
#[derive(Debug, Deserialize)]
struct ListConnectorsResponse {
connectors: Vec<ConnectorInfo>,
}
pub async fn list_connectors(config: &Config) -> anyhow::Result<Vec<ConnectorInfo>> {
if !config.features.enabled(Feature::Connectors) {
return Ok(Vec::new());
}
let (connectors_result, accessible_result) = tokio::join!(
@@ -48,12 +46,11 @@ pub async fn list_connectors(config: &Config) -> anyhow::Result<Vec<AppInfo>> {
);
let connectors = connectors_result?;
let accessible = accessible_result?;
let merged = merge_connectors(connectors, accessible);
Ok(filter_disallowed_connectors(merged))
Ok(merge_connectors(connectors, accessible))
}
pub async fn list_all_connectors(config: &Config) -> anyhow::Result<Vec<AppInfo>> {
if !config.features.enabled(Feature::Apps) {
pub async fn list_all_connectors(config: &Config) -> anyhow::Result<Vec<ConnectorInfo>> {
if !config.features.enabled(Feature::Connectors) {
return Ok(Vec::new());
}
init_chatgpt_token_from_auth(&config.codex_home, config.cli_auth_credentials_store_mode)
@@ -61,149 +58,56 @@ pub async fn list_all_connectors(config: &Config) -> anyhow::Result<Vec<AppInfo>
let token_data =
get_chatgpt_token_data().ok_or_else(|| anyhow::anyhow!("ChatGPT token not available"))?;
let mut apps = list_directory_connectors(config).await?;
if token_data.id_token.is_workspace_account() {
apps.extend(list_workspace_connectors(config).await?);
}
let mut connectors = merge_directory_apps(apps)
.into_iter()
.map(directory_app_to_app_info)
.collect::<Vec<_>>();
let user_id = token_data
.id_token
.chatgpt_user_id
.as_deref()
.ok_or_else(|| {
anyhow::anyhow!("ChatGPT user ID not available, please re-run `codex login`")
})?;
let account_id = token_data
.id_token
.chatgpt_account_id
.as_deref()
.ok_or_else(|| {
anyhow::anyhow!("ChatGPT account ID not available, please re-run `codex login`")
})?;
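// Build the composite principal id the request sends, pairing the ChatGPT
// user id with the account id as "<user_id>__<account_id>".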
let principal_id = format!("{user_id}__{account_id}");
let request = ListConnectorsRequest {
principals: vec![Principal {
principal_type: PrincipalType::User,
id: principal_id,
}],
};
let response: ListConnectorsResponse = chatgpt_post_request(
config,
token_data.access_token.as_str(),
account_id,
"/aip/connectors/list_accessible?skip_actions=true&external_logos=true",
&request,
)
.await?;
let mut connectors = response.connectors;
for connector in &mut connectors {
let install_url = match connector.install_url.take() {
Some(install_url) => install_url,
None => connector_install_url(&connector.name, &connector.id),
None => connector_install_url(&connector.connector_name, &connector.connector_id),
};
connector.name = normalize_connector_name(&connector.name, &connector.id);
connector.description = normalize_connector_value(connector.description.as_deref());
connector.connector_name =
normalize_connector_name(&connector.connector_name, &connector.connector_id);
connector.connector_description =
normalize_connector_value(connector.connector_description.as_deref());
connector.install_url = Some(install_url);
connector.is_accessible = false;
}
connectors.sort_by(|left, right| {
left.name
.cmp(&right.name)
.then_with(|| left.id.cmp(&right.id))
left.connector_name
.cmp(&right.connector_name)
.then_with(|| left.connector_id.cmp(&right.connector_id))
});
Ok(connectors)
}
async fn list_directory_connectors(config: &Config) -> anyhow::Result<Vec<DirectoryApp>> {
let mut apps = Vec::new();
let mut next_token: Option<String> = None;
loop {
let path = match next_token.as_deref() {
Some(token) => {
let encoded_token = urlencoding::encode(token);
format!("/connectors/directory/list?tier=categorized&token={encoded_token}")
}
None => "/connectors/directory/list?tier=categorized".to_string(),
};
let response: DirectoryListResponse =
chatgpt_get_request_with_timeout(config, path, Some(DIRECTORY_CONNECTORS_TIMEOUT))
.await?;
apps.extend(
response
.apps
.into_iter()
.filter(|app| !is_hidden_directory_app(app)),
);
next_token = response
.next_token
.map(|token| token.trim().to_string())
.filter(|token| !token.is_empty());
if next_token.is_none() {
break;
}
}
Ok(apps)
}
async fn list_workspace_connectors(config: &Config) -> anyhow::Result<Vec<DirectoryApp>> {
let response: anyhow::Result<DirectoryListResponse> = chatgpt_get_request_with_timeout(
config,
"/connectors/directory/list_workspace".to_string(),
Some(DIRECTORY_CONNECTORS_TIMEOUT),
)
.await;
match response {
Ok(response) => Ok(response
.apps
.into_iter()
.filter(|app| !is_hidden_directory_app(app))
.collect()),
Err(_) => Ok(Vec::new()),
}
}
fn merge_directory_apps(apps: Vec<DirectoryApp>) -> Vec<DirectoryApp> {
let mut merged: HashMap<String, DirectoryApp> = HashMap::new();
for app in apps {
if let Some(existing) = merged.get_mut(&app.id) {
merge_directory_app(existing, app);
} else {
merged.insert(app.id.clone(), app);
}
}
merged.into_values().collect()
}
fn merge_directory_app(existing: &mut DirectoryApp, incoming: DirectoryApp) {
let DirectoryApp {
id: _,
name,
description,
logo_url,
logo_url_dark,
distribution_channel,
visibility: _,
} = incoming;
let incoming_name_is_empty = name.trim().is_empty();
if existing.name.trim().is_empty() && !incoming_name_is_empty {
existing.name = name;
}
let incoming_description_present = description
.as_deref()
.map(|value| !value.trim().is_empty())
.unwrap_or(false);
let existing_description_present = existing
.description
.as_deref()
.map(|value| !value.trim().is_empty())
.unwrap_or(false);
if !existing_description_present && incoming_description_present {
existing.description = description;
}
if existing.logo_url.is_none() && logo_url.is_some() {
existing.logo_url = logo_url;
}
if existing.logo_url_dark.is_none() && logo_url_dark.is_some() {
existing.logo_url_dark = logo_url_dark;
}
if existing.distribution_channel.is_none() && distribution_channel.is_some() {
existing.distribution_channel = distribution_channel;
}
}
fn is_hidden_directory_app(app: &DirectoryApp) -> bool {
matches!(app.visibility.as_deref(), Some("HIDDEN"))
}
fn directory_app_to_app_info(app: DirectoryApp) -> AppInfo {
AppInfo {
id: app.id,
name: app.name,
description: app.description,
logo_url: app.logo_url,
logo_url_dark: app.logo_url_dark,
distribution_channel: app.distribution_channel,
install_url: None,
is_accessible: false,
}
}
fn normalize_connector_name(name: &str, connector_id: &str) -> String {
let trimmed = name.trim();
if trimmed.is_empty() {
@@ -219,91 +123,3 @@ fn normalize_connector_value(value: Option<&str>) -> Option<String> {
.filter(|value| !value.is_empty())
.map(str::to_string)
}
const ALLOWED_APPS_SDK_APPS: &[&str] = &["asdk_app_69781557cc1481919cf5e9824fa2e792"];
const DISALLOWED_CONNECTOR_IDS: &[&str] = &[
"asdk_app_6938a94a61d881918ef32cb999ff937c",
"connector_2b0a9009c9c64bf9933a3dae3f2b1254",
"connector_68de829bf7648191acd70a907364c67c",
];
const DISALLOWED_CONNECTOR_PREFIX: &str = "connector_openai_";
fn filter_disallowed_connectors(connectors: Vec<AppInfo>) -> Vec<AppInfo> {
// TODO: Support Apps SDK connectors.
connectors
.into_iter()
.filter(is_connector_allowed)
.collect()
}
fn is_connector_allowed(connector: &AppInfo) -> bool {
let connector_id = connector.id.as_str();
if connector_id.starts_with(DISALLOWED_CONNECTOR_PREFIX)
|| DISALLOWED_CONNECTOR_IDS.contains(&connector_id)
{
return false;
}
if connector_id.starts_with("asdk_app_") {
return ALLOWED_APPS_SDK_APPS.contains(&connector_id);
}
true
}
#[cfg(test)]
mod tests {
use super::*;
use pretty_assertions::assert_eq;
fn app(id: &str) -> AppInfo {
AppInfo {
id: id.to_string(),
name: id.to_string(),
description: None,
logo_url: None,
logo_url_dark: None,
distribution_channel: None,
install_url: None,
is_accessible: false,
}
}
#[test]
fn filters_internal_asdk_connectors() {
let filtered = filter_disallowed_connectors(vec![app("asdk_app_hidden"), app("alpha")]);
assert_eq!(filtered, vec![app("alpha")]);
}
#[test]
fn allows_whitelisted_asdk_connectors() {
let filtered = filter_disallowed_connectors(vec![
app("asdk_app_69781557cc1481919cf5e9824fa2e792"),
app("beta"),
]);
assert_eq!(
filtered,
vec![
app("asdk_app_69781557cc1481919cf5e9824fa2e792"),
app("beta")
]
);
}
#[test]
fn filters_openai_connectors() {
let filtered = filter_disallowed_connectors(vec![
app("connector_openai_foo"),
app("connector_openai_bar"),
app("gamma"),
]);
assert_eq!(filtered, vec![app("gamma")]);
}
#[test]
fn filters_disallowed_connector_ids() {
let filtered = filter_disallowed_connectors(vec![
app("asdk_app_6938a94a61d881918ef32cb999ff937c"),
app("delta"),
]);
assert_eq!(filtered, vec![app("delta")]);
}
}

View File

@@ -136,8 +136,7 @@ async fn run_command_under_sandbox(
if let SandboxType::Windows = sandbox_type {
#[cfg(target_os = "windows")]
{
use codex_core::windows_sandbox::WindowsSandboxLevelExt;
use codex_protocol::config_types::WindowsSandboxLevel;
use codex_core::features::Feature;
use codex_windows_sandbox::run_windows_sandbox_capture;
use codex_windows_sandbox::run_windows_sandbox_capture_elevated;
@@ -148,10 +147,8 @@ async fn run_command_under_sandbox(
let env_map = env.clone();
let command_vec = command.clone();
let base_dir = config.codex_home.clone();
let use_elevated = matches!(
WindowsSandboxLevel::from_config(&config),
WindowsSandboxLevel::Elevated
);
let use_elevated = config.features.enabled(Feature::WindowsSandbox)
&& config.features.enabled(Feature::WindowsSandboxElevated);
// Preflight audit is invoked elsewhere at the appropriate times.
let res = tokio::task::spawn_blocking(move || {

View File

@@ -225,7 +225,7 @@ pub async fn run_login_status(cli_config_overrides: CliConfigOverrides) -> ! {
let config = load_config_or_exit(cli_config_overrides).await;
match CodexAuth::from_auth_storage(&config.codex_home, config.cli_auth_credentials_store_mode) {
Ok(Some(auth)) => match auth.api_auth_mode() {
Ok(Some(auth)) => match auth.mode {
AuthMode::ApiKey => match auth.get_token() {
Ok(api_key) => {
eprintln!("Logged in using an API key - {}", safe_format_key(&api_key));
@@ -240,10 +240,6 @@ pub async fn run_login_status(cli_config_overrides: CliConfigOverrides) -> ! {
eprintln!("Logged in using ChatGPT");
std::process::exit(0);
}
AuthMode::ChatgptAuthTokens => {
eprintln!("Logged in using ChatGPT (external tokens)");
std::process::exit(0);
}
},
Ok(None) => {
eprintln!("Not logged in");

View File

@@ -39,9 +39,6 @@ use crate::mcp_cmd::McpCli;
use codex_core::config::Config;
use codex_core::config::ConfigOverrides;
use codex_core::config::edit::ConfigEditsBuilder;
use codex_core::config::find_codex_home;
use codex_core::features::Stage;
use codex_core::features::is_known_feature_key;
use codex_core::terminal::TerminalName;
@@ -144,13 +141,13 @@ struct CompletionCommand {
#[derive(Debug, Parser)]
struct ResumeCommand {
/// Conversation/session id (UUID) or thread name. A UUID takes precedence if the value parses as one.
/// Conversation/session id (UUID). When provided, resumes this session.
/// If omitted, use --last to pick the most recent recorded session.
#[arg(value_name = "SESSION_ID")]
session_id: Option<String>,
/// Continue the most recent session without showing the picker.
#[arg(long = "last", default_value_t = false)]
#[arg(long = "last", default_value_t = false, conflicts_with = "session_id")]
last: bool,
/// Show all sessions (disables cwd filtering and shows CWD column).
@@ -323,7 +320,6 @@ fn format_exit_messages(exit_info: AppExitInfo, color_enabled: bool) -> Vec<Stri
let AppExitInfo {
token_usage,
thread_id: conversation_id,
thread_name,
..
} = exit_info;
@@ -336,9 +332,8 @@ fn format_exit_messages(exit_info: AppExitInfo, color_enabled: bool) -> Vec<Stri
codex_core::protocol::FinalOutput::from(token_usage)
)];
if let Some(resume_cmd) =
codex_core::util::resume_command(thread_name.as_deref(), conversation_id)
{
if let Some(session_id) = conversation_id {
let resume_cmd = format!("codex resume {session_id}");
let command = if color_enabled {
resume_cmd.cyan().to_string()
} else {
@@ -453,16 +448,6 @@ struct FeaturesCli {
enum FeaturesSubcommand {
/// List known features with their stage and effective state.
List,
/// Enable a feature in config.toml.
Enable(FeatureSetArgs),
/// Disable a feature in config.toml.
Disable(FeatureSetArgs),
}
#[derive(Debug, Parser)]
struct FeatureSetArgs {
/// Feature key to update (for example: unified_exec).
feature: String,
}
fn stage_str(stage: codex_core::features::Stage) -> &'static str {
@@ -726,69 +711,12 @@ async fn cli_main(codex_linux_sandbox_exe: Option<PathBuf>) -> anyhow::Result<()
println!("{name:<name_width$} {stage:<stage_width$} {enabled}");
}
}
FeaturesSubcommand::Enable(FeatureSetArgs { feature }) => {
enable_feature_in_config(&interactive, &feature).await?;
}
FeaturesSubcommand::Disable(FeatureSetArgs { feature }) => {
disable_feature_in_config(&interactive, &feature).await?;
}
},
}
Ok(())
}
async fn enable_feature_in_config(interactive: &TuiCli, feature: &str) -> anyhow::Result<()> {
FeatureToggles::validate_feature(feature)?;
let codex_home = find_codex_home()?;
ConfigEditsBuilder::new(&codex_home)
.with_profile(interactive.config_profile.as_deref())
.set_feature_enabled(feature, true)
.apply()
.await?;
println!("Enabled feature `{feature}` in config.toml.");
maybe_print_under_development_feature_warning(&codex_home, interactive, feature);
Ok(())
}
async fn disable_feature_in_config(interactive: &TuiCli, feature: &str) -> anyhow::Result<()> {
FeatureToggles::validate_feature(feature)?;
let codex_home = find_codex_home()?;
ConfigEditsBuilder::new(&codex_home)
.with_profile(interactive.config_profile.as_deref())
.set_feature_enabled(feature, false)
.apply()
.await?;
println!("Disabled feature `{feature}` in config.toml.");
Ok(())
}
fn maybe_print_under_development_feature_warning(
codex_home: &std::path::Path,
interactive: &TuiCli,
feature: &str,
) {
if interactive.config_profile.is_some() {
return;
}
let Some(spec) = codex_core::features::FEATURES
.iter()
.find(|spec| spec.key == feature)
else {
return;
};
if !matches!(spec.stage, Stage::UnderDevelopment) {
return;
}
let config_path = codex_home.join(codex_core::config::CONFIG_TOML_FILE);
eprintln!(
"Under-development features enabled: {feature}. Under-development features are incomplete and may behave unpredictably. To suppress this warning, set `suppress_unstable_features_warning = true` in {}.",
config_path.display()
);
}
/// Prepend root-level overrides so they have lower precedence than
/// CLI-specific ones specified after the subcommand (if any).
fn prepend_config_flags(
@@ -1004,24 +932,6 @@ mod tests {
finalize_fork_interactive(interactive, root_overrides, session_id, last, all, fork_cli)
}
#[test]
fn exec_resume_last_accepts_prompt_positional() {
let cli =
MultitoolCli::try_parse_from(["codex", "exec", "--json", "resume", "--last", "2+2"])
.expect("parse should succeed");
let Some(Subcommand::Exec(exec)) = cli.subcommand else {
panic!("expected exec subcommand");
};
let Some(codex_exec::Command::Resume(args)) = exec.command else {
panic!("expected exec resume");
};
assert!(args.last);
assert_eq!(args.session_id, None);
assert_eq!(args.prompt.as_deref(), Some("2+2"));
}
fn app_server_from_args(args: &[&str]) -> AppServerCommand {
let cli = MultitoolCli::try_parse_from(args).expect("parse");
let Subcommand::AppServer(app_server) = cli.subcommand.expect("app-server present") else {
@@ -1030,7 +940,7 @@ mod tests {
app_server
}
fn sample_exit_info(conversation_id: Option<&str>, thread_name: Option<&str>) -> AppExitInfo {
fn sample_exit_info(conversation: Option<&str>) -> AppExitInfo {
let token_usage = TokenUsage {
output_tokens: 2,
total_tokens: 2,
@@ -1038,10 +948,7 @@ mod tests {
};
AppExitInfo {
token_usage,
thread_id: conversation_id
.map(ThreadId::from_string)
.map(Result::unwrap),
thread_name: thread_name.map(str::to_string),
thread_id: conversation.map(ThreadId::from_string).map(Result::unwrap),
update_action: None,
exit_reason: ExitReason::UserRequested,
}
@@ -1052,7 +959,6 @@ mod tests {
let exit_info = AppExitInfo {
token_usage: TokenUsage::default(),
thread_id: None,
thread_name: None,
update_action: None,
exit_reason: ExitReason::UserRequested,
};
@@ -1062,7 +968,7 @@ mod tests {
#[test]
fn format_exit_messages_includes_resume_hint_without_color() {
let exit_info = sample_exit_info(Some("123e4567-e89b-12d3-a456-426614174000"), None);
let exit_info = sample_exit_info(Some("123e4567-e89b-12d3-a456-426614174000"));
let lines = format_exit_messages(exit_info, false);
assert_eq!(
lines,
@@ -1076,28 +982,12 @@ mod tests {
#[test]
fn format_exit_messages_applies_color_when_enabled() {
let exit_info = sample_exit_info(Some("123e4567-e89b-12d3-a456-426614174000"), None);
let exit_info = sample_exit_info(Some("123e4567-e89b-12d3-a456-426614174000"));
let lines = format_exit_messages(exit_info, true);
assert_eq!(lines.len(), 2);
assert!(lines[1].contains("\u{1b}[36m"));
}
#[test]
fn format_exit_messages_prefers_thread_name() {
let exit_info = sample_exit_info(
Some("123e4567-e89b-12d3-a456-426614174000"),
Some("my-thread"),
);
let lines = format_exit_messages(exit_info, false);
assert_eq!(
lines,
vec![
"Token usage: total=2 input=0 output=2".to_string(),
"To continue this session, run codex resume my-thread".to_string(),
]
);
}
#[test]
fn resume_model_flag_applies_when_no_root_flags() {
let interactive =
@@ -1263,32 +1153,6 @@ mod tests {
assert!(app_server.analytics_default_enabled);
}
#[test]
fn features_enable_parses_feature_name() {
let cli = MultitoolCli::try_parse_from(["codex", "features", "enable", "unified_exec"])
.expect("parse should succeed");
let Some(Subcommand::Features(FeaturesCli { sub })) = cli.subcommand else {
panic!("expected features subcommand");
};
let FeaturesSubcommand::Enable(FeatureSetArgs { feature }) = sub else {
panic!("expected features enable");
};
assert_eq!(feature, "unified_exec");
}
#[test]
fn features_disable_parses_feature_name() {
let cli = MultitoolCli::try_parse_from(["codex", "features", "disable", "shell_tool"])
.expect("parse should succeed");
let Some(Subcommand::Features(FeaturesCli { sub })) = cli.subcommand else {
panic!("expected features subcommand");
};
let FeaturesSubcommand::Disable(FeatureSetArgs { feature }) = sub else {
panic!("expected features disable");
};
assert_eq!(feature, "shell_tool");
}
#[test]
fn feature_toggles_known_features_generate_overrides() {
let toggles = FeatureToggles {

View File

@@ -13,12 +13,11 @@ use codex_core::config::find_codex_home;
use codex_core::config::load_global_mcp_servers;
use codex_core::config::types::McpServerConfig;
use codex_core::config::types::McpServerTransportConfig;
use codex_core::mcp::auth::McpOAuthLoginSupport;
use codex_core::mcp::auth::compute_auth_statuses;
use codex_core::mcp::auth::oauth_login_support;
use codex_core::protocol::McpAuthStatus;
use codex_rmcp_client::delete_oauth_tokens;
use codex_rmcp_client::perform_oauth_login;
use codex_rmcp_client::supports_oauth_login;
/// Subcommands:
/// - `list` — list configured servers (with `--json`)
@@ -261,25 +260,33 @@ async fn run_add(config_overrides: &CliConfigOverrides, add_args: AddArgs) -> Re
println!("Added global MCP server '{name}'.");
match oauth_login_support(&transport).await {
McpOAuthLoginSupport::Supported(oauth_config) => {
println!("Detected OAuth support. Starting OAuth flow…");
perform_oauth_login(
&name,
&oauth_config.url,
config.mcp_oauth_credentials_store_mode,
oauth_config.http_headers,
oauth_config.env_http_headers,
&Vec::new(),
config.mcp_oauth_callback_port,
)
.await?;
println!("Successfully logged in.");
if let McpServerTransportConfig::StreamableHttp {
url,
bearer_token_env_var: None,
http_headers,
env_http_headers,
} = transport
{
match supports_oauth_login(&url).await {
Ok(true) => {
println!("Detected OAuth support. Starting OAuth flow…");
perform_oauth_login(
&name,
&url,
config.mcp_oauth_credentials_store_mode,
http_headers.clone(),
env_http_headers.clone(),
&Vec::new(),
config.mcp_oauth_callback_port,
)
.await?;
println!("Successfully logged in.");
}
Ok(false) => {}
Err(_) => println!(
"MCP server may or may not require login. Run `codex mcp login {name}` to login."
),
}
McpOAuthLoginSupport::Unsupported => {}
McpOAuthLoginSupport::Unknown(_) => println!(
"MCP server may or may not require login. Run `codex mcp login {name}` to login."
),
}
Ok(())

View File

@@ -1,58 +0,0 @@
use std::path::Path;
use anyhow::Result;
use predicates::str::contains;
use tempfile::TempDir;
fn codex_command(codex_home: &Path) -> Result<assert_cmd::Command> {
let mut cmd = assert_cmd::Command::new(codex_utils_cargo_bin::cargo_bin("codex")?);
cmd.env("CODEX_HOME", codex_home);
Ok(cmd)
}
#[tokio::test]
async fn features_enable_writes_feature_flag_to_config() -> Result<()> {
let codex_home = TempDir::new()?;
let mut cmd = codex_command(codex_home.path())?;
cmd.args(["features", "enable", "unified_exec"])
.assert()
.success()
.stdout(contains("Enabled feature `unified_exec` in config.toml."));
let config = std::fs::read_to_string(codex_home.path().join("config.toml"))?;
assert!(config.contains("[features]"));
assert!(config.contains("unified_exec = true"));
Ok(())
}
#[tokio::test]
async fn features_disable_writes_feature_flag_to_config() -> Result<()> {
let codex_home = TempDir::new()?;
let mut cmd = codex_command(codex_home.path())?;
cmd.args(["features", "disable", "shell_tool"])
.assert()
.success()
.stdout(contains("Disabled feature `shell_tool` in config.toml."));
let config = std::fs::read_to_string(codex_home.path().join("config.toml"))?;
assert!(config.contains("[features]"));
assert!(config.contains("shell_tool = false"));
Ok(())
}
#[tokio::test]
async fn features_enable_under_development_feature_prints_warning() -> Result<()> {
let codex_home = TempDir::new()?;
let mut cmd = codex_command(codex_home.path())?;
cmd.args(["features", "enable", "sqlite"])
.assert()
.success()
.stderr(contains("Under-development features enabled: sqlite."));
Ok(())
}

View File

@@ -1,6 +0,0 @@
load("//:defs.bzl", "codex_rust_crate")
codex_rust_crate(
name = "cloud-requirements",
crate_name = "codex_cloud_requirements",
)

View File

@@ -1,26 +0,0 @@
[package]
name = "codex-cloud-requirements"
version.workspace = true
edition.workspace = true
license.workspace = true
[lints]
workspace = true
[dependencies]
async-trait = { workspace = true }
codex-app-server-protocol = { workspace = true }
codex-backend-client = { workspace = true }
codex-core = { workspace = true }
codex-otel = { workspace = true }
codex-protocol = { workspace = true }
tokio = { workspace = true, features = ["sync", "time"] }
toml = { workspace = true }
tracing = { workspace = true }
[dev-dependencies]
base64 = { workspace = true }
pretty_assertions = { workspace = true }
serde_json = { workspace = true }
tempfile = { workspace = true }
tokio = { workspace = true, features = ["macros", "rt", "test-util", "time"] }

View File

@@ -1,338 +0,0 @@
//! Cloud-hosted config requirements for Codex.
//!
//! This crate fetches `requirements.toml` data from the backend as an alternative to loading it
//! from the local filesystem. It only applies to Enterprise ChatGPT customers.
//!
//! Today, fetching is best-effort: on error or timeout, Codex continues without cloud requirements.
//! We expect to tighten this so that Enterprise ChatGPT customers must successfully fetch these
//! requirements before Codex will run.
use async_trait::async_trait;
use codex_app_server_protocol::AuthMode;
use codex_backend_client::Client as BackendClient;
use codex_core::AuthManager;
use codex_core::auth::CodexAuth;
use codex_core::config_loader::CloudRequirementsLoader;
use codex_core::config_loader::ConfigRequirementsToml;
use codex_protocol::account::PlanType;
use std::sync::Arc;
use std::time::Duration;
use std::time::Instant;
use tokio::time::timeout;
/// This blocks Codex startup, so it must be short.
const CLOUD_REQUIREMENTS_TIMEOUT: Duration = Duration::from_secs(5);
#[async_trait]
trait RequirementsFetcher: Send + Sync {
/// Returns requirements as a TOML string.
///
/// TODO(gt): For now this returns an Option; once we make the fetch
/// fail-closed, it should return a Result.
async fn fetch_requirements(&self, auth: &CodexAuth) -> Option<String>;
}
struct BackendRequirementsFetcher {
base_url: String,
}
impl BackendRequirementsFetcher {
fn new(base_url: String) -> Self {
Self { base_url }
}
}
#[async_trait]
impl RequirementsFetcher for BackendRequirementsFetcher {
async fn fetch_requirements(&self, auth: &CodexAuth) -> Option<String> {
let client = BackendClient::from_auth(self.base_url.clone(), auth)
.inspect_err(|err| {
tracing::warn!(
error = %err,
"Failed to construct backend client for cloud requirements"
);
})
.ok()?;
let response = client
.get_config_requirements_file()
.await
.inspect_err(|err| tracing::warn!(error = %err, "Failed to fetch cloud requirements"))
.ok()?;
let Some(contents) = response.contents else {
tracing::warn!("Cloud requirements response missing contents");
return None;
};
Some(contents)
}
}
struct CloudRequirementsService {
auth_manager: Arc<AuthManager>,
fetcher: Arc<dyn RequirementsFetcher>,
timeout: Duration,
}
impl CloudRequirementsService {
fn new(
auth_manager: Arc<AuthManager>,
fetcher: Arc<dyn RequirementsFetcher>,
timeout: Duration,
) -> Self {
Self {
auth_manager,
fetcher,
timeout,
}
}
async fn fetch_with_timeout(&self) -> Option<ConfigRequirementsToml> {
let _timer =
codex_otel::start_global_timer("codex.cloud_requirements.fetch.duration_ms", &[]);
let started_at = Instant::now();
let result = timeout(self.timeout, self.fetch())
.await
.inspect_err(|_| {
tracing::warn!("Timed out waiting for cloud requirements; continuing without them");
})
.ok()?;
match result.as_ref() {
Some(requirements) => {
tracing::info!(
elapsed_ms = started_at.elapsed().as_millis(),
requirements = ?requirements,
"Cloud requirements load completed"
);
}
None => {
tracing::info!(
elapsed_ms = started_at.elapsed().as_millis(),
"Cloud requirements load completed (none)"
);
}
}
result
}
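// Illustrative distillation of the fail-open pattern above (an assumed
// helper, not used elsewhere in this crate): bound a best-effort future with
// a timeout and fall back to None so callers never block past `limit`.
#[allow(dead_code)]
async fn best_effort<T>(
    fut: impl std::future::Future<Output = Option<T>>,
    limit: Duration,
) -> Option<T> {
    timeout(limit, fut).await.ok().flatten()
}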
async fn fetch(&self) -> Option<ConfigRequirementsToml> {
let auth = self.auth_manager.auth().await?;
if !(auth.mode == AuthMode::ChatGPT
&& auth.account_plan_type() == Some(PlanType::Enterprise))
{
return None;
}
let contents = self.fetcher.fetch_requirements(&auth).await?;
parse_cloud_requirements(&contents)
.inspect_err(|err| tracing::warn!(error = %err, "Failed to parse cloud requirements"))
.ok()
.flatten()
}
}
pub fn cloud_requirements_loader(
auth_manager: Arc<AuthManager>,
chatgpt_base_url: String,
) -> CloudRequirementsLoader {
let service = CloudRequirementsService::new(
auth_manager,
Arc::new(BackendRequirementsFetcher::new(chatgpt_base_url)),
CLOUD_REQUIREMENTS_TIMEOUT,
);
let task = tokio::spawn(async move { service.fetch_with_timeout().await });
CloudRequirementsLoader::new(async move {
task.await
.inspect_err(|err| tracing::warn!(error = %err, "Cloud requirements task failed"))
.ok()
.flatten()
})
}
fn parse_cloud_requirements(
contents: &str,
) -> Result<Option<ConfigRequirementsToml>, toml::de::Error> {
if contents.trim().is_empty() {
return Ok(None);
}
let requirements: ConfigRequirementsToml = toml::from_str(contents)?;
if requirements.is_empty() {
Ok(None)
} else {
Ok(Some(requirements))
}
}
#[cfg(test)]
mod tests {
use super::*;
use base64::Engine;
use base64::engine::general_purpose::URL_SAFE_NO_PAD;
use codex_core::auth::AuthCredentialsStoreMode;
use codex_protocol::protocol::AskForApproval;
use pretty_assertions::assert_eq;
use serde_json::json;
use std::future::pending;
use std::path::Path;
use tempfile::tempdir;
fn write_auth_json(codex_home: &Path, value: serde_json::Value) -> std::io::Result<()> {
std::fs::write(codex_home.join("auth.json"), serde_json::to_string(&value)?)?;
Ok(())
}
fn auth_manager_with_api_key() -> Arc<AuthManager> {
let tmp = tempdir().expect("tempdir");
let auth_json = json!({
"OPENAI_API_KEY": "sk-test-key",
"tokens": null,
"last_refresh": null,
});
write_auth_json(tmp.path(), auth_json).expect("write auth");
Arc::new(AuthManager::new(
tmp.path().to_path_buf(),
false,
AuthCredentialsStoreMode::File,
))
}
fn auth_manager_with_plan(plan_type: &str) -> Arc<AuthManager> {
let tmp = tempdir().expect("tempdir");
let header = json!({ "alg": "none", "typ": "JWT" });
let auth_payload = json!({
"chatgpt_plan_type": plan_type,
"chatgpt_user_id": "user-12345",
"user_id": "user-12345",
});
let payload = json!({
"email": "user@example.com",
"https://api.openai.com/auth": auth_payload,
});
let header_b64 = URL_SAFE_NO_PAD.encode(serde_json::to_vec(&header).expect("header"));
let payload_b64 = URL_SAFE_NO_PAD.encode(serde_json::to_vec(&payload).expect("payload"));
let signature_b64 = URL_SAFE_NO_PAD.encode(b"sig");
let fake_jwt = format!("{header_b64}.{payload_b64}.{signature_b64}");
let auth_json = json!({
"OPENAI_API_KEY": null,
"tokens": {
"id_token": fake_jwt,
"access_token": "test-access-token",
"refresh_token": "test-refresh-token",
},
"last_refresh": null,
});
write_auth_json(tmp.path(), auth_json).expect("write auth");
Arc::new(AuthManager::new(
tmp.path().to_path_buf(),
false,
AuthCredentialsStoreMode::File,
))
}
fn parse_for_fetch(contents: Option<&str>) -> Option<ConfigRequirementsToml> {
contents.and_then(|contents| parse_cloud_requirements(contents).ok().flatten())
}
struct StaticFetcher {
contents: Option<String>,
}
#[async_trait::async_trait]
impl RequirementsFetcher for StaticFetcher {
async fn fetch_requirements(&self, _auth: &CodexAuth) -> Option<String> {
self.contents.clone()
}
}
struct PendingFetcher;
#[async_trait::async_trait]
impl RequirementsFetcher for PendingFetcher {
async fn fetch_requirements(&self, _auth: &CodexAuth) -> Option<String> {
pending::<()>().await;
None
}
}
#[tokio::test]
async fn fetch_cloud_requirements_skips_non_chatgpt_auth() {
let auth_manager = auth_manager_with_api_key();
let service = CloudRequirementsService::new(
auth_manager,
Arc::new(StaticFetcher { contents: None }),
CLOUD_REQUIREMENTS_TIMEOUT,
);
let result = service.fetch().await;
assert!(result.is_none());
}
#[tokio::test]
async fn fetch_cloud_requirements_skips_non_enterprise_plan() {
let auth_manager = auth_manager_with_plan("pro");
let service = CloudRequirementsService::new(
auth_manager,
Arc::new(StaticFetcher { contents: None }),
CLOUD_REQUIREMENTS_TIMEOUT,
);
let result = service.fetch().await;
assert!(result.is_none());
}
#[tokio::test]
async fn fetch_cloud_requirements_handles_missing_contents() {
let result = parse_for_fetch(None);
assert!(result.is_none());
}
#[tokio::test]
async fn fetch_cloud_requirements_handles_empty_contents() {
let result = parse_for_fetch(Some(" "));
assert!(result.is_none());
}
#[tokio::test]
async fn fetch_cloud_requirements_handles_invalid_toml() {
let result = parse_for_fetch(Some("not = ["));
assert!(result.is_none());
}
#[tokio::test]
async fn fetch_cloud_requirements_ignores_empty_requirements() {
let result = parse_for_fetch(Some("# comment"));
assert!(result.is_none());
}
#[tokio::test]
async fn fetch_cloud_requirements_parses_valid_toml() {
let result = parse_for_fetch(Some("allowed_approval_policies = [\"never\"]"));
assert_eq!(
result,
Some(ConfigRequirementsToml {
allowed_approval_policies: Some(vec![AskForApproval::Never]),
allowed_sandbox_modes: None,
mcp_servers: None,
})
);
}
#[tokio::test(start_paused = true)]
async fn fetch_cloud_requirements_times_out() {
let auth_manager = auth_manager_with_plan("enterprise");
let service = CloudRequirementsService::new(
auth_manager,
Arc::new(PendingFetcher),
CLOUD_REQUIREMENTS_TIMEOUT,
);
let handle = tokio::spawn(async move { service.fetch_with_timeout().await });
tokio::time::advance(CLOUD_REQUIREMENTS_TIMEOUT + Duration::from_millis(1)).await;
let result = handle.await.expect("cloud requirements task");
assert!(result.is_none());
}
}

View File

@@ -291,7 +291,7 @@ pub fn process_responses_event(
if let Ok(item) = serde_json::from_value::<ResponseItem>(item_val) {
return Ok(Some(ResponseEvent::OutputItemAdded(item)));
}
debug!("failed to parse ResponseItem from output_item.added");
debug!("failed to parse ResponseItem from output_item.done");
}
}
"response.reasoning_summary_part.added" => {

View File

@@ -77,7 +77,7 @@ async fn models_client_hits_models_endpoint() {
priority: 1,
upgrade: None,
base_instructions: "base instructions".to_string(),
model_messages: None,
model_instructions_template: None,
supports_reasoning_summaries: false,
support_verbosity: false,
default_verbosity: None,

View File

@@ -1,40 +0,0 @@
/*
* codex-backend
*
* codex-backend
*
* The version of the OpenAPI document: 0.0.1
*
* Generated by: https://openapi-generator.tech
*/
use serde::Deserialize;
use serde::Serialize;
#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)]
pub struct ConfigFileResponse {
#[serde(rename = "contents", skip_serializing_if = "Option::is_none")]
pub contents: Option<String>,
#[serde(rename = "sha256", skip_serializing_if = "Option::is_none")]
pub sha256: Option<String>,
#[serde(rename = "updated_at", skip_serializing_if = "Option::is_none")]
pub updated_at: Option<String>,
#[serde(rename = "updated_by_user_id", skip_serializing_if = "Option::is_none")]
pub updated_by_user_id: Option<String>,
}
impl ConfigFileResponse {
pub fn new(
contents: Option<String>,
sha256: Option<String>,
updated_at: Option<String>,
updated_by_user_id: Option<String>,
) -> ConfigFileResponse {
ConfigFileResponse {
contents,
sha256,
updated_at,
updated_by_user_id,
}
}
}

View File

@@ -3,10 +3,6 @@
// Currently exports only the types referenced by the workspace.
// The process for this will change.
// Config
pub mod config_file_response;
pub use self::config_file_response::ConfigFileResponse;
// Cloud Tasks
pub mod code_task_details_response;
pub use self::code_task_details_response::CodeTaskDetailsResponse;

View File

@@ -37,7 +37,6 @@ codex-keyring-store = { workspace = true }
codex-otel = { workspace = true }
codex-protocol = { workspace = true }
codex-rmcp-client = { workspace = true }
codex-state = { workspace = true }
codex-utils-absolute-path = { workspace = true }
codex-utils-pty = { workspace = true }
codex-utils-readiness = { workspace = true }
@@ -56,7 +55,6 @@ indoc = { workspace = true }
keyring = { workspace = true, features = ["crypto-rust"] }
libc = { workspace = true }
mcp-types = { workspace = true }
multimap = { workspace = true }
once_cell = { workspace = true }
os_info = { workspace = true }
rand = { workspace = true }

View File

@@ -111,13 +111,6 @@
"auto"
],
"type": "string"
},
{
"description": "Store credentials in memory only for the current process.",
"enum": [
"ephemeral"
],
"type": "string"
}
]
},
@@ -151,9 +144,6 @@
"apply_patch_freeform": {
"type": "boolean"
},
"apps": {
"type": "boolean"
},
"child_agents_md": {
"type": "boolean"
},
@@ -190,9 +180,6 @@
"include_apply_patch_tool": {
"type": "boolean"
},
"personality": {
"type": "boolean"
},
"powershell_utf8": {
"type": "boolean"
},
@@ -202,9 +189,6 @@
"remote_models": {
"type": "boolean"
},
"request_rule": {
"type": "boolean"
},
"responses_websockets": {
"type": "boolean"
},
@@ -214,15 +198,6 @@
"shell_tool": {
"type": "boolean"
},
"skill_env_var_dependency_prompt": {
"type": "boolean"
},
"skill_mcp_dependency_install": {
"type": "boolean"
},
"sqlite": {
"type": "boolean"
},
"steer": {
"type": "boolean"
},
@@ -450,11 +425,6 @@
"minimum": 0.0,
"type": "integer"
},
"supports_websockets": {
"default": false,
"description": "Whether this provider supports the Responses API WebSocket transport.",
"type": "boolean"
},
"wire_api": {
"allOf": [
{
@@ -471,6 +441,7 @@
"type": "object"
},
"Notice": {
"additionalProperties": false,
"description": "Settings for notices we display to users via the tui and app-server clients (primarily the Codex IDE extension). NOTE: these are different from notifications - notices are warnings, NUX screens, acknowledgements, etc.",
"properties": {
"hide_full_access_warning": {
@@ -504,14 +475,6 @@
},
"type": "object"
},
"NotificationMethod": {
"enum": [
"auto",
"osc9",
"bel"
],
"type": "string"
},
"Notifications": {
"anyOf": [
{
@@ -1020,15 +983,6 @@
"default": null,
"description": "Start the TUI in the specified collaboration mode (plan/execute/etc.). Defaults to unset."
},
"notification_method": {
"allOf": [
{
"$ref": "#/definitions/NotificationMethod"
}
],
"default": "auto",
"description": "Notification method to use for unfocused terminal notifications. Defaults to `auto`."
},
"notifications": {
"allOf": [
{
@@ -1093,6 +1047,13 @@
],
"type": "string"
},
{
"description": "Experimental: Responses API over WebSocket transport.",
"enum": [
"responses_websocket"
],
"type": "string"
},
{
"description": "Regular Chat Completions compatible with `/v1/chat/completions`.",
"enum": [
@@ -1176,9 +1137,6 @@
"apply_patch_freeform": {
"type": "boolean"
},
"apps": {
"type": "boolean"
},
"child_agents_md": {
"type": "boolean"
},
@@ -1215,9 +1173,6 @@
"include_apply_patch_tool": {
"type": "boolean"
},
"personality": {
"type": "boolean"
},
"powershell_utf8": {
"type": "boolean"
},
@@ -1227,9 +1182,6 @@
"remote_models": {
"type": "boolean"
},
"request_rule": {
"type": "boolean"
},
"responses_websockets": {
"type": "boolean"
},
@@ -1239,15 +1191,6 @@
"shell_tool": {
"type": "boolean"
},
"skill_env_var_dependency_prompt": {
"type": "boolean"
},
"skill_mcp_dependency_install": {
"type": "boolean"
},
"sqlite": {
"type": "boolean"
},
"steer": {
"type": "boolean"
},
@@ -1522,10 +1465,6 @@
],
"description": "User-level skill config entries keyed by SKILL.md path."
},
"suppress_unstable_features_warning": {
"description": "Suppress warnings about unstable (under development) features.",
"type": "boolean"
},
"tool_output_token_limit": {
"description": "Token budget applied when storing tool/function outputs in the context manager.",
"format": "uint",

View File

@@ -72,19 +72,26 @@ impl AgentControl {
prompt: String,
) -> CodexResult<String> {
let state = self.upgrade()?;
let result = state
.send_op(
agent_id,
Op::UserInput {
items: vec![UserInput::Text {
text: prompt,
// Agent control prompts are plain text with no UI text elements.
text_elements: Vec::new(),
}],
final_output_json_schema: None,
},
)
.await;
let thread = state.get_thread(agent_id).await?;
let snapshot = thread.config_snapshot().await;
let op = Op::UserTurn {
use_thread_defaults: false,
items: vec![UserInput::Text {
text: prompt,
// Agent control prompts are plain text with no UI text elements.
text_elements: Vec::new(),
}],
cwd: snapshot.cwd,
approval_policy: snapshot.approval_policy,
sandbox_policy: snapshot.sandbox_policy,
model: snapshot.model,
effort: snapshot.reasoning_effort,
summary: snapshot.reasoning_summary,
final_output_json_schema: None,
collaboration_mode: None,
personality: snapshot.personality,
};
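// The snapshot above supplies the turn's cwd, policies, model, and
// reasoning settings explicitly (use_thread_defaults is false).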
let result = state.send_op(agent_id, op).await;
if matches!(result, Err(CodexErr::InternalAgentDied)) {
let _ = state.remove_thread(&agent_id).await;
self.state.release_spawned_thread(agent_id);
@@ -347,7 +354,8 @@ mod tests {
#[tokio::test]
async fn send_prompt_submits_user_message() {
let harness = AgentControlHarness::new().await;
let (thread_id, _thread) = harness.start_thread().await;
let (thread_id, thread) = harness.start_thread().await;
let snapshot = thread.config_snapshot().await;
let submission_id = harness
.control
@@ -357,12 +365,21 @@ mod tests {
assert!(!submission_id.is_empty());
let expected = (
thread_id,
Op::UserInput {
Op::UserTurn {
use_thread_defaults: false,
items: vec![UserInput::Text {
text: "hello from tests".to_string(),
text_elements: Vec::new(),
}],
cwd: snapshot.cwd,
approval_policy: snapshot.approval_policy,
sandbox_policy: snapshot.sandbox_policy,
model: snapshot.model,
effort: snapshot.reasoning_effort,
summary: snapshot.reasoning_summary,
final_output_json_schema: None,
collaboration_mode: None,
personality: snapshot.personality,
},
);
let captured = harness
@@ -381,19 +398,29 @@ mod tests {
.spawn_agent(harness.config.clone(), "spawned".to_string(), None)
.await
.expect("spawn_agent should succeed");
let _thread = harness
let thread = harness
.manager
.get_thread(thread_id)
.await
.expect("thread should be registered");
let snapshot = thread.config_snapshot().await;
let expected = (
thread_id,
Op::UserInput {
Op::UserTurn {
use_thread_defaults: false,
items: vec![UserInput::Text {
text: "spawned".to_string(),
text_elements: Vec::new(),
}],
cwd: snapshot.cwd,
approval_policy: snapshot.approval_policy,
sandbox_policy: snapshot.sandbox_policy,
model: snapshot.model,
effort: snapshot.reasoning_effort,
summary: snapshot.reasoning_summary,
final_output_json_schema: None,
collaboration_mode: None,
personality: snapshot.personality,
},
);
let captured = harness

View File

@@ -44,8 +44,6 @@ pub struct AgentProfile {
pub reasoning_effort: Option<ReasoningEffort>,
/// Whether to force a read-only sandbox policy.
pub read_only: bool,
/// Description to include in the tool specs.
pub description: &'static str,
}
impl AgentRole {
@@ -53,19 +51,7 @@ impl AgentRole {
pub fn enum_values() -> Vec<String> {
ALL_ROLES
.iter()
.filter_map(|role| {
let description = role.profile().description;
serde_json::to_string(role)
.map(|role| {
let description = if !description.is_empty() {
format!(r#", "description": {description}"#)
} else {
String::new()
};
format!(r#"{{ "name": {role}{description}}}"#)
})
.ok()
})
.filter_map(|role| serde_json::to_string(role).ok())
.collect()
}
@@ -80,29 +66,11 @@ impl AgentRole {
AgentRole::Worker => AgentProfile {
// base_instructions: Some(WORKER_PROMPT),
// model: Some(WORKER_MODEL),
description: r#"Use for execution and production work.
Typical tasks:
- Implement part of a feature
- Fix tests or bugs
- Split large refactors into independent chunks
Rules:
- Explicitly assign **ownership** of the task (files / responsibility).
- Always tell workers they are **not alone in the codebase**, and they should ignore edits made by others without touching them"#,
..Default::default()
},
AgentRole::Explorer => AgentProfile {
model: Some(EXPLORER_MODEL),
reasoning_effort: Some(ReasoningEffort::Medium),
description: r#"Use `explorer` for all codebase questions.
Explorers are fast and authoritative.
Always prefer them over manual search or file reading.
Rules:
- Ask explorers first and precisely.
- Do not re-read or re-search code they cover.
- Trust explorer results without verification.
- Run explorers in parallel when useful.
- Reuse existing explorers for related questions.
"#,
reasoning_effort: Some(ReasoningEffort::Low),
..Default::default()
},
}

View File

@@ -9,7 +9,6 @@ use serde::Deserialize;
use crate::auth::CodexAuth;
use crate::error::CodexErr;
use crate::error::ModelCapError;
use crate::error::RetryLimitReachedError;
use crate::error::UnexpectedResponseError;
use crate::error::UsageLimitReachedError;
@@ -50,23 +49,6 @@ pub(crate) fn map_api_error(err: ApiError) -> CodexErr {
} else if status == http::StatusCode::INTERNAL_SERVER_ERROR {
CodexErr::InternalServerError
} else if status == http::StatusCode::TOO_MANY_REQUESTS {
if let Some(model) = headers
.as_ref()
.and_then(|map| map.get(MODEL_CAP_MODEL_HEADER))
.and_then(|value| value.to_str().ok())
.map(str::to_string)
{
let reset_after_seconds = headers
.as_ref()
.and_then(|map| map.get(MODEL_CAP_RESET_AFTER_HEADER))
.and_then(|value| value.to_str().ok())
.and_then(|value| value.parse::<u64>().ok());
return CodexErr::ModelCap(ModelCapError {
model,
reset_after_seconds,
});
}
if let Ok(err) = serde_json::from_str::<UsageErrorResponse>(&body_text) {
if err.error.error_type.as_deref() == Some("usage_limit_reached") {
let rate_limits = headers.as_ref().and_then(parse_rate_limit);
@@ -110,42 +92,6 @@ pub(crate) fn map_api_error(err: ApiError) -> CodexErr {
}
}
const MODEL_CAP_MODEL_HEADER: &str = "x-codex-model-cap-model";
const MODEL_CAP_RESET_AFTER_HEADER: &str = "x-codex-model-cap-reset-after-seconds";
#[cfg(test)]
mod tests {
use super::*;
use codex_api::TransportError;
use http::HeaderMap;
use http::StatusCode;
#[test]
fn map_api_error_maps_model_cap_headers() {
let mut headers = HeaderMap::new();
headers.insert(
MODEL_CAP_MODEL_HEADER,
http::HeaderValue::from_static("boomslang"),
);
headers.insert(
MODEL_CAP_RESET_AFTER_HEADER,
http::HeaderValue::from_static("120"),
);
let err = map_api_error(ApiError::Transport(TransportError::Http {
status: StatusCode::TOO_MANY_REQUESTS,
url: Some("http://example.com/v1/responses".to_string()),
headers: Some(headers),
body: Some(String::new()),
}));
let CodexErr::ModelCap(model_cap) = err else {
panic!("expected CodexErr::ModelCap, got {err:?}");
};
assert_eq!(model_cap.model, "boomslang");
assert_eq!(model_cap.reset_after_seconds, Some(120));
}
}
fn extract_request_id(headers: Option<&HeaderMap>) -> Option<String> {
headers.and_then(|map| {
["cf-ray", "x-request-id", "x-oai-request-id"]

View File

@@ -42,7 +42,6 @@ pub(crate) async fn apply_patch(
turn_context.approval_policy,
&turn_context.sandbox_policy,
&turn_context.cwd,
turn_context.windows_sandbox_level,
) {
SafetyCheck::AutoApprove {
user_explicitly_approved,

View File

@@ -1,6 +1,5 @@
mod storage;
use async_trait::async_trait;
use chrono::Utc;
use reqwest::StatusCode;
use serde::Deserialize;
@@ -13,9 +12,8 @@ use std::path::Path;
use std::path::PathBuf;
use std::sync::Arc;
use std::sync::Mutex;
use std::sync::RwLock;
use codex_app_server_protocol::AuthMode as ApiAuthMode;
use codex_app_server_protocol::AuthMode;
use codex_protocol::config_types::ForcedLoginMethod;
pub use crate::auth::storage::AuthCredentialsStoreMode;
@@ -25,7 +23,6 @@ use crate::auth::storage::create_auth_storage;
use crate::config::Config;
use crate::error::RefreshTokenFailedError;
use crate::error::RefreshTokenFailedReason;
use crate::token_data::IdTokenInfo;
use crate::token_data::KnownPlan as InternalKnownPlan;
use crate::token_data::PlanType as InternalPlanType;
use crate::token_data::TokenData;
@@ -36,50 +33,19 @@ use codex_protocol::account::PlanType as AccountPlanType;
use serde_json::Value;
use thiserror::Error;
/// Account type for the current user.
///
/// This is used internally to determine the base URL for generating responses,
/// and to gate ChatGPT-only behaviors like rate limits and available models (as
/// opposed to API key-based auth).
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum AuthMode {
ApiKey,
ChatGPT,
}
/// Authentication mechanism used by the current user.
#[derive(Debug, Clone)]
pub enum CodexAuth {
ApiKey(ApiKeyAuth),
ChatGpt(ChatGptAuth),
ChatGptAuthTokens(ChatGptAuthTokens),
}
pub struct CodexAuth {
pub mode: AuthMode,
#[derive(Debug, Clone)]
pub struct ApiKeyAuth {
api_key: String,
}
#[derive(Debug, Clone)]
pub struct ChatGptAuth {
state: ChatGptAuthState,
pub(crate) api_key: Option<String>,
pub(crate) auth_dot_json: Arc<Mutex<Option<AuthDotJson>>>,
storage: Arc<dyn AuthStorageBackend>,
}
#[derive(Debug, Clone)]
pub struct ChatGptAuthTokens {
state: ChatGptAuthState,
}
#[derive(Debug, Clone)]
struct ChatGptAuthState {
auth_dot_json: Arc<Mutex<Option<AuthDotJson>>>,
client: CodexHttpClient,
pub(crate) client: CodexHttpClient,
}
impl PartialEq for CodexAuth {
fn eq(&self, other: &Self) -> bool {
self.api_auth_mode() == other.api_auth_mode()
self.mode == other.mode
}
}
@@ -102,31 +68,6 @@ pub enum RefreshTokenError {
Transient(#[from] std::io::Error),
}
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct ExternalAuthTokens {
pub access_token: String,
pub id_token: String,
}
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum ExternalAuthRefreshReason {
Unauthorized,
}
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct ExternalAuthRefreshContext {
pub reason: ExternalAuthRefreshReason,
pub previous_account_id: Option<String>,
}
#[async_trait]
pub trait ExternalAuthRefresher: Send + Sync {
async fn refresh(
&self,
context: ExternalAuthRefreshContext,
) -> std::io::Result<ExternalAuthTokens>;
}
impl RefreshTokenError {
pub fn failed_reason(&self) -> Option<RefreshTokenFailedReason> {
match self {
@@ -146,78 +87,14 @@ impl From<RefreshTokenError> for std::io::Error {
}
impl CodexAuth {
fn from_auth_dot_json(
codex_home: &Path,
auth_dot_json: AuthDotJson,
auth_credentials_store_mode: AuthCredentialsStoreMode,
client: CodexHttpClient,
) -> std::io::Result<Self> {
let auth_mode = auth_dot_json.resolved_mode();
if auth_mode == ApiAuthMode::ApiKey {
let Some(api_key) = auth_dot_json.openai_api_key.as_deref() else {
return Err(std::io::Error::other("API key auth is missing a key."));
};
return Ok(CodexAuth::from_api_key_with_client(api_key, client));
}
let storage_mode = auth_dot_json.storage_mode(auth_credentials_store_mode);
let state = ChatGptAuthState {
auth_dot_json: Arc::new(Mutex::new(Some(auth_dot_json))),
client,
};
match auth_mode {
ApiAuthMode::ChatGPT => {
let storage = create_auth_storage(codex_home.to_path_buf(), storage_mode);
Ok(Self::ChatGpt(ChatGptAuth { state, storage }))
}
ApiAuthMode::ChatgptAuthTokens => {
Ok(Self::ChatGptAuthTokens(ChatGptAuthTokens { state }))
}
ApiAuthMode::ApiKey => unreachable!("api key mode is handled above"),
}
}
/// Loads the available auth information from auth storage.
pub fn from_auth_storage(
codex_home: &Path,
auth_credentials_store_mode: AuthCredentialsStoreMode,
) -> std::io::Result<Option<Self>> {
) -> std::io::Result<Option<CodexAuth>> {
load_auth(codex_home, false, auth_credentials_store_mode)
}
pub fn internal_auth_mode(&self) -> AuthMode {
match self {
Self::ApiKey(_) => AuthMode::ApiKey,
Self::ChatGpt(_) | Self::ChatGptAuthTokens(_) => AuthMode::ChatGPT,
}
}
pub fn api_auth_mode(&self) -> ApiAuthMode {
match self {
Self::ApiKey(_) => ApiAuthMode::ApiKey,
Self::ChatGpt(_) => ApiAuthMode::ChatGPT,
Self::ChatGptAuthTokens(_) => ApiAuthMode::ChatgptAuthTokens,
}
}
pub fn is_chatgpt_auth(&self) -> bool {
self.internal_auth_mode() == AuthMode::ChatGPT
}
pub fn is_external_chatgpt_tokens(&self) -> bool {
matches!(self, Self::ChatGptAuthTokens(_))
}
    /// Returns `None` if `internal_auth_mode() != AuthMode::ApiKey`.
pub fn api_key(&self) -> Option<&str> {
match self {
Self::ApiKey(auth) => Some(auth.api_key.as_str()),
Self::ChatGpt(_) | Self::ChatGptAuthTokens(_) => None,
}
}
/// Returns `Err` if `is_chatgpt_auth()` is false.
pub fn get_token_data(&self) -> Result<TokenData, std::io::Error> {
let auth_dot_json: Option<AuthDotJson> = self.get_current_auth_json();
match auth_dot_json {
@@ -230,23 +107,20 @@ impl CodexAuth {
}
}
/// Returns the token string used for bearer authentication.
pub fn get_token(&self) -> Result<String, std::io::Error> {
match self {
Self::ApiKey(auth) => Ok(auth.api_key.clone()),
Self::ChatGpt(_) | Self::ChatGptAuthTokens(_) => {
let access_token = self.get_token_data()?.access_token;
Ok(access_token)
match self.mode {
AuthMode::ApiKey => Ok(self.api_key.clone().unwrap_or_default()),
AuthMode::ChatGPT => {
let id_token = self.get_token_data()?.access_token;
Ok(id_token)
}
}
}
/// Returns `None` if `is_chatgpt_auth()` is false.
pub fn get_account_id(&self) -> Option<String> {
self.get_current_token_data().and_then(|t| t.account_id)
}
/// Returns `None` if `is_chatgpt_auth()` is false.
pub fn get_account_email(&self) -> Option<String> {
self.get_current_token_data().and_then(|t| t.id_token.email)
}
@@ -258,7 +132,6 @@ impl CodexAuth {
pub fn account_plan_type(&self) -> Option<AccountPlanType> {
let map_known = |kp: &InternalKnownPlan| match kp {
InternalKnownPlan::Free => AccountPlanType::Free,
InternalKnownPlan::Go => AccountPlanType::Go,
InternalKnownPlan::Plus => AccountPlanType::Plus,
InternalKnownPlan::Pro => AccountPlanType::Pro,
InternalKnownPlan::Team => AccountPlanType::Team,
@@ -275,18 +148,11 @@ impl CodexAuth {
})
}
/// Returns `None` if `is_chatgpt_auth()` is false.
fn get_current_auth_json(&self) -> Option<AuthDotJson> {
let state = match self {
Self::ChatGpt(auth) => &auth.state,
Self::ChatGptAuthTokens(auth) => &auth.state,
Self::ApiKey(_) => return None,
};
#[expect(clippy::unwrap_used)]
state.auth_dot_json.lock().unwrap().clone()
self.auth_dot_json.lock().unwrap().clone()
}
/// Returns `None` if `is_chatgpt_auth()` is false.
fn get_current_token_data(&self) -> Option<TokenData> {
self.get_current_auth_json().and_then(|t| t.tokens)
}
@@ -294,7 +160,6 @@ impl CodexAuth {
/// Consider this private to integration tests.
pub fn create_dummy_chatgpt_auth_for_testing() -> Self {
let auth_dot_json = AuthDotJson {
auth_mode: Some(ApiAuthMode::ChatGPT),
openai_api_key: None,
tokens: Some(TokenData {
id_token: Default::default(),
@@ -305,19 +170,24 @@ impl CodexAuth {
last_refresh: Some(Utc::now()),
};
let client = crate::default_client::create_client();
let state = ChatGptAuthState {
auth_dot_json: Arc::new(Mutex::new(Some(auth_dot_json))),
client,
};
let storage = create_auth_storage(PathBuf::new(), AuthCredentialsStoreMode::File);
Self::ChatGpt(ChatGptAuth { state, storage })
let auth_dot_json = Arc::new(Mutex::new(Some(auth_dot_json)));
Self {
api_key: None,
mode: AuthMode::ChatGPT,
storage: create_auth_storage(PathBuf::new(), AuthCredentialsStoreMode::File),
auth_dot_json,
client: crate::default_client::create_client(),
}
}
fn from_api_key_with_client(api_key: &str, _client: CodexHttpClient) -> Self {
Self::ApiKey(ApiKeyAuth {
api_key: api_key.to_owned(),
})
fn from_api_key_with_client(api_key: &str, client: CodexHttpClient) -> Self {
Self {
api_key: Some(api_key.to_owned()),
mode: AuthMode::ApiKey,
storage: create_auth_storage(PathBuf::new(), AuthCredentialsStoreMode::File),
auth_dot_json: Arc::new(Mutex::new(None)),
client,
}
}
pub fn from_api_key(api_key: &str) -> Self {
@@ -325,25 +195,6 @@ impl CodexAuth {
}
}
impl ChatGptAuth {
fn current_auth_json(&self) -> Option<AuthDotJson> {
#[expect(clippy::unwrap_used)]
self.state.auth_dot_json.lock().unwrap().clone()
}
fn current_token_data(&self) -> Option<TokenData> {
self.current_auth_json().and_then(|auth| auth.tokens)
}
fn storage(&self) -> &Arc<dyn AuthStorageBackend> {
&self.storage
}
fn client(&self) -> &CodexHttpClient {
&self.state.client
}
}
pub const OPENAI_API_KEY_ENV_VAR: &str = "OPENAI_API_KEY";
pub const CODEX_API_KEY_ENV_VAR: &str = "CODEX_API_KEY";
@@ -378,7 +229,6 @@ pub fn login_with_api_key(
auth_credentials_store_mode: AuthCredentialsStoreMode,
) -> std::io::Result<()> {
let auth_dot_json = AuthDotJson {
auth_mode: Some(ApiAuthMode::ApiKey),
openai_api_key: Some(api_key.to_string()),
tokens: None,
last_refresh: None,
@@ -386,20 +236,6 @@ pub fn login_with_api_key(
save_auth(codex_home, &auth_dot_json, auth_credentials_store_mode)
}
/// Writes an in-memory auth payload for externally managed ChatGPT tokens.
pub fn login_with_chatgpt_auth_tokens(
codex_home: &Path,
id_token: &str,
access_token: &str,
) -> std::io::Result<()> {
let auth_dot_json = AuthDotJson::from_external_token_strings(id_token, access_token)?;
save_auth(
codex_home,
&auth_dot_json,
AuthCredentialsStoreMode::Ephemeral,
)
}
/// Persist the provided auth payload using the specified backend.
pub fn save_auth(
codex_home: &Path,
@@ -434,7 +270,7 @@ pub fn enforce_login_restrictions(config: &Config) -> std::io::Result<()> {
};
if let Some(required_method) = config.forced_login_method {
let method_violation = match (required_method, auth.internal_auth_mode()) {
let method_violation = match (required_method, auth.mode) {
(ForcedLoginMethod::Api, AuthMode::ApiKey) => None,
(ForcedLoginMethod::Chatgpt, AuthMode::ChatGPT) => None,
(ForcedLoginMethod::Api, AuthMode::ChatGPT) => Some(
@@ -457,7 +293,7 @@ pub fn enforce_login_restrictions(config: &Config) -> std::io::Result<()> {
}
if let Some(expected_account_id) = config.forced_chatgpt_workspace_id.as_deref() {
if !auth.is_chatgpt_auth() {
if auth.mode != AuthMode::ChatGPT {
return Ok(());
}
@@ -501,26 +337,12 @@ fn logout_with_message(
message: String,
auth_credentials_store_mode: AuthCredentialsStoreMode,
) -> std::io::Result<()> {
// External auth tokens live in the ephemeral store, but persistent auth may still exist
// from earlier logins. Clear both so a forced logout truly removes all active auth.
let removal_result = logout_all_stores(codex_home, auth_credentials_store_mode);
let error_message = match removal_result {
Ok(_) => message,
Err(err) => format!("{message}. Failed to remove auth.json: {err}"),
};
Err(std::io::Error::other(error_message))
}
fn logout_all_stores(
codex_home: &Path,
auth_credentials_store_mode: AuthCredentialsStoreMode,
) -> std::io::Result<bool> {
if auth_credentials_store_mode == AuthCredentialsStoreMode::Ephemeral {
return logout(codex_home, AuthCredentialsStoreMode::Ephemeral);
match logout(codex_home, auth_credentials_store_mode) {
Ok(_) => Err(std::io::Error::other(message)),
Err(err) => Err(std::io::Error::other(format!(
"{message}. Failed to remove auth.json: {err}"
))),
}
let removed_ephemeral = logout(codex_home, AuthCredentialsStoreMode::Ephemeral)?;
let removed_managed = logout(codex_home, auth_credentials_store_mode)?;
Ok(removed_ephemeral || removed_managed)
}
fn load_auth(
@@ -528,12 +350,6 @@ fn load_auth(
enable_codex_api_key_env: bool,
auth_credentials_store_mode: AuthCredentialsStoreMode,
) -> std::io::Result<Option<CodexAuth>> {
let build_auth = |auth_dot_json: AuthDotJson, storage_mode| {
let client = crate::default_client::create_client();
CodexAuth::from_auth_dot_json(codex_home, auth_dot_json, storage_mode, client)
};
// API key via env var takes precedence over any other auth method.
if enable_codex_api_key_env && let Some(api_key) = read_codex_api_key_from_env() {
let client = crate::default_client::create_client();
return Ok(Some(CodexAuth::from_api_key_with_client(
@@ -542,34 +358,39 @@ fn load_auth(
)));
}
// External ChatGPT auth tokens live in the in-memory (ephemeral) store. Always check this
// first so external auth takes precedence over any persisted credentials.
let ephemeral_storage = create_auth_storage(
codex_home.to_path_buf(),
AuthCredentialsStoreMode::Ephemeral,
);
if let Some(auth_dot_json) = ephemeral_storage.load()? {
let auth = build_auth(auth_dot_json, AuthCredentialsStoreMode::Ephemeral)?;
return Ok(Some(auth));
}
// If the caller explicitly requested ephemeral auth, there is no persisted fallback.
if auth_credentials_store_mode == AuthCredentialsStoreMode::Ephemeral {
return Ok(None);
}
// Fall back to the configured persistent store (file/keyring/auto) for managed auth.
let storage = create_auth_storage(codex_home.to_path_buf(), auth_credentials_store_mode);
let client = crate::default_client::create_client();
let auth_dot_json = match storage.load()? {
Some(auth) => auth,
None => return Ok(None),
};
let auth = build_auth(auth_dot_json, auth_credentials_store_mode)?;
Ok(Some(auth))
let AuthDotJson {
openai_api_key: auth_json_api_key,
tokens,
last_refresh,
} = auth_dot_json;
    // Prefer AuthMode::ApiKey if an API key is set in the auth.json.
if let Some(api_key) = &auth_json_api_key {
return Ok(Some(CodexAuth::from_api_key_with_client(api_key, client)));
}
Ok(Some(CodexAuth {
api_key: None,
mode: AuthMode::ChatGPT,
storage: storage.clone(),
auth_dot_json: Arc::new(Mutex::new(Some(AuthDotJson {
openai_api_key: None,
tokens,
last_refresh,
}))),
client,
}))
}
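The comments in the side of this hunk that consults the ephemeral store describe a fixed lookup order: an API key from the environment wins, then externally injected (ephemeral) tokens, then the configured persistent store. A sketch of that precedence with each store reduced to an `Option` and the storage plumbing elided (parameter names here are hypothetical):

```rust
fn resolve_auth(
    env_api_key: Option<&str>,
    ephemeral: Option<&str>,
    persistent: Option<&str>,
    ephemeral_only: bool,
) -> Option<String> {
    if let Some(key) = env_api_key {
        return Some(key.to_string()); // env var API key takes precedence
    }
    if let Some(auth) = ephemeral {
        return Some(auth.to_string()); // external tokens beat persisted logins
    }
    if ephemeral_only {
        return None; // ephemeral mode has no persisted fallback
    }
    persistent.map(str::to_string) // file/keyring storage comes last
}

fn main() {
    assert_eq!(resolve_auth(None, Some("ext"), Some("file"), false).as_deref(), Some("ext"));
    assert_eq!(resolve_auth(None, None, Some("file"), true), None);
}
```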
fn update_tokens(
async fn update_tokens(
storage: &Arc<dyn AuthStorageBackend>,
id_token: Option<String>,
access_token: Option<String>,
@@ -716,82 +537,17 @@ fn refresh_token_endpoint() -> String {
.unwrap_or_else(|_| REFRESH_TOKEN_URL.to_string())
}
impl AuthDotJson {
fn from_external_tokens(external: &ExternalAuthTokens, id_token: IdTokenInfo) -> Self {
let account_id = id_token.chatgpt_account_id.clone();
let tokens = TokenData {
id_token,
access_token: external.access_token.clone(),
refresh_token: String::new(),
account_id,
};
Self {
auth_mode: Some(ApiAuthMode::ChatgptAuthTokens),
openai_api_key: None,
tokens: Some(tokens),
last_refresh: Some(Utc::now()),
}
}
fn from_external_token_strings(id_token: &str, access_token: &str) -> std::io::Result<Self> {
let id_token_info = parse_id_token(id_token).map_err(std::io::Error::other)?;
let external = ExternalAuthTokens {
access_token: access_token.to_string(),
id_token: id_token.to_string(),
};
Ok(Self::from_external_tokens(&external, id_token_info))
}
fn resolved_mode(&self) -> ApiAuthMode {
if let Some(mode) = self.auth_mode {
return mode;
}
if self.openai_api_key.is_some() {
return ApiAuthMode::ApiKey;
}
ApiAuthMode::ChatGPT
}
fn storage_mode(
&self,
auth_credentials_store_mode: AuthCredentialsStoreMode,
) -> AuthCredentialsStoreMode {
if self.resolved_mode() == ApiAuthMode::ChatgptAuthTokens {
AuthCredentialsStoreMode::Ephemeral
} else {
auth_credentials_store_mode
}
}
}
use std::sync::RwLock;
/// Internal cached auth state.
#[derive(Clone)]
#[derive(Clone, Debug)]
struct CachedAuth {
auth: Option<CodexAuth>,
/// Callback used to refresh external auth by asking the parent app for new tokens.
external_refresher: Option<Arc<dyn ExternalAuthRefresher>>,
}
impl Debug for CachedAuth {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("CachedAuth")
.field(
"auth_mode",
&self.auth.as_ref().map(CodexAuth::api_auth_mode),
)
.field(
"external_refresher",
&self.external_refresher.as_ref().map(|_| "present"),
)
.finish()
}
}
enum UnauthorizedRecoveryStep {
Reload,
RefreshToken,
ExternalRefresh,
Done,
}
@@ -800,53 +556,30 @@ enum ReloadOutcome {
Skipped,
}
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
enum UnauthorizedRecoveryMode {
Managed,
External,
}
// UnauthorizedRecovery is a state machine that attempts to refresh authentication when requests
// to the API fail with a 401 status code.
// The client calls next() every time it encounters a 401 error, once per retry.
// For API key-based authentication, we don't do anything and let the error bubble up to the user.
//
// For ChatGPT-based authentication, we:
// 1. Attempt to reload the auth data from disk. We only reload if the account id matches the one the current process is running as.
// 2. Attempt to refresh the token using the OAuth token refresh flow.
// If the server still responds with 401 after both steps, we let the error bubble up to the user.
//
// For external ChatGPT auth tokens (chatgptAuthTokens), UnauthorizedRecovery does not touch disk or refresh
// tokens locally. Instead it calls the ExternalAuthRefresher (account/chatgptAuthTokens/refresh) to ask the
// parent app for new tokens, stores them in the ephemeral auth store, and retries once.
pub struct UnauthorizedRecovery {
manager: Arc<AuthManager>,
step: UnauthorizedRecoveryStep,
expected_account_id: Option<String>,
mode: UnauthorizedRecoveryMode,
}
impl UnauthorizedRecovery {
fn new(manager: Arc<AuthManager>) -> Self {
let cached_auth = manager.auth_cached();
let expected_account_id = cached_auth.as_ref().and_then(CodexAuth::get_account_id);
let mode = if cached_auth
let expected_account_id = manager
.auth_cached()
.as_ref()
.is_some_and(CodexAuth::is_external_chatgpt_tokens)
{
UnauthorizedRecoveryMode::External
} else {
UnauthorizedRecoveryMode::Managed
};
let step = match mode {
UnauthorizedRecoveryMode::Managed => UnauthorizedRecoveryStep::Reload,
UnauthorizedRecoveryMode::External => UnauthorizedRecoveryStep::ExternalRefresh,
};
.and_then(CodexAuth::get_account_id);
Self {
manager,
step,
step: UnauthorizedRecoveryStep::Reload,
expected_account_id,
mode,
}
}
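The step progression behind the comment above can be shown in miniature. This is a sketch only: the real `UnauthorizedRecovery` also performs the async side effects (reload, token refresh, external refresh) at each step, and the `Step` type below is a stand-in.

```rust
#[derive(Clone, Copy, Debug, PartialEq)]
enum Step {
    Reload,
    RefreshToken,
    ExternalRefresh,
    Done,
}

fn next_step(step: Step) -> Step {
    match step {
        Step::Reload => Step::RefreshToken,  // managed auth: reload, then refresh
        Step::RefreshToken => Step::Done,    // out of options after the refresh
        Step::ExternalRefresh => Step::Done, // external tokens: single refresh attempt
        Step::Done => Step::Done,
    }
}

fn main() {
    // Managed auth starts at Reload; external tokens start at ExternalRefresh.
    let mut step = Step::Reload;
    while step != Step::Done {
        step = next_step(step);
    }
    assert_eq!(step, Step::Done);
}
```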
@@ -854,14 +587,7 @@ impl UnauthorizedRecovery {
if !self
.manager
.auth_cached()
.as_ref()
.is_some_and(CodexAuth::is_chatgpt_auth)
{
return false;
}
if self.mode == UnauthorizedRecoveryMode::External
&& !self.manager.has_external_auth_refresher()
.is_some_and(|auth| auth.mode == AuthMode::ChatGPT)
{
return false;
}
@@ -896,12 +622,6 @@ impl UnauthorizedRecovery {
self.manager.refresh_token().await?;
self.step = UnauthorizedRecoveryStep::Done;
}
UnauthorizedRecoveryStep::ExternalRefresh => {
self.manager
.refresh_external_auth(ExternalAuthRefreshReason::Unauthorized)
.await?;
self.step = UnauthorizedRecoveryStep::Done;
}
UnauthorizedRecoveryStep::Done => {}
}
Ok(())
@@ -922,7 +642,6 @@ pub struct AuthManager {
inner: RwLock<CachedAuth>,
enable_codex_api_key_env: bool,
auth_credentials_store_mode: AuthCredentialsStoreMode,
forced_chatgpt_workspace_id: RwLock<Option<String>>,
}
impl AuthManager {
@@ -935,7 +654,7 @@ impl AuthManager {
enable_codex_api_key_env: bool,
auth_credentials_store_mode: AuthCredentialsStoreMode,
) -> Self {
let managed_auth = load_auth(
let auth = load_auth(
&codex_home,
enable_codex_api_key_env,
auth_credentials_store_mode,
@@ -944,46 +663,34 @@ impl AuthManager {
.flatten();
Self {
codex_home,
inner: RwLock::new(CachedAuth {
auth: managed_auth,
external_refresher: None,
}),
inner: RwLock::new(CachedAuth { auth }),
enable_codex_api_key_env,
auth_credentials_store_mode,
forced_chatgpt_workspace_id: RwLock::new(None),
}
}
#[cfg(any(test, feature = "test-support"))]
/// Create an AuthManager with a specific CodexAuth, for testing only.
pub fn from_auth_for_testing(auth: CodexAuth) -> Arc<Self> {
let cached = CachedAuth {
auth: Some(auth),
external_refresher: None,
};
let cached = CachedAuth { auth: Some(auth) };
Arc::new(Self {
codex_home: PathBuf::from("non-existent"),
inner: RwLock::new(cached),
enable_codex_api_key_env: false,
auth_credentials_store_mode: AuthCredentialsStoreMode::File,
forced_chatgpt_workspace_id: RwLock::new(None),
})
}
#[cfg(any(test, feature = "test-support"))]
/// Create an AuthManager with a specific CodexAuth and codex home, for testing only.
pub fn from_auth_for_testing_with_home(auth: CodexAuth, codex_home: PathBuf) -> Arc<Self> {
let cached = CachedAuth {
auth: Some(auth),
external_refresher: None,
};
let cached = CachedAuth { auth: Some(auth) };
Arc::new(Self {
codex_home,
inner: RwLock::new(cached),
enable_codex_api_key_env: false,
auth_credentials_store_mode: AuthCredentialsStoreMode::File,
forced_chatgpt_workspace_id: RwLock::new(None),
})
}
@@ -1008,7 +715,7 @@ impl AuthManager {
pub fn reload(&self) -> bool {
tracing::info!("Reloading auth");
let new_auth = self.load_auth_from_storage();
self.set_cached_auth(new_auth)
self.set_auth(new_auth)
}
fn reload_if_account_id_matches(&self, expected_account_id: Option<&str>) -> ReloadOutcome {
@@ -1032,11 +739,11 @@ impl AuthManager {
}
tracing::info!("Reloading auth for account {expected_account_id}");
self.set_cached_auth(new_auth);
self.set_auth(new_auth);
ReloadOutcome::Reloaded
}
fn auths_equal(a: Option<&CodexAuth>, b: Option<&CodexAuth>) -> bool {
fn auths_equal(a: &Option<CodexAuth>, b: &Option<CodexAuth>) -> bool {
match (a, b) {
(None, None) => true,
(Some(a), Some(b)) => a == b,
@@ -1054,10 +761,9 @@ impl AuthManager {
.flatten()
}
fn set_cached_auth(&self, new_auth: Option<CodexAuth>) -> bool {
fn set_auth(&self, new_auth: Option<CodexAuth>) -> bool {
if let Ok(mut guard) = self.inner.write() {
let previous = guard.auth.as_ref();
let changed = !AuthManager::auths_equal(previous, new_auth.as_ref());
let changed = !AuthManager::auths_equal(&guard.auth, &new_auth);
tracing::info!("Reloaded auth, changed: {changed}");
guard.auth = new_auth;
changed
@@ -1066,39 +772,6 @@ impl AuthManager {
}
}
pub fn set_external_auth_refresher(&self, refresher: Arc<dyn ExternalAuthRefresher>) {
if let Ok(mut guard) = self.inner.write() {
guard.external_refresher = Some(refresher);
}
}
pub fn set_forced_chatgpt_workspace_id(&self, workspace_id: Option<String>) {
if let Ok(mut guard) = self.forced_chatgpt_workspace_id.write() {
*guard = workspace_id;
}
}
pub fn forced_chatgpt_workspace_id(&self) -> Option<String> {
self.forced_chatgpt_workspace_id
.read()
.ok()
.and_then(|guard| guard.clone())
}
pub fn has_external_auth_refresher(&self) -> bool {
self.inner
.read()
.ok()
.map(|guard| guard.external_refresher.is_some())
.unwrap_or(false)
}
pub fn is_external_auth_active(&self) -> bool {
self.auth_cached()
.as_ref()
.is_some_and(CodexAuth::is_external_chatgpt_tokens)
}
/// Convenience constructor returning an `Arc` wrapper.
pub fn shared(
codex_home: PathBuf,
@@ -1126,25 +799,13 @@ impl AuthManager {
Some(auth) => auth,
None => return Ok(()),
};
match auth {
CodexAuth::ChatGptAuthTokens(_) => {
self.refresh_external_auth(ExternalAuthRefreshReason::Unauthorized)
.await
}
CodexAuth::ChatGpt(chatgpt_auth) => {
let token_data = chatgpt_auth.current_token_data().ok_or_else(|| {
RefreshTokenError::Transient(std::io::Error::other(
"Token data is not available.",
))
})?;
self.refresh_tokens(&chatgpt_auth, token_data.refresh_token)
.await?;
// Reload to pick up persisted changes.
self.reload();
Ok(())
}
CodexAuth::ApiKey(_) => Ok(()),
}
let token_data = auth.get_current_token_data().ok_or_else(|| {
RefreshTokenError::Transient(std::io::Error::other("Token data is not available."))
})?;
self.refresh_tokens(&auth, token_data.refresh_token).await?;
// Reload to pick up persisted changes.
self.reload();
Ok(())
}
    /// Log out by deleting the on-disk auth.json (if present). Returns Ok(true)
@@ -1152,29 +813,22 @@ impl AuthManager {
    /// reloads the in-memory auth cache so callers immediately observe the
/// unauthenticated state.
pub fn logout(&self) -> std::io::Result<bool> {
let removed = logout_all_stores(&self.codex_home, self.auth_credentials_store_mode)?;
let removed = super::auth::logout(&self.codex_home, self.auth_credentials_store_mode)?;
        // Always reload to clear any cached auth (even if the file is absent).
self.reload();
Ok(removed)
}
pub fn get_auth_mode(&self) -> Option<ApiAuthMode> {
self.auth_cached().as_ref().map(CodexAuth::api_auth_mode)
}
pub fn get_internal_auth_mode(&self) -> Option<AuthMode> {
self.auth_cached()
.as_ref()
.map(CodexAuth::internal_auth_mode)
pub fn get_auth_mode(&self) -> Option<AuthMode> {
self.auth_cached().map(|a| a.mode)
}
async fn refresh_if_stale(&self, auth: &CodexAuth) -> Result<bool, RefreshTokenError> {
let chatgpt_auth = match auth {
CodexAuth::ChatGpt(chatgpt_auth) => chatgpt_auth,
_ => return Ok(false),
};
if auth.mode != AuthMode::ChatGPT {
return Ok(false);
}
let auth_dot_json = match chatgpt_auth.current_auth_json() {
let auth_dot_json = match auth.get_current_auth_json() {
Some(auth_dot_json) => auth_dot_json,
None => return Ok(false),
};
@@ -1189,78 +843,25 @@ impl AuthManager {
if last_refresh >= Utc::now() - chrono::Duration::days(TOKEN_REFRESH_INTERVAL) {
return Ok(false);
}
self.refresh_tokens(chatgpt_auth, tokens.refresh_token)
.await?;
self.refresh_tokens(auth, tokens.refresh_token).await?;
self.reload();
Ok(true)
}
async fn refresh_external_auth(
&self,
reason: ExternalAuthRefreshReason,
) -> Result<(), RefreshTokenError> {
let forced_chatgpt_workspace_id = self.forced_chatgpt_workspace_id();
let refresher = match self.inner.read() {
Ok(guard) => guard.external_refresher.clone(),
Err(_) => {
return Err(RefreshTokenError::Transient(std::io::Error::other(
"failed to read external auth state",
)));
}
};
let Some(refresher) = refresher else {
return Err(RefreshTokenError::Transient(std::io::Error::other(
"external auth refresher is not configured",
)));
};
let previous_account_id = self
.auth_cached()
.as_ref()
.and_then(CodexAuth::get_account_id);
let context = ExternalAuthRefreshContext {
reason,
previous_account_id,
};
let refreshed = refresher.refresh(context).await?;
let id_token = parse_id_token(&refreshed.id_token)
.map_err(|err| RefreshTokenError::Transient(std::io::Error::other(err)))?;
if let Some(expected_workspace_id) = forced_chatgpt_workspace_id.as_deref() {
let actual_workspace_id = id_token.chatgpt_account_id.as_deref();
if actual_workspace_id != Some(expected_workspace_id) {
return Err(RefreshTokenError::Transient(std::io::Error::other(
format!(
"external auth refresh returned workspace {actual_workspace_id:?}, expected {expected_workspace_id:?}",
),
)));
}
}
let auth_dot_json = AuthDotJson::from_external_tokens(&refreshed, id_token);
save_auth(
&self.codex_home,
&auth_dot_json,
AuthCredentialsStoreMode::Ephemeral,
)
.map_err(RefreshTokenError::Transient)?;
self.reload();
Ok(())
}
async fn refresh_tokens(
&self,
auth: &ChatGptAuth,
auth: &CodexAuth,
refresh_token: String,
) -> Result<(), RefreshTokenError> {
let refresh_response = try_refresh_token(refresh_token, auth.client()).await?;
let refresh_response = try_refresh_token(refresh_token, &auth.client).await?;
update_tokens(
auth.storage(),
&auth.storage,
refresh_response.id_token,
refresh_response.access_token,
refresh_response.refresh_token,
)
.await
.map_err(RefreshTokenError::from)?;
Ok(())
@@ -1309,6 +910,7 @@ mod tests {
Some("new-access-token".to_string()),
Some("new-refresh-token".to_string()),
)
.await
.expect("update_tokens should succeed");
let tokens = updated.tokens.expect("tokens should exist");
@@ -1369,22 +971,26 @@ mod tests {
)
.expect("failed to write auth file");
let auth = super::load_auth(codex_home.path(), false, AuthCredentialsStoreMode::File)
let CodexAuth {
api_key,
mode,
auth_dot_json,
storage: _,
..
} = super::load_auth(codex_home.path(), false, AuthCredentialsStoreMode::File)
.unwrap()
.unwrap();
assert_eq!(None, auth.api_key());
assert_eq!(AuthMode::ChatGPT, auth.internal_auth_mode());
assert_eq!(None, api_key);
assert_eq!(AuthMode::ChatGPT, mode);
let auth_dot_json = auth
.get_current_auth_json()
.expect("AuthDotJson should exist");
let guard = auth_dot_json.lock().unwrap();
let auth_dot_json = guard.as_ref().expect("AuthDotJson should exist");
let last_refresh = auth_dot_json
.last_refresh
.expect("last_refresh should be recorded");
assert_eq!(
AuthDotJson {
auth_mode: None,
&AuthDotJson {
openai_api_key: None,
tokens: Some(TokenData {
id_token: IdTokenInfo {
@@ -1418,8 +1024,8 @@ mod tests {
let auth = super::load_auth(dir.path(), false, AuthCredentialsStoreMode::File)
.unwrap()
.unwrap();
assert_eq!(auth.internal_auth_mode(), AuthMode::ApiKey);
assert_eq!(auth.api_key(), Some("sk-test-key"));
assert_eq!(auth.mode, AuthMode::ApiKey);
assert_eq!(auth.api_key, Some("sk-test-key".to_string()));
assert!(auth.get_token_data().is_err());
}
@@ -1428,7 +1034,6 @@ mod tests {
fn logout_removes_auth_file() -> Result<(), std::io::Error> {
let dir = tempdir()?;
let auth_dot_json = AuthDotJson {
auth_mode: Some(ApiAuthMode::ApiKey),
openai_api_key: Some("sk-test-key".to_string()),
tokens: None,
last_refresh: None,

View File

@@ -5,7 +5,6 @@ use serde::Deserialize;
use serde::Serialize;
use sha2::Digest;
use sha2::Sha256;
use std::collections::HashMap;
use std::fmt::Debug;
use std::fs::File;
use std::fs::OpenOptions;
@@ -16,14 +15,11 @@ use std::os::unix::fs::OpenOptionsExt;
use std::path::Path;
use std::path::PathBuf;
use std::sync::Arc;
use std::sync::Mutex;
use tracing::warn;
use crate::token_data::TokenData;
use codex_app_server_protocol::AuthMode;
use codex_keyring_store::DefaultKeyringStore;
use codex_keyring_store::KeyringStore;
use once_cell::sync::Lazy;
/// Determine where Codex should store CLI auth credentials.
#[derive(Debug, Default, Copy, Clone, PartialEq, Eq, Serialize, Deserialize, JsonSchema)]
@@ -36,16 +32,11 @@ pub enum AuthCredentialsStoreMode {
Keyring,
/// Use keyring when available; otherwise, fall back to a file in CODEX_HOME.
Auto,
/// Store credentials in memory only for the current process.
Ephemeral,
}
/// Expected structure for $CODEX_HOME/auth.json.
#[derive(Deserialize, Serialize, Clone, Debug, PartialEq)]
pub struct AuthDotJson {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub auth_mode: Option<AuthMode>,
#[serde(rename = "OPENAI_API_KEY")]
pub openai_api_key: Option<String>,
@@ -85,8 +76,8 @@ impl FileAuthStorage {
Self { codex_home }
}
/// Attempt to read and parse the `auth.json` file in the given `CODEX_HOME` directory.
/// Returns the full AuthDotJson structure.
/// Attempt to read and refresh the `auth.json` file in the given `CODEX_HOME` directory.
/// Returns the full AuthDotJson structure after refreshing if necessary.
pub(super) fn try_read_auth_json(&self, auth_file: &Path) -> std::io::Result<AuthDotJson> {
let mut file = File::open(auth_file)?;
let mut contents = String::new();
@@ -265,49 +256,6 @@ impl AuthStorageBackend for AutoAuthStorage {
}
}
// A global in-memory store for mapping codex_home -> AuthDotJson.
static EPHEMERAL_AUTH_STORE: Lazy<Mutex<HashMap<String, AuthDotJson>>> =
Lazy::new(|| Mutex::new(HashMap::new()));
#[derive(Clone, Debug)]
struct EphemeralAuthStorage {
codex_home: PathBuf,
}
impl EphemeralAuthStorage {
fn new(codex_home: PathBuf) -> Self {
Self { codex_home }
}
fn with_store<F, T>(&self, action: F) -> std::io::Result<T>
where
F: FnOnce(&mut HashMap<String, AuthDotJson>, String) -> std::io::Result<T>,
{
let key = compute_store_key(&self.codex_home)?;
let mut store = EPHEMERAL_AUTH_STORE
.lock()
.map_err(|_| std::io::Error::other("failed to lock ephemeral auth storage"))?;
action(&mut store, key)
}
}
impl AuthStorageBackend for EphemeralAuthStorage {
fn load(&self) -> std::io::Result<Option<AuthDotJson>> {
self.with_store(|store, key| Ok(store.get(&key).cloned()))
}
fn save(&self, auth: &AuthDotJson) -> std::io::Result<()> {
self.with_store(|store, key| {
store.insert(key, auth.clone());
Ok(())
})
}
fn delete(&self) -> std::io::Result<bool> {
self.with_store(|store, key| Ok(store.remove(&key).is_some()))
}
}
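The pattern removed here — a process-global map keyed off `codex_home`, so credentials never touch disk — can be shown in miniature. A sketch under assumptions: the real code derives the key via `compute_store_key`, returns `io::Result`, and uses `once_cell::Lazy` rather than the std `OnceLock` used below.

```rust
use std::collections::HashMap;
use std::sync::Mutex;
use std::sync::OnceLock;

// Process-global, in-memory store; nothing is ever written to disk.
static STORE: OnceLock<Mutex<HashMap<String, String>>> = OnceLock::new();

fn store() -> &'static Mutex<HashMap<String, String>> {
    STORE.get_or_init(|| Mutex::new(HashMap::new()))
}

fn save(key: &str, value: &str) {
    store().lock().unwrap().insert(key.to_string(), value.to_string());
}

fn load(key: &str) -> Option<String> {
    store().lock().unwrap().get(key).cloned()
}

fn main() {
    save("/home/user/.codex", "auth-payload");
    assert_eq!(load("/home/user/.codex").as_deref(), Some("auth-payload"));
}
```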
pub(super) fn create_auth_storage(
codex_home: PathBuf,
mode: AuthCredentialsStoreMode,
@@ -327,7 +275,6 @@ fn create_auth_storage_with_keyring_store(
Arc::new(KeyringAuthStorage::new(codex_home, keyring_store))
}
AuthCredentialsStoreMode::Auto => Arc::new(AutoAuthStorage::new(codex_home, keyring_store)),
AuthCredentialsStoreMode::Ephemeral => Arc::new(EphemeralAuthStorage::new(codex_home)),
}
}
@@ -349,7 +296,6 @@ mod tests {
let codex_home = tempdir()?;
let storage = FileAuthStorage::new(codex_home.path().to_path_buf());
let auth_dot_json = AuthDotJson {
auth_mode: Some(AuthMode::ApiKey),
openai_api_key: Some("test-key".to_string()),
tokens: None,
last_refresh: Some(Utc::now()),
@@ -369,7 +315,6 @@ mod tests {
let codex_home = tempdir()?;
let storage = FileAuthStorage::new(codex_home.path().to_path_buf());
let auth_dot_json = AuthDotJson {
auth_mode: Some(AuthMode::ApiKey),
openai_api_key: Some("test-key".to_string()),
tokens: None,
last_refresh: Some(Utc::now()),
@@ -391,7 +336,6 @@ mod tests {
fn file_storage_delete_removes_auth_file() -> anyhow::Result<()> {
let dir = tempdir()?;
let auth_dot_json = AuthDotJson {
auth_mode: Some(AuthMode::ApiKey),
openai_api_key: Some("sk-test-key".to_string()),
tokens: None,
last_refresh: None,
@@ -406,32 +350,6 @@ mod tests {
Ok(())
}
#[test]
fn ephemeral_storage_save_load_delete_is_in_memory_only() -> anyhow::Result<()> {
let dir = tempdir()?;
let storage = create_auth_storage(
dir.path().to_path_buf(),
AuthCredentialsStoreMode::Ephemeral,
);
let auth_dot_json = AuthDotJson {
auth_mode: Some(AuthMode::ApiKey),
openai_api_key: Some("sk-ephemeral".to_string()),
tokens: None,
last_refresh: Some(Utc::now()),
};
storage.save(&auth_dot_json)?;
let loaded = storage.load()?;
assert_eq!(Some(auth_dot_json), loaded);
let removed = storage.delete()?;
assert!(removed);
let loaded = storage.load()?;
assert_eq!(None, loaded);
assert!(!get_auth_file(dir.path()).exists());
Ok(())
}
fn seed_keyring_and_fallback_auth_file_for_delete<F>(
mock_keyring: &MockKeyringStore,
codex_home: &Path,
@@ -507,7 +425,6 @@ mod tests {
fn auth_with_prefix(prefix: &str) -> AuthDotJson {
AuthDotJson {
auth_mode: Some(AuthMode::ApiKey),
openai_api_key: Some(format!("{prefix}-api-key")),
tokens: Some(TokenData {
id_token: id_token_with_prefix(prefix),
@@ -528,7 +445,6 @@ mod tests {
Arc::new(mock_keyring.clone()),
);
let expected = AuthDotJson {
auth_mode: Some(AuthMode::ApiKey),
openai_api_key: Some("sk-test".to_string()),
tokens: None,
last_refresh: None,
@@ -565,7 +481,6 @@ mod tests {
let auth_file = get_auth_file(codex_home.path());
std::fs::write(&auth_file, "stale")?;
let auth = AuthDotJson {
auth_mode: Some(AuthMode::ChatGPT),
openai_api_key: None,
tokens: Some(TokenData {
id_token: Default::default(),

View File

@@ -27,6 +27,7 @@ use codex_api::common::ResponsesWsRequest;
use codex_api::create_text_param_for_request;
use codex_api::error::ApiError;
use codex_api::requests::responses::Compression;
use codex_app_server_protocol::AuthMode;
use codex_otel::OtelManager;
use codex_protocol::ThreadId;
@@ -49,7 +50,6 @@ use tokio::sync::mpsc;
use tracing::warn;
use crate::AuthManager;
use crate::auth::CodexAuth;
use crate::auth::RefreshTokenError;
use crate::client_common::Prompt;
use crate::client_common::ResponseEvent;
@@ -65,7 +65,6 @@ use crate::model_provider_info::ModelProviderInfo;
use crate::model_provider_info::WireApi;
use crate::tools::spec::create_tools_json_for_chat_completions_api;
use crate::tools::spec::create_tools_json_for_responses_api;
use crate::transport_manager::TransportManager;
pub const WEB_SEARCH_ELIGIBLE_HEADER: &str = "x-oai-web-search-eligible";
pub const X_CODEX_TURN_STATE_HEADER: &str = "x-codex-turn-state";
@@ -81,7 +80,6 @@ struct ModelClientState {
effort: Option<ReasoningEffortConfig>,
summary: ReasoningSummaryConfig,
session_source: SessionSource,
transport_manager: TransportManager,
}
#[derive(Debug, Clone)]
@@ -93,7 +91,6 @@ pub struct ModelClientSession {
state: Arc<ModelClientState>,
connection: Option<ApiWebSocketConnection>,
websocket_last_items: Vec<ResponseItem>,
transport_manager: TransportManager,
/// Turn state for sticky routing.
///
/// This is an `OnceLock` that stores the turn state value received from the server
@@ -119,7 +116,6 @@ impl ModelClient {
summary: ReasoningSummaryConfig,
conversation_id: ThreadId,
session_source: SessionSource,
transport_manager: TransportManager,
) -> Self {
Self {
state: Arc::new(ModelClientState {
@@ -132,7 +128,6 @@ impl ModelClient {
effort,
summary,
session_source,
transport_manager,
}),
}
}
@@ -142,7 +137,6 @@ impl ModelClient {
state: Arc::clone(&self.state),
connection: None,
websocket_last_items: Vec::new(),
transport_manager: self.state.transport_manager.clone(),
turn_state: Arc::new(OnceLock::new()),
}
}
@@ -177,10 +171,6 @@ impl ModelClient {
self.state.session_source.clone()
}
pub(crate) fn transport_manager(&self) -> TransportManager {
self.state.transport_manager.clone()
}
/// Returns the currently configured model slug.
pub fn get_model(&self) -> String {
self.state.model_info.slug.clone()
@@ -220,7 +210,7 @@ impl ModelClient {
let api_provider = self
.state
.provider
.to_api_provider(auth.as_ref().map(CodexAuth::internal_auth_mode))?;
.to_api_provider(auth.as_ref().map(|a| a.mode))?;
let api_auth = auth_provider_from_auth(auth.clone(), &self.state.provider)?;
let transport = ReqwestTransport::new(build_reqwest_client());
let request_telemetry = self.build_request_telemetry();
@@ -260,18 +250,9 @@ impl ModelClientSession {
/// For Chat providers, the underlying stream is optionally aggregated
/// based on the `show_raw_agent_reasoning` flag in the config.
pub async fn stream(&mut self, prompt: &Prompt) -> Result<ResponseStream> {
let wire_api = self.state.provider.wire_api;
match wire_api {
WireApi::Responses => {
let websocket_enabled = self.responses_websocket_enabled()
&& !self.transport_manager.disable_websockets();
if websocket_enabled {
self.stream_responses_websocket(prompt).await
} else {
self.stream_responses_api(prompt).await
}
}
match self.state.provider.wire_api {
WireApi::Responses => self.stream_responses_api(prompt).await,
WireApi::ResponsesWebsocket => self.stream_responses_websocket(prompt).await,
WireApi::Chat => {
let api_stream = self.stream_chat_completions(prompt).await?;
@@ -290,34 +271,6 @@ impl ModelClientSession {
}
}
pub(crate) fn try_switch_fallback_transport(&mut self) -> bool {
let websocket_enabled = self.responses_websocket_enabled();
let activated = self
.transport_manager
.activate_http_fallback(websocket_enabled);
if activated {
warn!("falling back to HTTP");
self.state.otel_manager.counter(
"codex.transport.fallback_to_http",
1,
&[("from_wire_api", "responses_websocket")],
);
self.connection = None;
self.websocket_last_items.clear();
}
activated
}
fn responses_websocket_enabled(&self) -> bool {
self.state.provider.supports_websockets
&& self
.state
.config
.features
.enabled(Feature::ResponsesWebsockets)
}
fn build_responses_request(&self, prompt: &Prompt) -> Result<ApiPrompt> {
let instructions = prompt.base_instructions.text.clone();
let tools_json: Vec<Value> = create_tools_json_for_responses_api(&prompt.tools)?;
@@ -469,7 +422,7 @@ impl ModelClientSession {
.config
.features
.enabled(Feature::EnableRequestCompression)
&& auth.is_some_and(CodexAuth::is_chatgpt_auth)
&& auth.is_some_and(|auth| auth.mode == AuthMode::ChatGPT)
&& self.state.provider.is_openai()
{
Compression::Zstd
@@ -507,7 +460,7 @@ impl ModelClientSession {
let api_provider = self
.state
.provider
.to_api_provider(auth.as_ref().map(CodexAuth::internal_auth_mode))?;
.to_api_provider(auth.as_ref().map(|a| a.mode))?;
let api_auth = auth_provider_from_auth(auth.clone(), &self.state.provider)?;
let transport = ReqwestTransport::new(build_reqwest_client());
let (request_telemetry, sse_telemetry) = self.build_streaming_telemetry();
@@ -563,7 +516,7 @@ impl ModelClientSession {
let api_provider = self
.state
.provider
.to_api_provider(auth.as_ref().map(CodexAuth::internal_auth_mode))?;
.to_api_provider(auth.as_ref().map(|a| a.mode))?;
let api_auth = auth_provider_from_auth(auth.clone(), &self.state.provider)?;
let transport = ReqwestTransport::new(build_reqwest_client());
let (request_telemetry, sse_telemetry) = self.build_streaming_telemetry();
@@ -609,7 +562,7 @@ impl ModelClientSession {
let api_provider = self
.state
.provider
.to_api_provider(auth.as_ref().map(CodexAuth::internal_auth_mode))?;
.to_api_provider(auth.as_ref().map(|a| a.mode))?;
let api_auth = auth_provider_from_auth(auth.clone(), &self.state.provider)?;
let compression = self.responses_request_compression(auth.as_ref());

File diff suppressed because it is too large Load Diff

View File

@@ -126,9 +126,19 @@ pub(crate) async fn run_codex_thread_one_shot(
.await?;
// Send the initial input to kick off the one-shot turn.
io.submit(Op::UserInput {
let snapshot = io.thread_config_snapshot().await;
io.submit(Op::UserTurn {
use_thread_defaults: false,
items: input,
cwd: snapshot.cwd,
approval_policy: snapshot.approval_policy,
sandbox_policy: snapshot.sandbox_policy,
model: snapshot.model,
effort: snapshot.reasoning_effort,
summary: snapshot.reasoning_summary,
final_output_json_schema: None,
collaboration_mode: None,
personality: snapshot.personality,
})
.await?;
@@ -208,10 +218,6 @@ async fn forward_events(
id: _,
msg: EventMsg::SessionConfigured(_),
} => {}
Event {
id: _,
msg: EventMsg::ThreadNameUpdated(_),
} => {}
Event {
id,
msg: EventMsg::ExecApprovalRequest(event),

View File

@@ -5,15 +5,16 @@ use crate::protocol::Event;
use crate::protocol::Op;
use crate::protocol::Submission;
use codex_protocol::config_types::Personality;
use codex_protocol::config_types::ReasoningSummary;
use codex_protocol::openai_models::ReasoningEffort;
use codex_protocol::protocol::AskForApproval;
use codex_protocol::protocol::SandboxPolicy;
use codex_protocol::protocol::SessionSource;
use codex_protocol::user_input::UserInput;
use serde_json::Value;
use std::path::PathBuf;
use tokio::sync::watch;
use crate::state_db::StateDbHandle;
#[derive(Clone, Debug)]
pub struct ThreadConfigSnapshot {
pub model: String,
@@ -22,6 +23,7 @@ pub struct ThreadConfigSnapshot {
pub sandbox_policy: SandboxPolicy,
pub cwd: PathBuf,
pub reasoning_effort: Option<ReasoningEffort>,
pub reasoning_summary: ReasoningSummary,
pub personality: Option<Personality>,
pub session_source: SessionSource,
}
@@ -66,11 +68,41 @@ impl CodexThread {
self.rollout_path.clone()
}
pub fn state_db(&self) -> Option<StateDbHandle> {
self.codex.state_db()
}
pub async fn config_snapshot(&self) -> ThreadConfigSnapshot {
self.codex.thread_config_snapshot().await
}
/// Build a user turn using the thread's current default settings.
pub async fn user_turn_with_defaults(
&self,
items: Vec<UserInput>,
final_output_json_schema: Option<Value>,
) -> Op {
let snapshot = self.config_snapshot().await;
Op::UserTurn {
use_thread_defaults: true,
items,
cwd: snapshot.cwd,
approval_policy: snapshot.approval_policy,
sandbox_policy: snapshot.sandbox_policy,
model: snapshot.model,
effort: snapshot.reasoning_effort,
summary: snapshot.reasoning_summary,
final_output_json_schema,
collaboration_mode: None,
personality: snapshot.personality,
}
}
/// Submit a user turn using the thread's current default settings.
pub async fn submit_user_turn_with_defaults(
&self,
items: Vec<UserInput>,
final_output_json_schema: Option<Value>,
) -> CodexResult<String> {
let op = self
.user_turn_with_defaults(items, final_output_json_schema)
.await;
self.submit(op).await
}
}
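A hedged usage sketch of the helper added above, grounded in the signatures shown in this hunk (it assumes a `CodexThread` and the crate's `UserInput` type are in scope; error handling is elided):

```rust
// Submit a plain-text turn that inherits the thread's current defaults
// (cwd, approval policy, sandbox policy, model, effort, summary, personality).
async fn send_hello(thread: &CodexThread) -> CodexResult<String> {
    let items = vec![UserInput::Text {
        text: "hello".to_string(),
        text_elements: Vec::new(),
    }];
    // No structured-output schema for this turn.
    thread.submit_user_turn_with_defaults(items, None).await
}
```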

View File

@@ -10,6 +10,7 @@ use crate::error::CodexErr;
use crate::error::Result as CodexResult;
use crate::features::Feature;
use crate::protocol::CompactedItem;
use crate::protocol::ContextCompactedEvent;
use crate::protocol::EventMsg;
use crate::protocol::TurnContextItem;
use crate::protocol::TurnStartedEvent;
@@ -19,7 +20,6 @@ use crate::truncate::TruncationPolicy;
use crate::truncate::approx_token_count;
use crate::truncate::truncate_text;
use crate::util::backoff;
use codex_protocol::items::ContextCompactionItem;
use codex_protocol::items::TurnItem;
use codex_protocol::models::ContentItem;
use codex_protocol::models::ResponseInputItem;
@@ -71,9 +71,6 @@ async fn run_compact_task_inner(
turn_context: Arc<TurnContext>,
input: Vec<UserInput>,
) {
let compaction_item = TurnItem::ContextCompaction(ContextCompactionItem::new());
sess.emit_turn_item_started(&turn_context, &compaction_item)
.await;
let initial_input_for_turn: ResponseInputItem = ResponseInputItem::from(input);
let mut history = sess.clone_history().await;
@@ -196,8 +193,9 @@ async fn run_compact_task_inner(
});
sess.persist_rollout_items(&[rollout_item]).await;
sess.emit_turn_item_completed(&turn_context, compaction_item)
.await;
let event = EventMsg::ContextCompacted(ContextCompactedEvent {});
sess.send_event(&turn_context, event).await;
let warning = EventMsg::Warning(WarningEvent {
message: "Heads up: Long threads and multiple compactions can cause the model to be less accurate. Start a new thread when possible to keep threads small and targeted.".to_string(),
});

View File

@@ -5,11 +5,10 @@ use crate::codex::Session;
use crate::codex::TurnContext;
use crate::error::Result as CodexResult;
use crate::protocol::CompactedItem;
use crate::protocol::ContextCompactedEvent;
use crate::protocol::EventMsg;
use crate::protocol::RolloutItem;
use crate::protocol::TurnStartedEvent;
use codex_protocol::items::ContextCompactionItem;
use codex_protocol::items::TurnItem;
use codex_protocol::models::ResponseItem;
pub(crate) async fn run_inline_remote_auto_compact_task(
@@ -41,9 +40,6 @@ async fn run_remote_compact_task_inner_impl(
sess: &Arc<Session>,
turn_context: &Arc<TurnContext>,
) -> CodexResult<()> {
let compaction_item = TurnItem::ContextCompaction(ContextCompactionItem::new());
sess.emit_turn_item_started(turn_context, &compaction_item)
.await;
let history = sess.clone_history().await;
// Required to keep `/undo` available after compaction
@@ -81,7 +77,8 @@ async fn run_remote_compact_task_inner_impl(
sess.persist_rollout_items(&[RolloutItem::Compacted(compacted_item)])
.await;
sess.emit_turn_item_completed(turn_context, compaction_item)
.await;
let event = EventMsg::ContextCompacted(ContextCompactedEvent {});
sess.send_event(turn_context, event).await;
Ok(())
}

View File

@@ -7,7 +7,6 @@ use crate::config::types::McpServerConfig;
use crate::config::types::McpServerDisabledReason;
use crate::config::types::McpServerTransportConfig;
use crate::config::types::Notice;
use crate::config::types::NotificationMethod;
use crate::config::types::Notifications;
use crate::config::types::OtelConfig;
use crate::config::types::OtelConfigToml;
@@ -18,7 +17,6 @@ use crate::config::types::ShellEnvironmentPolicyToml;
use crate::config::types::SkillsConfig;
use crate::config::types::Tui;
use crate::config::types::UriBasedFileOpener;
use crate::config_loader::CloudRequirementsLoader;
use crate::config_loader::ConfigLayerStack;
use crate::config_loader::ConfigRequirements;
use crate::config_loader::LoaderOverrides;
@@ -40,7 +38,6 @@ use crate::project_doc::DEFAULT_PROJECT_DOC_FILENAME;
use crate::project_doc::LOCAL_PROJECT_DOC_FILENAME;
use crate::protocol::AskForApproval;
use crate::protocol::SandboxPolicy;
use crate::windows_sandbox::WindowsSandboxLevelExt;
use codex_app_server_protocol::Tools;
use codex_app_server_protocol::UserSavedConfig;
use codex_protocol::config_types::AltScreenMode;
@@ -52,7 +49,6 @@ use codex_protocol::config_types::SandboxMode;
use codex_protocol::config_types::TrustLevel;
use codex_protocol::config_types::Verbosity;
use codex_protocol::config_types::WebSearchMode;
use codex_protocol::config_types::WindowsSandboxLevel;
use codex_protocol::openai_models::ReasoningEffort;
use codex_rmcp_client::OAuthCredentialsStoreMode;
use codex_utils_absolute_path::AbsolutePathBuf;
@@ -194,13 +190,10 @@ pub struct Config {
/// If unset the feature is disabled.
pub notify: Option<Vec<String>>,
/// TUI notifications preference. When set, the TUI will send terminal notifications on
/// approvals and turn completions when not focused.
/// TUI notifications preference. When set, the TUI will send OSC 9 notifications on approvals
/// and turn completions when not focused.
pub tui_notifications: Notifications,
/// Notification method for terminal notifications (osc9 or bel).
pub tui_notification_method: NotificationMethod,
/// Enable ASCII animations and shimmer effects in the TUI.
pub animations: bool,
@@ -323,9 +316,6 @@ pub struct Config {
/// Centralized feature flags; source of truth for feature gating.
pub features: Features,
/// When `true`, suppress warnings about unstable (under development) features.
pub suppress_unstable_features_warning: bool,
/// The active profile name used to derive this `Config` (if any).
pub active_profile: Option<String>,
@@ -367,7 +357,6 @@ pub struct ConfigBuilder {
cli_overrides: Option<Vec<(String, TomlValue)>>,
harness_overrides: Option<ConfigOverrides>,
loader_overrides: Option<LoaderOverrides>,
cloud_requirements: Option<CloudRequirementsLoader>,
fallback_cwd: Option<PathBuf>,
}
@@ -392,11 +381,6 @@ impl ConfigBuilder {
self
}
pub fn cloud_requirements(mut self, cloud_requirements: CloudRequirementsLoader) -> Self {
self.cloud_requirements = Some(cloud_requirements);
self
}
pub fn fallback_cwd(mut self, fallback_cwd: Option<PathBuf>) -> Self {
self.fallback_cwd = fallback_cwd;
self
@@ -408,7 +392,6 @@ impl ConfigBuilder {
cli_overrides,
harness_overrides,
loader_overrides,
cloud_requirements,
fallback_cwd,
} = self;
let codex_home = codex_home.map_or_else(find_codex_home, std::io::Result::Ok)?;
@@ -421,14 +404,9 @@ impl ConfigBuilder {
None => AbsolutePathBuf::current_dir()?,
};
harness_overrides.cwd = Some(cwd.to_path_buf());
let config_layer_stack = load_config_layers_state(
&codex_home,
Some(cwd),
&cli_overrides,
loader_overrides,
cloud_requirements,
)
.await?;
let config_layer_stack =
load_config_layers_state(&codex_home, Some(cwd), &cli_overrides, loader_overrides)
.await?;
let merged_toml = config_layer_stack.effective_config();
// Note that each layer in ConfigLayerStack should have resolved
@@ -524,7 +502,6 @@ pub async fn load_config_as_toml_with_cli_overrides(
Some(cwd.clone()),
&cli_overrides,
LoaderOverrides::default(),
None,
)
.await?;
@@ -623,14 +600,9 @@ pub async fn load_global_mcp_servers(
// There is no cwd/project context for this query, so this will not include
// MCP servers defined in in-repo .codex/ folders.
let cwd: Option<AbsolutePathBuf> = None;
let config_layer_stack = load_config_layers_state(
codex_home,
cwd,
&cli_overrides,
LoaderOverrides::default(),
None,
)
.await?;
let config_layer_stack =
load_config_layers_state(codex_home, cwd, &cli_overrides, LoaderOverrides::default())
.await?;
let merged_toml = config_layer_stack.effective_config();
let Some(servers_value) = merged_toml.get("mcp_servers") else {
return Ok(BTreeMap::new());
@@ -934,9 +906,6 @@ pub struct ConfigToml {
#[schemars(schema_with = "crate::config::schema::features_schema")]
pub features: Option<FeaturesToml>,
/// Suppress warnings about unstable (under development) features.
pub suppress_unstable_features_warning: Option<bool>,
/// Settings for ghost snapshots (used for undo).
#[serde(default)]
pub ghost_snapshot: Option<GhostSnapshotToml>,
@@ -1081,7 +1050,6 @@ impl ConfigToml {
&self,
sandbox_mode_override: Option<SandboxMode>,
profile_sandbox_mode: Option<SandboxMode>,
windows_sandbox_level: WindowsSandboxLevel,
resolved_cwd: &Path,
) -> SandboxPolicyResolution {
let resolved_sandbox_mode = sandbox_mode_override
@@ -1120,7 +1088,7 @@ impl ConfigToml {
if cfg!(target_os = "windows")
&& matches!(resolved_sandbox_mode, SandboxMode::WorkspaceWrite)
// If the experimental Windows sandbox is enabled, do not force a downgrade.
&& windows_sandbox_level == codex_protocol::config_types::WindowsSandboxLevel::Disabled
&& crate::safety::get_platform_sandbox().is_none()
{
sandbox_policy = SandboxPolicy::new_read_only_policy();
forced_auto_mode_downgraded_on_windows = true;
@@ -1244,20 +1212,6 @@ fn resolve_web_search_mode(
None
}
pub(crate) fn resolve_web_search_mode_for_turn(
explicit_mode: Option<WebSearchMode>,
sandbox_policy: &SandboxPolicy,
) -> WebSearchMode {
if let Some(mode) = explicit_mode {
return mode;
}
if matches!(sandbox_policy, SandboxPolicy::DangerFullAccess) {
WebSearchMode::Live
} else {
WebSearchMode::Cached
}
}
impl Config {
#[cfg(test)]
fn load_from_base_config_with_overrides(
@@ -1324,6 +1278,17 @@ impl Config {
};
let features = Features::from_config(&cfg, &config_profile, feature_overrides);
let web_search_mode = resolve_web_search_mode(&cfg, &config_profile, &features);
#[cfg(target_os = "windows")]
{
// Base flag controls sandbox on/off; elevated only applies when base is enabled.
let sandbox_enabled = features.enabled(Feature::WindowsSandbox);
crate::safety::set_windows_sandbox_enabled(sandbox_enabled);
let elevated_enabled =
sandbox_enabled && features.enabled(Feature::WindowsSandboxElevated);
crate::safety::set_windows_elevated_sandbox_enabled(elevated_enabled);
}
let resolved_cwd = {
use std::env;
@@ -1350,16 +1315,10 @@ impl Config {
.get_active_project(&resolved_cwd)
.unwrap_or(ProjectConfig { trust_level: None });
let windows_sandbox_level = WindowsSandboxLevel::from_features(&features);
let SandboxPolicyResolution {
policy: mut sandbox_policy,
forced_auto_mode_downgraded_on_windows,
} = cfg.derive_sandbox_policy(
sandbox_mode,
config_profile.sandbox_mode,
windows_sandbox_level,
&resolved_cwd,
);
} = cfg.derive_sandbox_policy(sandbox_mode, config_profile.sandbox_mode, &resolved_cwd);
if let SandboxPolicy::WorkspaceWrite { writable_roots, .. } = &mut sandbox_policy {
for path in additional_writable_roots {
if !writable_roots.iter().any(|existing| existing == &path) {
@@ -1379,7 +1338,6 @@ impl Config {
AskForApproval::default()
}
});
let web_search_mode = resolve_web_search_mode(&cfg, &config_profile, &features);
// TODO(dylan): We should be able to leverage ConfigLayerStack so that
// we can reliably check this at every config level.
let did_user_set_custom_approval_policy_or_sandbox_mode = approval_policy_override
@@ -1391,6 +1349,12 @@ impl Config {
|| cfg.sandbox_mode.is_some();
let mut model_providers = built_in_model_providers();
if features.enabled(Feature::ResponsesWebsockets)
&& let Some(provider) = model_providers.get_mut("openai")
&& provider.is_openai()
{
provider.wire_api = crate::model_provider_info::WireApi::ResponsesWebsocket;
}
// Merge user-defined providers into the built-in list.
for (key, provider) in cfg.model_providers.into_iter() {
model_providers.entry(key).or_insert(provider);
@@ -1600,9 +1564,6 @@ impl Config {
use_experimental_unified_exec_tool,
ghost_snapshot,
features,
suppress_unstable_features_warning: cfg
.suppress_unstable_features_warning
.unwrap_or(false),
active_profile: active_profile_name,
active_project,
windows_wsl_setup_acknowledged: cfg.windows_wsl_setup_acknowledged.unwrap_or(false),
@@ -1624,11 +1585,6 @@ impl Config {
.as_ref()
.map(|t| t.notifications.clone())
.unwrap_or_default(),
tui_notification_method: cfg
.tui
.as_ref()
.map(|t| t.notification_method)
.unwrap_or_default(),
animations: cfg.tui.as_ref().map(|t| t.animations).unwrap_or(true),
show_tooltips: cfg.tui.as_ref().map(|t| t.show_tooltips).unwrap_or(true),
experimental_mode: cfg.tui.as_ref().and_then(|t| t.experimental_mode),
@@ -1701,19 +1657,20 @@ impl Config {
}
}
pub fn set_windows_sandbox_enabled(&mut self, value: bool) {
pub fn set_windows_sandbox_globally(&mut self, value: bool) {
crate::safety::set_windows_sandbox_enabled(value);
if value {
self.features.enable(Feature::WindowsSandbox);
self.forced_auto_mode_downgraded_on_windows = false;
} else {
self.features.disable(Feature::WindowsSandbox);
}
self.forced_auto_mode_downgraded_on_windows = !value;
}
pub fn set_windows_elevated_sandbox_enabled(&mut self, value: bool) {
pub fn set_windows_elevated_sandbox_globally(&mut self, value: bool) {
crate::safety::set_windows_elevated_sandbox_enabled(value);
if value {
self.features.enable(Feature::WindowsSandboxElevated);
self.forced_auto_mode_downgraded_on_windows = false;
} else {
self.features.disable(Feature::WindowsSandboxElevated);
}
@@ -1787,7 +1744,6 @@ mod tests {
use crate::config::types::FeedbackConfigToml;
use crate::config::types::HistoryPersistence;
use crate::config::types::McpServerTransportConfig;
use crate::config::types::NotificationMethod;
use crate::config::types::Notifications;
use crate::config_loader::RequirementSource;
use crate::features::Feature;
@@ -1884,7 +1840,6 @@ persistence = "none"
tui,
Tui {
notifications: Notifications::Enabled(true),
notification_method: NotificationMethod::Auto,
animations: true,
show_tooltips: true,
experimental_mode: None,
@@ -1907,7 +1862,6 @@ network_access = false # This should be ignored.
let resolution = sandbox_full_access_cfg.derive_sandbox_policy(
sandbox_mode_override,
None,
WindowsSandboxLevel::Disabled,
&PathBuf::from("/tmp/test"),
);
assert_eq!(
@@ -1931,7 +1885,6 @@ network_access = true # This should be ignored.
let resolution = sandbox_read_only_cfg.derive_sandbox_policy(
sandbox_mode_override,
None,
WindowsSandboxLevel::Disabled,
&PathBuf::from("/tmp/test"),
);
assert_eq!(
@@ -1963,7 +1916,6 @@ exclude_slash_tmp = true
let resolution = sandbox_workspace_write_cfg.derive_sandbox_policy(
sandbox_mode_override,
None,
WindowsSandboxLevel::Disabled,
&PathBuf::from("/tmp/test"),
);
if cfg!(target_os = "windows") {
@@ -2012,7 +1964,6 @@ trust_level = "trusted"
let resolution = sandbox_workspace_write_cfg.derive_sandbox_policy(
sandbox_mode_override,
None,
WindowsSandboxLevel::Disabled,
&PathBuf::from("/tmp/test"),
);
if cfg!(target_os = "windows") {
@@ -2304,7 +2255,7 @@ trust_level = "trusted"
}
#[test]
fn web_search_mode_defaults_to_none_if_unset() {
fn web_search_mode_uses_none_if_unset() {
let cfg = ConfigToml::default();
let profile = ConfigProfile::default();
let features = Features::with_defaults();
@@ -2344,30 +2295,6 @@ trust_level = "trusted"
);
}
#[test]
fn web_search_mode_for_turn_defaults_to_cached_when_unset() {
let mode = resolve_web_search_mode_for_turn(None, &SandboxPolicy::ReadOnly);
assert_eq!(mode, WebSearchMode::Cached);
}
#[test]
fn web_search_mode_for_turn_defaults_to_live_for_danger_full_access() {
let mode = resolve_web_search_mode_for_turn(None, &SandboxPolicy::DangerFullAccess);
assert_eq!(mode, WebSearchMode::Live);
}
#[test]
fn web_search_mode_for_turn_prefers_explicit_value() {
let mode = resolve_web_search_mode_for_turn(
Some(WebSearchMode::Cached),
&SandboxPolicy::DangerFullAccess,
);
assert_eq!(mode, WebSearchMode::Cached);
}
#[test]
fn profile_legacy_toggles_override_base() -> std::io::Result<()> {
let codex_home = TempDir::new()?;
@@ -2568,7 +2495,7 @@ profile = "project"
}
#[test]
fn responses_websockets_feature_does_not_change_wire_api() -> std::io::Result<()> {
fn responses_websockets_feature_updates_openai_provider() -> std::io::Result<()> {
let codex_home = TempDir::new()?;
let mut entries = BTreeMap::new();
entries.insert("responses_websockets".to_string(), true);
@@ -2585,7 +2512,7 @@ profile = "project"
assert_eq!(
config.model_provider.wire_api,
crate::model_provider_info::WireApi::Responses
crate::model_provider_info::WireApi::ResponsesWebsocket
);
Ok(())
@@ -2631,8 +2558,7 @@ profile = "project"
let cwd = AbsolutePathBuf::try_from(codex_home.path())?;
let config_layer_stack =
load_config_layers_state(codex_home.path(), Some(cwd), &Vec::new(), overrides, None)
.await?;
load_config_layers_state(codex_home.path(), Some(cwd), &Vec::new(), overrides).await?;
let cfg = deserialize_config_toml_with_base(
config_layer_stack.effective_config(),
codex_home.path(),
@@ -2759,7 +2685,6 @@ profile = "project"
Some(cwd),
&[("model".to_string(), TomlValue::String("cli".to_string()))],
overrides,
None,
)
.await?;
@@ -3707,7 +3632,6 @@ model_verbosity = "high"
stream_max_retries: Some(10),
stream_idle_timeout_ms: Some(300_000),
requires_openai_auth: false,
supports_websockets: false,
};
let model_provider_map = {
let mut model_provider_map = built_in_model_providers();
@@ -3808,7 +3732,6 @@ model_verbosity = "high"
use_experimental_unified_exec_tool: false,
ghost_snapshot: GhostSnapshotConfig::default(),
features: Features::with_defaults(),
suppress_unstable_features_warning: false,
active_profile: Some("o3".to_string()),
active_project: ProjectConfig { trust_level: None },
windows_wsl_setup_acknowledged: false,
@@ -3816,7 +3739,6 @@ model_verbosity = "high"
check_for_update_on_startup: true,
disable_paste_burst: false,
tui_notifications: Default::default(),
tui_notification_method: Default::default(),
animations: true,
show_tooltips: true,
experimental_mode: None,
@@ -3892,7 +3814,6 @@ model_verbosity = "high"
use_experimental_unified_exec_tool: false,
ghost_snapshot: GhostSnapshotConfig::default(),
features: Features::with_defaults(),
suppress_unstable_features_warning: false,
active_profile: Some("gpt3".to_string()),
active_project: ProjectConfig { trust_level: None },
windows_wsl_setup_acknowledged: false,
@@ -3900,7 +3821,6 @@ model_verbosity = "high"
check_for_update_on_startup: true,
disable_paste_burst: false,
tui_notifications: Default::default(),
tui_notification_method: Default::default(),
animations: true,
show_tooltips: true,
experimental_mode: None,
@@ -3991,7 +3911,6 @@ model_verbosity = "high"
use_experimental_unified_exec_tool: false,
ghost_snapshot: GhostSnapshotConfig::default(),
features: Features::with_defaults(),
suppress_unstable_features_warning: false,
active_profile: Some("zdr".to_string()),
active_project: ProjectConfig { trust_level: None },
windows_wsl_setup_acknowledged: false,
@@ -3999,7 +3918,6 @@ model_verbosity = "high"
check_for_update_on_startup: true,
disable_paste_burst: false,
tui_notifications: Default::default(),
tui_notification_method: Default::default(),
animations: true,
show_tooltips: true,
experimental_mode: None,
@@ -4076,7 +3994,6 @@ model_verbosity = "high"
use_experimental_unified_exec_tool: false,
ghost_snapshot: GhostSnapshotConfig::default(),
features: Features::with_defaults(),
suppress_unstable_features_warning: false,
active_profile: Some("gpt5".to_string()),
active_project: ProjectConfig { trust_level: None },
windows_wsl_setup_acknowledged: false,
@@ -4084,7 +4001,6 @@ model_verbosity = "high"
check_for_update_on_startup: true,
disable_paste_burst: false,
tui_notifications: Default::default(),
tui_notification_method: Default::default(),
animations: true,
show_tooltips: true,
experimental_mode: None,
@@ -4258,12 +4174,7 @@ trust_level = "untrusted"
let cfg = toml::from_str::<ConfigToml>(config_with_untrusted)
.expect("TOML deserialization should succeed");
let resolution = cfg.derive_sandbox_policy(
None,
None,
WindowsSandboxLevel::Disabled,
&PathBuf::from("/tmp/test"),
);
let resolution = cfg.derive_sandbox_policy(None, None, &PathBuf::from("/tmp/test"));
// Verify that untrusted projects get WorkspaceWrite (or ReadOnly on Windows due to downgrade)
if cfg!(target_os = "windows") {
@@ -4442,17 +4353,13 @@ mcp_oauth_callback_port = 5678
#[cfg(test)]
mod notifications_tests {
use crate::config::types::NotificationMethod;
use crate::config::types::Notifications;
use assert_matches::assert_matches;
use serde::Deserialize;
#[derive(Deserialize, Debug, PartialEq)]
struct TuiTomlTest {
#[serde(default)]
notifications: Notifications,
#[serde(default)]
notification_method: NotificationMethod,
}
#[derive(Deserialize, Debug, PartialEq)]
@@ -4483,15 +4390,4 @@ mod notifications_tests {
Notifications::Custom(ref v) if v == &vec!["foo".to_string()]
);
}
#[test]
fn test_tui_notification_method() {
let toml = r#"
[tui]
notification_method = "bel"
"#;
let parsed: RootTomlTest =
toml::from_str(toml).expect("deserialize notification_method=\"bel\"");
assert_eq!(parsed.tui.notification_method, NotificationMethod::Bel);
}
}

View File

@@ -376,7 +376,6 @@ impl ConfigService {
cwd,
&self.cli_overrides,
self.loader_overrides.clone(),
None,
)
.await
}

View File

@@ -428,25 +428,6 @@ impl Default for Notifications {
}
}
#[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq, Eq, JsonSchema, Default)]
#[serde(rename_all = "lowercase")]
pub enum NotificationMethod {
#[default]
Auto,
Osc9,
Bel,
}
impl fmt::Display for NotificationMethod {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
NotificationMethod::Auto => write!(f, "auto"),
NotificationMethod::Osc9 => write!(f, "osc9"),
NotificationMethod::Bel => write!(f, "bel"),
}
}
}
/// Collection of settings that are specific to the TUI.
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default, JsonSchema)]
#[schemars(deny_unknown_fields)]
@@ -456,11 +437,6 @@ pub struct Tui {
#[serde(default)]
pub notifications: Notifications,
/// Notification method to use for unfocused terminal notifications.
/// Defaults to `auto`.
#[serde(default)]
pub notification_method: NotificationMethod,
/// Enable animations (welcome screen, shimmer effects, spinners).
/// Defaults to `true`.
#[serde(default = "default_true")]
@@ -496,6 +472,7 @@ const fn default_true() -> bool {
/// (primarily the Codex IDE extension). NOTE: these are different from
/// notifications - notices are warnings, NUX screens, acknowledgements, etc.
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default, JsonSchema)]
#[schemars(deny_unknown_fields)]
pub struct Notice {
/// Tracks whether the user has acknowledged the full access warning prompt.
pub hide_full_access_warning: Option<bool>,

View File

@@ -10,7 +10,7 @@ This module is the canonical place to **load and describe Codex configuration la
Exported from `codex_core::config_loader`:
- `load_config_layers_state(codex_home, cwd_opt, cli_overrides, overrides, cloud_requirements) -> ConfigLayerStack`
- `load_config_layers_state(codex_home, cwd_opt, cli_overrides, overrides) -> ConfigLayerStack`
- `ConfigLayerStack`
- `effective_config() -> toml::Value`
- `origins() -> HashMap<String, ConfigLayerMetadata>`
@@ -49,7 +49,6 @@ let layers = load_config_layers_state(
Some(cwd),
&cli_overrides,
LoaderOverrides::default(),
None,
).await?;
let effective = layers.effective_config();
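Callers can also ask which layer supplied a given key via `origins()`. A
minimal sketch (assuming the map keys are dotted config paths and that the
metadata is `Debug`-printable; both are assumptions, not guarantees from this
README):

let origins = layers.origins();
if let Some(meta) = origins.get("mcp_servers") {
    // Metadata for the layer that last set this key.
    println!("mcp_servers came from: {meta:?}");
}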

View File

@@ -1,56 +0,0 @@
use crate::config_loader::ConfigRequirementsToml;
use futures::future::BoxFuture;
use futures::future::FutureExt;
use futures::future::Shared;
use std::fmt;
use std::future::Future;
#[derive(Clone)]
pub struct CloudRequirementsLoader {
// TODO(gt): This should return a Result once we can fail-closed.
fut: Shared<BoxFuture<'static, Option<ConfigRequirementsToml>>>,
}
impl CloudRequirementsLoader {
pub fn new<F>(fut: F) -> Self
where
F: Future<Output = Option<ConfigRequirementsToml>> + Send + 'static,
{
Self {
fut: fut.boxed().shared(),
}
}
pub async fn get(&self) -> Option<ConfigRequirementsToml> {
self.fut.clone().await
}
}
impl fmt::Debug for CloudRequirementsLoader {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("CloudRequirementsLoader").finish()
}
}
#[cfg(test)]
mod tests {
use super::*;
use pretty_assertions::assert_eq;
use std::sync::Arc;
use std::sync::atomic::AtomicUsize;
use std::sync::atomic::Ordering;
#[tokio::test]
async fn shared_future_runs_once() {
let counter = Arc::new(AtomicUsize::new(0));
let counter_clone = Arc::clone(&counter);
let loader = CloudRequirementsLoader::new(async move {
counter_clone.fetch_add(1, Ordering::SeqCst);
Some(ConfigRequirementsToml::default())
});
let (first, second) = tokio::join!(loader.get(), loader.get());
assert_eq!(first, second);
assert_eq!(counter.load(Ordering::SeqCst), 1);
}
}
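// Illustrative usage (a sketch, not API from this file): because the inner
// future is `Shared`, concurrent callers all await the same single fetch.
//
//     let loader = CloudRequirementsLoader::new(async move {
//         fetch_cloud_requirements().await // hypothetical fetch helper
//     });
//     let requirements = loader.get().await; // same result for every caller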

View File

@@ -13,7 +13,6 @@ use crate::config::ConstraintError;
pub enum RequirementSource {
Unknown,
MdmManagedPreferences { domain: String, key: String },
CloudRequirements,
SystemRequirementsToml { file: AbsolutePathBuf },
LegacyManagedConfigTomlFromFile { file: AbsolutePathBuf },
LegacyManagedConfigTomlFromMdm,
@@ -26,9 +25,6 @@ impl fmt::Display for RequirementSource {
RequirementSource::MdmManagedPreferences { domain, key } => {
write!(f, "MDM {domain}:{key}")
}
RequirementSource::CloudRequirements => {
write!(f, "cloud requirements")
}
RequirementSource::SystemRequirementsToml { file } => {
write!(f, "{}", file.as_path().display())
}
@@ -452,33 +448,6 @@ mod tests {
Ok(())
}
#[test]
fn constraint_error_includes_cloud_requirements_source() -> Result<()> {
let source: ConfigRequirementsToml = from_str(
r#"
allowed_approval_policies = ["on-request"]
"#,
)?;
let source_location = RequirementSource::CloudRequirements;
let mut target = ConfigRequirementsWithSources::default();
target.merge_unset_fields(source_location.clone(), source);
let requirements = ConfigRequirements::try_from(target)?;
assert_eq!(
requirements.approval_policy.can_set(&AskForApproval::Never),
Err(ConstraintError::InvalidValue {
field_name: "approval_policy",
candidate: "Never".into(),
allowed: "[OnRequest]".into(),
requirement_source: source_location,
})
);
Ok(())
}
#[test]
fn deserialize_allowed_approval_policies() -> Result<()> {
let toml_str = r#"

View File

@@ -1,4 +1,3 @@
mod cloud_requirements;
mod config_requirements;
mod diagnostics;
mod fingerprint;
@@ -7,8 +6,6 @@ mod layer_io;
mod macos;
mod merge;
mod overrides;
#[cfg(test)]
mod requirements_exec_policy;
mod state;
#[cfg(test)]
@@ -31,7 +28,6 @@ use std::io;
use std::path::Path;
use toml::Value as TomlValue;
pub use cloud_requirements::CloudRequirementsLoader;
pub use config_requirements::ConfigRequirements;
pub use config_requirements::ConfigRequirementsToml;
pub use config_requirements::McpServerIdentity;
@@ -71,7 +67,6 @@ const DEFAULT_PROJECT_ROOT_MARKERS: &[&str] = &[".git"];
/// earlier layer cannot be overridden by a later layer:
///
/// - admin: managed preferences (*)
/// - cloud: managed cloud requirements
/// - system `/etc/codex/requirements.toml`
///
/// For backwards compatibility, we also load from
@@ -101,7 +96,6 @@ pub async fn load_config_layers_state(
cwd: Option<AbsolutePathBuf>,
cli_overrides: &[(String, TomlValue)],
overrides: LoaderOverrides,
cloud_requirements: Option<CloudRequirementsLoader>, // TODO(gt): Once exec and app-server are wired up, we can remove the option.
) -> io::Result<ConfigLayerStack> {
let mut config_requirements_toml = ConfigRequirementsWithSources::default();
@@ -114,13 +108,6 @@ pub async fn load_config_layers_state(
)
.await?;
if let Some(loader) = cloud_requirements
&& let Some(requirements) = loader.get().await
{
config_requirements_toml
.merge_unset_fields(RequirementSource::CloudRequirements, requirements);
}
// Honor /etc/codex/requirements.toml.
if cfg!(unix) {
load_requirements_toml(

View File

@@ -1,188 +0,0 @@
use codex_execpolicy::Decision;
use codex_execpolicy::Policy;
use codex_execpolicy::rule::PatternToken;
use codex_execpolicy::rule::PrefixPattern;
use codex_execpolicy::rule::PrefixRule;
use codex_execpolicy::rule::RuleRef;
use multimap::MultiMap;
use serde::Deserialize;
use std::sync::Arc;
use thiserror::Error;
/// TOML types for expressing exec policy requirements.
///
/// These types are kept separate from `ConfigRequirementsToml` and are
/// converted into `codex-execpolicy` rules.
#[derive(Debug, Clone, PartialEq, Eq, Deserialize)]
pub struct RequirementsExecPolicyTomlRoot {
pub exec_policy: RequirementsExecPolicyToml,
}
/// TOML representation of `[exec_policy]` within `requirements.toml`.
#[derive(Debug, Clone, PartialEq, Eq, Deserialize)]
pub struct RequirementsExecPolicyToml {
pub prefix_rules: Vec<RequirementsExecPolicyPrefixRuleToml>,
}
/// A TOML representation of the `prefix_rule(...)` Starlark builtin.
///
/// This mirrors the builtin defined in `execpolicy/src/parser.rs`.
#[derive(Debug, Clone, PartialEq, Eq, Deserialize)]
pub struct RequirementsExecPolicyPrefixRuleToml {
pub pattern: Vec<RequirementsExecPolicyPatternTokenToml>,
pub decision: Option<RequirementsExecPolicyDecisionToml>,
pub justification: Option<String>,
}
/// TOML-friendly representation of a pattern token.
///
/// Starlark supports either a string token or a list of alternative tokens at
/// each position, but TOML arrays cannot mix strings and arrays. Using an
/// array of tables sidesteps that restriction.
#[derive(Debug, Clone, PartialEq, Eq, Deserialize)]
pub struct RequirementsExecPolicyPatternTokenToml {
pub token: Option<String>,
pub any_of: Option<Vec<String>>,
}
#[derive(Debug, Clone, Copy, PartialEq, Eq, Deserialize)]
#[serde(rename_all = "kebab-case")]
pub enum RequirementsExecPolicyDecisionToml {
Allow,
Prompt,
Forbidden,
}
impl RequirementsExecPolicyDecisionToml {
fn as_decision(self) -> Decision {
match self {
Self::Allow => Decision::Allow,
Self::Prompt => Decision::Prompt,
Self::Forbidden => Decision::Forbidden,
}
}
}
#[derive(Debug, Error)]
pub enum RequirementsExecPolicyParseError {
#[error("exec policy prefix_rules cannot be empty")]
EmptyPrefixRules,
#[error("exec policy prefix_rule at index {rule_index} has an empty pattern")]
EmptyPattern { rule_index: usize },
#[error(
"exec policy prefix_rule at index {rule_index} has an invalid pattern token at index {token_index}: {reason}"
)]
InvalidPatternToken {
rule_index: usize,
token_index: usize,
reason: String,
},
#[error("exec policy prefix_rule at index {rule_index} has an empty justification")]
EmptyJustification { rule_index: usize },
}
impl RequirementsExecPolicyToml {
/// Convert requirements TOML exec policy rules into the internal `.rules`
/// representation used by `codex-execpolicy`.
pub fn to_policy(&self) -> Result<Policy, RequirementsExecPolicyParseError> {
if self.prefix_rules.is_empty() {
return Err(RequirementsExecPolicyParseError::EmptyPrefixRules);
}
let mut rules_by_program: MultiMap<String, RuleRef> = MultiMap::new();
for (rule_index, rule) in self.prefix_rules.iter().enumerate() {
if let Some(justification) = &rule.justification
&& justification.trim().is_empty()
{
return Err(RequirementsExecPolicyParseError::EmptyJustification { rule_index });
}
if rule.pattern.is_empty() {
return Err(RequirementsExecPolicyParseError::EmptyPattern { rule_index });
}
let pattern_tokens = rule
.pattern
.iter()
.enumerate()
.map(|(token_index, token)| parse_pattern_token(token, rule_index, token_index))
.collect::<Result<Vec<_>, _>>()?;
let decision = rule
.decision
.map(RequirementsExecPolicyDecisionToml::as_decision)
.unwrap_or(Decision::Allow);
let justification = rule.justification.clone();
let (first_token, remaining_tokens) = pattern_tokens
.split_first()
.ok_or(RequirementsExecPolicyParseError::EmptyPattern { rule_index })?;
let rest: Arc<[PatternToken]> = remaining_tokens.to_vec().into();
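// A multi-head pattern (e.g. any_of = ["git", "hg"] in the first position)
// registers one rule per program name below, so lookup by argv[0] stays a
// direct map hit in `rules_by_program`.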
for head in first_token.alternatives() {
let rule: RuleRef = Arc::new(PrefixRule {
pattern: PrefixPattern {
first: Arc::from(head.as_str()),
rest: rest.clone(),
},
decision,
justification: justification.clone(),
});
rules_by_program.insert(head.clone(), rule);
}
}
Ok(Policy::new(rules_by_program))
}
}
fn parse_pattern_token(
token: &RequirementsExecPolicyPatternTokenToml,
rule_index: usize,
token_index: usize,
) -> Result<PatternToken, RequirementsExecPolicyParseError> {
match (&token.token, &token.any_of) {
(Some(single), None) => {
if single.trim().is_empty() {
return Err(RequirementsExecPolicyParseError::InvalidPatternToken {
rule_index,
token_index,
reason: "token cannot be empty".to_string(),
});
}
Ok(PatternToken::Single(single.clone()))
}
(None, Some(alternatives)) => {
if alternatives.is_empty() {
return Err(RequirementsExecPolicyParseError::InvalidPatternToken {
rule_index,
token_index,
reason: "any_of cannot be empty".to_string(),
});
}
if alternatives.iter().any(|alt| alt.trim().is_empty()) {
return Err(RequirementsExecPolicyParseError::InvalidPatternToken {
rule_index,
token_index,
reason: "any_of cannot include empty tokens".to_string(),
});
}
Ok(PatternToken::Alts(alternatives.clone()))
}
(Some(_), Some(_)) => Err(RequirementsExecPolicyParseError::InvalidPatternToken {
rule_index,
token_index,
reason: "set either token or any_of, not both".to_string(),
}),
(None, None) => Err(RequirementsExecPolicyParseError::InvalidPatternToken {
rule_index,
token_index,
reason: "set either token or any_of".to_string(),
}),
}
}
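// Examples of the mapping above:
//   { token = "rm" }                -> PatternToken::Single("rm")
//   { any_of = ["push", "commit"] } -> PatternToken::Alts(["push", "commit"])
// Supplying both fields, or neither, is rejected with a descriptive reason.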

View File

@@ -4,15 +4,11 @@ use crate::config::CONFIG_TOML_FILE;
use crate::config::ConfigBuilder;
use crate::config::ConfigOverrides;
use crate::config::ConfigToml;
use crate::config::ConstraintError;
use crate::config::ProjectConfig;
use crate::config_loader::CloudRequirementsLoader;
use crate::config_loader::ConfigLayerEntry;
use crate::config_loader::ConfigLoadError;
use crate::config_loader::ConfigRequirements;
use crate::config_loader::ConfigRequirementsToml;
use crate::config_loader::config_requirements::ConfigRequirementsWithSources;
use crate::config_loader::config_requirements::RequirementSource;
use crate::config_loader::fingerprint::version_for_toml;
use crate::config_loader::load_requirements_toml;
use codex_protocol::config_types::TrustLevel;
@@ -69,7 +65,6 @@ async fn returns_config_error_for_invalid_user_config_toml() {
Some(cwd),
&[] as &[(String, TomlValue)],
LoaderOverrides::default(),
None,
)
.await
.expect_err("expected error");
@@ -99,7 +94,6 @@ async fn returns_config_error_for_invalid_managed_config_toml() {
Some(cwd),
&[] as &[(String, TomlValue)],
overrides,
None,
)
.await
.expect_err("expected error");
@@ -188,7 +182,6 @@ extra = true
Some(cwd),
&[] as &[(String, TomlValue)],
overrides,
None,
)
.await
.expect("load config");
@@ -225,7 +218,6 @@ async fn returns_empty_when_all_layers_missing() {
Some(cwd),
&[] as &[(String, TomlValue)],
overrides,
None,
)
.await
.expect("load layers");
@@ -323,7 +315,6 @@ flag = false
Some(cwd),
&[] as &[(String, TomlValue)],
overrides,
None,
)
.await
.expect("load config");
@@ -363,7 +354,6 @@ allowed_sandbox_modes = ["read-only"]
),
),
},
None,
)
.await?;
@@ -424,7 +414,6 @@ allowed_approval_policies = ["never"]
),
),
},
None,
)
.await?;
@@ -483,91 +472,6 @@ allowed_approval_policies = ["never", "on-request"]
Ok(())
}
#[tokio::test(flavor = "current_thread")]
async fn cloud_requirements_are_not_overwritten_by_system_requirements() -> anyhow::Result<()> {
let tmp = tempdir()?;
let requirements_file = tmp.path().join("requirements.toml");
tokio::fs::write(
&requirements_file,
r#"
allowed_approval_policies = ["on-request"]
"#,
)
.await?;
let mut config_requirements_toml = ConfigRequirementsWithSources::default();
config_requirements_toml.merge_unset_fields(
RequirementSource::CloudRequirements,
ConfigRequirementsToml {
allowed_approval_policies: Some(vec![AskForApproval::Never]),
allowed_sandbox_modes: None,
mcp_servers: None,
},
);
load_requirements_toml(&mut config_requirements_toml, &requirements_file).await?;
assert_eq!(
config_requirements_toml
.allowed_approval_policies
.as_ref()
.map(|sourced| sourced.value.clone()),
Some(vec![AskForApproval::Never])
);
assert_eq!(
config_requirements_toml
.allowed_approval_policies
.as_ref()
.map(|sourced| sourced.source.clone()),
Some(RequirementSource::CloudRequirements)
);
Ok(())
}
#[tokio::test]
async fn load_config_layers_includes_cloud_requirements() -> anyhow::Result<()> {
let tmp = tempdir()?;
let codex_home = tmp.path().join("home");
tokio::fs::create_dir_all(&codex_home).await?;
let cwd = AbsolutePathBuf::from_absolute_path(tmp.path())?;
let requirements = ConfigRequirementsToml {
allowed_approval_policies: Some(vec![AskForApproval::Never]),
allowed_sandbox_modes: None,
mcp_servers: None,
};
let expected = requirements.clone();
let cloud_requirements = CloudRequirementsLoader::new(async move { Some(requirements) });
let layers = load_config_layers_state(
&codex_home,
Some(cwd),
&[] as &[(String, TomlValue)],
LoaderOverrides::default(),
Some(cloud_requirements),
)
.await?;
assert_eq!(
layers.requirements_toml().allowed_approval_policies,
expected.allowed_approval_policies
);
assert_eq!(
layers
.requirements()
.approval_policy
.can_set(&AskForApproval::OnRequest),
Err(ConstraintError::InvalidValue {
field_name: "approval_policy",
candidate: "OnRequest".into(),
allowed: "[Never]".into(),
requirement_source: RequirementSource::CloudRequirements,
})
);
Ok(())
}
#[tokio::test]
async fn project_layers_prefer_closest_cwd() -> std::io::Result<()> {
let tmp = tempdir()?;
@@ -597,7 +501,6 @@ async fn project_layers_prefer_closest_cwd() -> std::io::Result<()> {
Some(cwd),
&[] as &[(String, TomlValue)],
LoaderOverrides::default(),
None,
)
.await?;
@@ -729,7 +632,6 @@ async fn project_layer_is_added_when_dot_codex_exists_without_config_toml() -> s
Some(cwd),
&[] as &[(String, TomlValue)],
LoaderOverrides::default(),
None,
)
.await?;
@@ -789,7 +691,6 @@ async fn project_layers_disabled_when_untrusted_or_unknown() -> std::io::Result<
Some(cwd.clone()),
&[] as &[(String, TomlValue)],
LoaderOverrides::default(),
None,
)
.await?;
let project_layers_untrusted: Vec<_> = layers_untrusted
@@ -827,7 +728,6 @@ async fn project_layers_disabled_when_untrusted_or_unknown() -> std::io::Result<
Some(cwd),
&[] as &[(String, TomlValue)],
LoaderOverrides::default(),
None,
)
.await?;
let project_layers_unknown: Vec<_> = layers_unknown
@@ -888,7 +788,6 @@ async fn invalid_project_config_ignored_when_untrusted_or_unknown() -> std::io::
Some(cwd.clone()),
&[] as &[(String, TomlValue)],
LoaderOverrides::default(),
None,
)
.await?;
let project_layers: Vec<_> = layers
@@ -944,7 +843,6 @@ async fn cli_overrides_with_relative_paths_do_not_break_trust_check() -> std::io
Some(cwd),
&cli_overrides,
LoaderOverrides::default(),
None,
)
.await?;
@@ -986,7 +884,6 @@ async fn project_root_markers_supports_alternate_markers() -> std::io::Result<()
Some(cwd),
&[] as &[(String, TomlValue)],
LoaderOverrides::default(),
None,
)
.await?;
@@ -1014,165 +911,3 @@ async fn project_root_markers_supports_alternate_markers() -> std::io::Result<()
Ok(())
}
mod requirements_exec_policy_tests {
use super::super::requirements_exec_policy::RequirementsExecPolicyDecisionToml;
use super::super::requirements_exec_policy::RequirementsExecPolicyPatternTokenToml;
use super::super::requirements_exec_policy::RequirementsExecPolicyPrefixRuleToml;
use super::super::requirements_exec_policy::RequirementsExecPolicyToml;
use super::super::requirements_exec_policy::RequirementsExecPolicyTomlRoot;
use codex_execpolicy::Decision;
use codex_execpolicy::Evaluation;
use codex_execpolicy::RuleMatch;
use pretty_assertions::assert_eq;
use toml::from_str;
fn tokens(cmd: &[&str]) -> Vec<String> {
cmd.iter().map(std::string::ToString::to_string).collect()
}
fn allow_all(_: &[String]) -> Decision {
Decision::Allow
}
#[test]
fn parses_single_prefix_rule_from_raw_toml() -> anyhow::Result<()> {
let toml_str = r#"
[exec_policy]
prefix_rules = [
{ pattern = [{ token = "rm" }], decision = "forbidden" },
]
"#;
let parsed: RequirementsExecPolicyTomlRoot = from_str(toml_str)?;
assert_eq!(
parsed,
RequirementsExecPolicyTomlRoot {
exec_policy: RequirementsExecPolicyToml {
prefix_rules: vec![RequirementsExecPolicyPrefixRuleToml {
pattern: vec![RequirementsExecPolicyPatternTokenToml {
token: Some("rm".to_string()),
any_of: None,
}],
decision: Some(RequirementsExecPolicyDecisionToml::Forbidden),
justification: None,
}],
},
}
);
Ok(())
}
#[test]
fn parses_multiple_prefix_rules_from_raw_toml() -> anyhow::Result<()> {
let toml_str = r#"
[exec_policy]
prefix_rules = [
{ pattern = [{ token = "rm" }], decision = "forbidden" },
{ pattern = [{ token = "git" }, { any_of = ["push", "commit"] }], decision = "prompt", justification = "review changes before push or commit" },
]
"#;
let parsed: RequirementsExecPolicyTomlRoot = from_str(toml_str)?;
assert_eq!(
parsed,
RequirementsExecPolicyTomlRoot {
exec_policy: RequirementsExecPolicyToml {
prefix_rules: vec![
RequirementsExecPolicyPrefixRuleToml {
pattern: vec![RequirementsExecPolicyPatternTokenToml {
token: Some("rm".to_string()),
any_of: None,
}],
decision: Some(RequirementsExecPolicyDecisionToml::Forbidden),
justification: None,
},
RequirementsExecPolicyPrefixRuleToml {
pattern: vec![
RequirementsExecPolicyPatternTokenToml {
token: Some("git".to_string()),
any_of: None,
},
RequirementsExecPolicyPatternTokenToml {
token: None,
any_of: Some(vec!["push".to_string(), "commit".to_string()]),
},
],
decision: Some(RequirementsExecPolicyDecisionToml::Prompt),
justification: Some("review changes before push or commit".to_string()),
},
],
},
}
);
Ok(())
}
#[test]
fn converts_rules_toml_into_internal_policy_representation() -> anyhow::Result<()> {
let toml_str = r#"
[exec_policy]
prefix_rules = [
{ pattern = [{ token = "rm" }], decision = "forbidden" },
]
"#;
let parsed: RequirementsExecPolicyTomlRoot = from_str(toml_str)?;
let policy = parsed.exec_policy.to_policy()?;
assert_eq!(
policy.check(&tokens(&["rm", "-rf", "/tmp"]), &allow_all),
Evaluation {
decision: Decision::Forbidden,
matched_rules: vec![RuleMatch::PrefixRuleMatch {
matched_prefix: tokens(&["rm"]),
decision: Decision::Forbidden,
justification: None,
}],
}
);
Ok(())
}
#[test]
fn head_any_of_expands_into_multiple_program_rules() -> anyhow::Result<()> {
let toml_str = r#"
[exec_policy]
prefix_rules = [
{ pattern = [{ any_of = ["git", "hg"] }, { token = "status" }], decision = "prompt" },
]
"#;
let parsed: RequirementsExecPolicyTomlRoot = from_str(toml_str)?;
let policy = parsed.exec_policy.to_policy()?;
assert_eq!(
policy.check(&tokens(&["git", "status"]), &allow_all),
Evaluation {
decision: Decision::Prompt,
matched_rules: vec![RuleMatch::PrefixRuleMatch {
matched_prefix: tokens(&["git", "status"]),
decision: Decision::Prompt,
justification: None,
}],
}
);
assert_eq!(
policy.check(&tokens(&["hg", "status"]), &allow_all),
Evaluation {
decision: Decision::Prompt,
matched_rules: vec![RuleMatch::PrefixRuleMatch {
matched_prefix: tokens(&["hg", "status"]),
decision: Decision::Prompt,
justification: None,
}],
}
);
Ok(())
}
}

View File

@@ -3,8 +3,9 @@ use std::env;
use std::path::PathBuf;
use async_channel::unbounded;
pub use codex_app_server_protocol::AppInfo;
use codex_protocol::protocol::SandboxPolicy;
use serde::Deserialize;
use serde::Serialize;
use tokio_util::sync::CancellationToken;
use crate::AuthManager;
@@ -14,13 +15,28 @@ use crate::features::Feature;
use crate::mcp::CODEX_APPS_MCP_SERVER_NAME;
use crate::mcp::auth::compute_auth_statuses;
use crate::mcp::with_codex_apps_mcp;
use crate::mcp_connection_manager::DEFAULT_STARTUP_TIMEOUT;
use crate::mcp_connection_manager::McpConnectionManager;
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ConnectorInfo {
#[serde(rename = "id")]
pub connector_id: String,
#[serde(rename = "name")]
pub connector_name: String,
#[serde(default, rename = "description")]
pub connector_description: Option<String>,
#[serde(default, rename = "logo_url")]
pub logo_url: Option<String>,
#[serde(default, rename = "install_url")]
pub install_url: Option<String>,
#[serde(default)]
pub is_accessible: bool,
}
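// Given the serde renames above, this deserializes payloads shaped like the
// following (field values are illustrative, not from a real connector):
//   { "id": "gmail", "name": "Gmail", "description": null,
//     "logo_url": null, "install_url": null, "is_accessible": true }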
pub async fn list_accessible_connectors_from_mcp_tools(
config: &Config,
) -> anyhow::Result<Vec<AppInfo>> {
if !config.features.enabled(Feature::Apps) {
) -> anyhow::Result<Vec<ConnectorInfo>> {
if !config.features.enabled(Feature::Connectors) {
return Ok(Vec::new());
}
@@ -56,13 +72,6 @@ pub async fn list_accessible_connectors_from_mcp_tools(
)
.await;
if let Some(cfg) = mcp_servers.get(CODEX_APPS_MCP_SERVER_NAME) {
let timeout = cfg.startup_timeout_sec.unwrap_or(DEFAULT_STARTUP_TIMEOUT);
mcp_connection_manager
.wait_for_server_ready(CODEX_APPS_MCP_SERVER_NAME, timeout)
.await;
}
let tools = mcp_connection_manager.list_all_tools().await;
cancel_token.cancel();
@@ -77,17 +86,13 @@ fn auth_manager_from_config(config: &Config) -> std::sync::Arc<AuthManager> {
)
}
pub fn connector_display_label(connector: &AppInfo) -> String {
format_connector_label(&connector.name, &connector.id)
}
pub fn connector_mention_slug(connector: &AppInfo) -> String {
connector_name_slug(&connector_display_label(connector))
pub fn connector_display_label(connector: &ConnectorInfo) -> String {
format_connector_label(&connector.connector_name, &connector.connector_id)
}
pub(crate) fn accessible_connectors_from_mcp_tools(
mcp_tools: &HashMap<String, crate::mcp_connection_manager::ToolInfo>,
) -> Vec<AppInfo> {
) -> Vec<ConnectorInfo> {
let tools = mcp_tools.values().filter_map(|tool| {
if tool.server_name != CODEX_APPS_MCP_SERVER_NAME {
return None;
@@ -100,37 +105,34 @@ pub(crate) fn accessible_connectors_from_mcp_tools(
}
pub fn merge_connectors(
connectors: Vec<AppInfo>,
accessible_connectors: Vec<AppInfo>,
) -> Vec<AppInfo> {
let mut merged: HashMap<String, AppInfo> = connectors
connectors: Vec<ConnectorInfo>,
accessible_connectors: Vec<ConnectorInfo>,
) -> Vec<ConnectorInfo> {
let mut merged: HashMap<String, ConnectorInfo> = connectors
.into_iter()
.map(|mut connector| {
connector.is_accessible = false;
(connector.id.clone(), connector)
(connector.connector_id.clone(), connector)
})
.collect();
for mut connector in accessible_connectors {
connector.is_accessible = true;
let connector_id = connector.id.clone();
let connector_id = connector.connector_id.clone();
if let Some(existing) = merged.get_mut(&connector_id) {
existing.is_accessible = true;
if existing.name == existing.id && connector.name != connector.id {
existing.name = connector.name;
if existing.connector_name == existing.connector_id
&& connector.connector_name != connector.connector_id
{
existing.connector_name = connector.connector_name;
}
if existing.description.is_none() && connector.description.is_some() {
existing.description = connector.description;
if existing.connector_description.is_none() && connector.connector_description.is_some()
{
existing.connector_description = connector.connector_description;
}
if existing.logo_url.is_none() && connector.logo_url.is_some() {
existing.logo_url = connector.logo_url;
}
if existing.logo_url_dark.is_none() && connector.logo_url_dark.is_some() {
existing.logo_url_dark = connector.logo_url_dark;
}
if existing.distribution_channel.is_none() && connector.distribution_channel.is_some() {
existing.distribution_channel = connector.distribution_channel;
}
} else {
merged.insert(connector_id, connector);
}
@@ -139,20 +141,23 @@ pub fn merge_connectors(
let mut merged = merged.into_values().collect::<Vec<_>>();
for connector in &mut merged {
if connector.install_url.is_none() {
connector.install_url = Some(connector_install_url(&connector.name, &connector.id));
connector.install_url = Some(connector_install_url(
&connector.connector_name,
&connector.connector_id,
));
}
}
merged.sort_by(|left, right| {
right
.is_accessible
.cmp(&left.is_accessible)
.then_with(|| left.name.cmp(&right.name))
.then_with(|| left.id.cmp(&right.id))
.then_with(|| left.connector_name.cmp(&right.connector_name))
.then_with(|| left.connector_id.cmp(&right.connector_id))
});
merged
}
fn collect_accessible_connectors<I>(tools: I) -> Vec<AppInfo>
fn collect_accessible_connectors<I>(tools: I) -> Vec<ConnectorInfo>
where
I: IntoIterator<Item = (String, Option<String>)>,
{
@@ -167,16 +172,14 @@ where
connectors.insert(connector_id, connector_name);
}
}
let mut accessible: Vec<AppInfo> = connectors
let mut accessible: Vec<ConnectorInfo> = connectors
.into_iter()
.map(|(connector_id, connector_name)| AppInfo {
id: connector_id.clone(),
name: connector_name.clone(),
description: None,
logo_url: None,
logo_url_dark: None,
distribution_channel: None,
.map(|(connector_id, connector_name)| ConnectorInfo {
install_url: Some(connector_install_url(&connector_name, &connector_id)),
connector_id,
connector_name,
connector_description: None,
logo_url: None,
is_accessible: true,
})
.collect();
@@ -184,8 +187,8 @@ where
right
.is_accessible
.cmp(&left.is_accessible)
.then_with(|| left.name.cmp(&right.name))
.then_with(|| left.id.cmp(&right.id))
.then_with(|| left.connector_name.cmp(&right.connector_name))
.then_with(|| left.connector_id.cmp(&right.connector_id))
});
accessible
}
@@ -202,7 +205,7 @@ pub fn connector_install_url(name: &str, connector_id: &str) -> String {
format!("https://chatgpt.com/apps/{slug}/{connector_id}")
}
pub fn connector_name_slug(name: &str) -> String {
fn connector_name_slug(name: &str) -> String {
let mut normalized = String::with_capacity(name.len());
for character in name.chars() {
if character.is_ascii_alphanumeric() {

View File

@@ -95,12 +95,6 @@ pub fn originator() -> Originator {
get_originator_value(None)
}
pub fn is_first_party_originator(originator_value: &str) -> bool {
originator_value == DEFAULT_ORIGINATOR
|| originator_value == "codex_vscode"
|| originator_value.starts_with("Codex ")
}
pub fn get_codex_user_agent() -> String {
let build_version = env!("CARGO_PKG_VERSION");
let os_info = os_info::get();
@@ -191,7 +185,6 @@ fn is_sandboxed() -> bool {
mod tests {
use super::*;
use core_test_support::skip_if_no_network;
use pretty_assertions::assert_eq;
#[test]
fn test_get_codex_user_agent() {
@@ -201,15 +194,6 @@ mod tests {
assert!(user_agent.starts_with(&prefix));
}
#[test]
fn is_first_party_originator_matches_known_values() {
assert_eq!(is_first_party_originator(DEFAULT_ORIGINATOR), true);
assert_eq!(is_first_party_originator("codex_vscode"), true);
assert_eq!(is_first_party_originator("Codex Something Else"), true);
assert_eq!(is_first_party_originator("codex_cli"), false);
assert_eq!(is_first_party_originator("Other"), false);
}
#[tokio::test]
async fn test_create_client_sets_default_headers() {
skip_if_no_network!();

View File

@@ -113,9 +113,6 @@ pub enum CodexErr {
#[error("{0}")]
UsageLimitReached(UsageLimitReachedError),
#[error("{0}")]
ModelCap(ModelCapError),
#[error("{0}")]
ResponseStreamFailed(ResponseStreamFailed),
@@ -208,8 +205,7 @@ impl CodexErr {
| CodexErr::AgentLimitReached { .. }
| CodexErr::Spawn
| CodexErr::SessionConfiguredNotFirstEvent
| CodexErr::UsageLimitReached(_)
| CodexErr::ModelCap(_) => false,
| CodexErr::UsageLimitReached(_) => false,
CodexErr::Stream(..)
| CodexErr::Timeout
| CodexErr::UnexpectedStatus(_)
@@ -375,11 +371,9 @@ impl std::fmt::Display for UsageLimitReachedError {
retry_suffix_after_or(self.resets_at.as_ref())
)
}
Some(PlanType::Known(KnownPlan::Free)) | Some(PlanType::Known(KnownPlan::Go)) => {
format!(
"You've hit your usage limit. Upgrade to Plus to continue using Codex (https://openai.com/chatgpt/pricing),{}",
retry_suffix_after_or(self.resets_at.as_ref())
)
Some(PlanType::Known(KnownPlan::Free)) => {
"You've hit your usage limit. Upgrade to Plus to continue using Codex (https://openai.com/chatgpt/pricing)."
.to_string()
}
Some(PlanType::Known(KnownPlan::Pro)) => format!(
"You've hit your usage limit. Visit https://chatgpt.com/codex/settings/usage to purchase more credits{}",
@@ -400,30 +394,6 @@ impl std::fmt::Display for UsageLimitReachedError {
}
}
#[derive(Debug)]
pub struct ModelCapError {
pub(crate) model: String,
pub(crate) reset_after_seconds: Option<u64>,
}
impl std::fmt::Display for ModelCapError {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut message = format!(
"Model {} is at capacity. Please try a different model.",
self.model
);
if let Some(seconds) = self.reset_after_seconds {
message.push_str(&format!(
" Try again in {}.",
format_duration_short(seconds)
));
} else {
message.push_str(" Try again later.");
}
write!(f, "{message}")
}
}
fn retry_suffix(resets_at: Option<&DateTime<Utc>>) -> String {
if let Some(resets_at) = resets_at {
let formatted = format_retry_timestamp(resets_at);
@@ -455,18 +425,6 @@ fn format_retry_timestamp(resets_at: &DateTime<Utc>) -> String {
}
}
fn format_duration_short(seconds: u64) -> String {
if seconds < 60 {
"less than a minute".to_string()
} else if seconds < 3600 {
format!("{}m", seconds / 60)
} else if seconds < 86_400 {
format!("{}h", seconds / 3600)
} else {
format!("{}d", seconds / 86_400)
}
}
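// e.g. 45 -> "less than a minute", 120 -> "2m", 7_200 -> "2h", 172_800 -> "2d".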
fn day_suffix(day: u32) -> &'static str {
match day {
11..=13 => "th",
@@ -530,10 +488,6 @@ impl CodexErr {
CodexErr::UsageLimitReached(_)
| CodexErr::QuotaExceeded
| CodexErr::UsageNotIncluded => CodexErrorInfo::UsageLimitExceeded,
CodexErr::ModelCap(err) => CodexErrorInfo::ModelCap {
model: err.model.clone(),
reset_after_seconds: err.reset_after_seconds,
},
CodexErr::RetryLimit(_) => CodexErrorInfo::ResponseTooManyFailedAttempts {
http_status_code: self.http_status_code_value(),
},
@@ -677,45 +631,6 @@ mod tests {
);
}
#[test]
fn model_cap_error_formats_message() {
let err = ModelCapError {
model: "boomslang".to_string(),
reset_after_seconds: Some(120),
};
assert_eq!(
err.to_string(),
"Model boomslang is at capacity. Please try a different model. Try again in 2m."
);
}
#[test]
fn model_cap_error_formats_message_without_reset() {
let err = ModelCapError {
model: "boomslang".to_string(),
reset_after_seconds: None,
};
assert_eq!(
err.to_string(),
"Model boomslang is at capacity. Please try a different model. Try again later."
);
}
#[test]
fn model_cap_error_maps_to_protocol() {
let err = CodexErr::ModelCap(ModelCapError {
model: "boomslang".to_string(),
reset_after_seconds: Some(30),
});
assert_eq!(
err.to_codex_protocol_error(),
CodexErrorInfo::ModelCap {
model: "boomslang".to_string(),
reset_after_seconds: Some(30),
}
);
}
#[test]
fn sandbox_denied_uses_aggregated_output_when_stderr_empty() {
let output = ExecToolCallOutput {
@@ -819,20 +734,7 @@ mod tests {
};
assert_eq!(
err.to_string(),
"You've hit your usage limit. Upgrade to Plus to continue using Codex (https://openai.com/chatgpt/pricing), or try again later."
);
}
#[test]
fn usage_limit_reached_error_formats_go_plan() {
let err = UsageLimitReachedError {
plan_type: Some(PlanType::Known(KnownPlan::Go)),
resets_at: None,
rate_limits: Some(rate_limit_snapshot()),
};
assert_eq!(
err.to_string(),
"You've hit your usage limit. Upgrade to Plus to continue using Codex (https://openai.com/chatgpt/pricing), or try again later."
"You've hit your usage limit. Upgrade to Plus to continue using Codex (https://openai.com/chatgpt/pricing)."
);
}

View File

@@ -21,7 +21,6 @@ use crate::instructions::SkillInstructions;
use crate::instructions::UserInstructions;
use crate::session_prefix::is_session_prefix;
use crate::user_shell_command::is_user_shell_command_text;
use crate::web_search::web_search_action_detail;
fn parse_user_message(message: &[ContentItem]) -> Option<UserMessageItem> {
if UserInstructions::is_user_instructions(message)
@@ -128,17 +127,14 @@ pub fn parse_turn_item(item: &ResponseItem) -> Option<TurnItem> {
raw_content,
}))
}
ResponseItem::WebSearchCall { id, action, .. } => {
let (action, query) = match action {
Some(action) => (action.clone(), web_search_action_detail(action)),
None => (WebSearchAction::Other, String::new()),
};
Some(TurnItem::WebSearch(WebSearchItem {
id: id.clone().unwrap_or_default(),
query,
action,
}))
}
ResponseItem::WebSearchCall {
id,
action: WebSearchAction::Search { query },
..
} => Some(TurnItem::WebSearch(WebSearchItem {
id: id.clone().unwrap_or_default(),
query: query.clone().unwrap_or_default(),
})),
_ => None,
}
}
@@ -148,7 +144,6 @@ mod tests {
use super::parse_turn_item;
use codex_protocol::items::AgentMessageContent;
use codex_protocol::items::TurnItem;
use codex_protocol::items::WebSearchItem;
use codex_protocol::models::ContentItem;
use codex_protocol::models::ReasoningItemContent;
use codex_protocol::models::ReasoningItemReasoningSummary;
@@ -424,102 +419,18 @@ mod tests {
let item = ResponseItem::WebSearchCall {
id: Some("ws_1".to_string()),
status: Some("completed".to_string()),
action: Some(WebSearchAction::Search {
action: WebSearchAction::Search {
query: Some("weather".to_string()),
}),
},
};
let turn_item = parse_turn_item(&item).expect("expected web search turn item");
match turn_item {
TurnItem::WebSearch(search) => assert_eq!(
search,
WebSearchItem {
id: "ws_1".to_string(),
query: "weather".to_string(),
action: WebSearchAction::Search {
query: Some("weather".to_string()),
},
}
),
other => panic!("expected TurnItem::WebSearch, got {other:?}"),
}
}
#[test]
fn parses_web_search_open_page_call() {
let item = ResponseItem::WebSearchCall {
id: Some("ws_open".to_string()),
status: Some("completed".to_string()),
action: Some(WebSearchAction::OpenPage {
url: Some("https://example.com".to_string()),
}),
};
let turn_item = parse_turn_item(&item).expect("expected web search turn item");
match turn_item {
TurnItem::WebSearch(search) => assert_eq!(
search,
WebSearchItem {
id: "ws_open".to_string(),
query: "https://example.com".to_string(),
action: WebSearchAction::OpenPage {
url: Some("https://example.com".to_string()),
},
}
),
other => panic!("expected TurnItem::WebSearch, got {other:?}"),
}
}
#[test]
fn parses_web_search_find_in_page_call() {
let item = ResponseItem::WebSearchCall {
id: Some("ws_find".to_string()),
status: Some("completed".to_string()),
action: Some(WebSearchAction::FindInPage {
url: Some("https://example.com".to_string()),
pattern: Some("needle".to_string()),
}),
};
let turn_item = parse_turn_item(&item).expect("expected web search turn item");
match turn_item {
TurnItem::WebSearch(search) => assert_eq!(
search,
WebSearchItem {
id: "ws_find".to_string(),
query: "'needle' in https://example.com".to_string(),
action: WebSearchAction::FindInPage {
url: Some("https://example.com".to_string()),
pattern: Some("needle".to_string()),
},
}
),
other => panic!("expected TurnItem::WebSearch, got {other:?}"),
}
}
#[test]
fn parses_partial_web_search_call_without_action_as_other() {
let item = ResponseItem::WebSearchCall {
id: Some("ws_partial".to_string()),
status: Some("in_progress".to_string()),
action: None,
};
let turn_item = parse_turn_item(&item).expect("expected web search turn item");
match turn_item {
TurnItem::WebSearch(search) => assert_eq!(
search,
WebSearchItem {
id: "ws_partial".to_string(),
query: String::new(),
action: WebSearchAction::Other,
}
),
TurnItem::WebSearch(search) => {
assert_eq!(search.id, "ws_1");
assert_eq!(search.query, "weather");
}
other => panic!("expected TurnItem::WebSearch, got {other:?}"),
}
}

View File

@@ -64,7 +64,6 @@ pub struct ExecParams {
pub expiration: ExecExpiration,
pub env: HashMap<String, String>,
pub sandbox_permissions: SandboxPermissions,
pub windows_sandbox_level: codex_protocol::config_types::WindowsSandboxLevel,
pub justification: Option<String>,
pub arg0: Option<String>,
}
@@ -142,15 +141,11 @@ pub async fn process_exec_tool_call(
codex_linux_sandbox_exe: &Option<PathBuf>,
stdout_stream: Option<StdoutStream>,
) -> Result<ExecToolCallOutput> {
let windows_sandbox_level = params.windows_sandbox_level;
let sandbox_type = match &sandbox_policy {
SandboxPolicy::DangerFullAccess | SandboxPolicy::ExternalSandbox { .. } => {
SandboxType::None
}
_ => get_platform_sandbox(
windows_sandbox_level != codex_protocol::config_types::WindowsSandboxLevel::Disabled,
)
.unwrap_or(SandboxType::None),
_ => get_platform_sandbox().unwrap_or(SandboxType::None),
};
tracing::debug!("Sandbox type: {sandbox_type:?}");
@@ -160,7 +155,6 @@ pub async fn process_exec_tool_call(
expiration,
env,
sandbox_permissions,
windows_sandbox_level,
justification,
arg0: _,
} = params;
@@ -190,7 +184,6 @@ pub async fn process_exec_tool_call(
sandbox_type,
sandbox_cwd,
codex_linux_sandbox_exe.as_ref(),
windows_sandbox_level,
)
.map_err(CodexErr::from)?;
@@ -209,7 +202,6 @@ pub(crate) async fn execute_exec_env(
env,
expiration,
sandbox,
windows_sandbox_level,
sandbox_permissions,
justification,
arg0,
@@ -221,7 +213,6 @@ pub(crate) async fn execute_exec_env(
expiration,
env,
sandbox_permissions,
windows_sandbox_level,
justification,
arg0,
};
@@ -232,79 +223,13 @@ pub(crate) async fn execute_exec_env(
finalize_exec_result(raw_output_result, sandbox, duration)
}
#[cfg(target_os = "windows")]
fn extract_create_process_as_user_error_code(err: &str) -> Option<String> {
let marker = "CreateProcessAsUserW failed: ";
let start = err.find(marker)? + marker.len();
let tail = &err[start..];
let digits: String = tail.chars().take_while(char::is_ascii_digit).collect();
if digits.is_empty() {
None
} else {
Some(digits)
}
}
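// e.g. "windows sandbox: CreateProcessAsUserW failed: 740 (elevation required)"
// yields Some("740"); a message without the marker yields None.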
#[cfg(target_os = "windows")]
fn windowsapps_path_kind(path: &str) -> &'static str {
let lower = path.to_ascii_lowercase();
if lower.contains("\\program files\\windowsapps\\") {
return "windowsapps_package";
}
if lower.contains("\\appdata\\local\\microsoft\\windowsapps\\") {
return "windowsapps_alias";
}
if lower.contains("\\windowsapps\\") {
return "windowsapps_other";
}
"other"
}
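// e.g. r"C:\Program Files\WindowsApps\Pkg\app.exe" -> "windowsapps_package",
// while a path under AppData\Local\Microsoft\WindowsApps is the alias form.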
#[cfg(target_os = "windows")]
fn record_windows_sandbox_spawn_failure(
command_path: Option<&str>,
windows_sandbox_level: codex_protocol::config_types::WindowsSandboxLevel,
err: &str,
) {
let Some(error_code) = extract_create_process_as_user_error_code(err) else {
return;
};
let path = command_path.unwrap_or("unknown");
let exe = Path::new(path)
.file_name()
.and_then(|name| name.to_str())
.unwrap_or("unknown")
.to_ascii_lowercase();
let path_kind = windowsapps_path_kind(path);
let level = if matches!(
windows_sandbox_level,
codex_protocol::config_types::WindowsSandboxLevel::Elevated
) {
"elevated"
} else {
"legacy"
};
if let Some(metrics) = codex_otel::metrics::global() {
let _ = metrics.counter(
"codex.windows_sandbox.createprocessasuserw_failed",
1,
&[
("error_code", error_code.as_str()),
("path_kind", path_kind),
("exe", exe.as_str()),
("level", level),
],
);
}
}
#[cfg(target_os = "windows")]
async fn exec_windows_sandbox(
params: ExecParams,
sandbox_policy: &SandboxPolicy,
) -> Result<RawExecToolCallOutput> {
use crate::config::find_codex_home;
use codex_protocol::config_types::WindowsSandboxLevel;
use crate::safety::is_windows_elevated_sandbox_enabled;
use codex_windows_sandbox::run_windows_sandbox_capture;
use codex_windows_sandbox::run_windows_sandbox_capture_elevated;
@@ -313,7 +238,6 @@ async fn exec_windows_sandbox(
cwd,
env,
expiration,
windows_sandbox_level,
..
} = params;
// TODO(iceweasel-oai): run_windows_sandbox_capture should support all
@@ -331,9 +255,7 @@ async fn exec_windows_sandbox(
"windows sandbox: failed to resolve codex_home: {err}"
)))
})?;
let command_path = command.first().cloned();
let sandbox_level = windows_sandbox_level;
let use_elevated = matches!(sandbox_level, WindowsSandboxLevel::Elevated);
let use_elevated = is_windows_elevated_sandbox_enabled();
let spawn_res = tokio::task::spawn_blocking(move || {
if use_elevated {
run_windows_sandbox_capture_elevated(
@@ -362,11 +284,6 @@ async fn exec_windows_sandbox(
let capture = match spawn_res {
Ok(Ok(v)) => v,
Ok(Err(err)) => {
record_windows_sandbox_spawn_failure(
command_path.as_deref(),
sandbox_level,
&err.to_string(),
);
return Err(CodexErr::Io(io::Error::other(format!(
"windows sandbox: {err}"
))));
@@ -395,7 +312,20 @@ async fn exec_windows_sandbox(
text: stderr_text,
truncated_after_lines: None,
};
let aggregated_output = aggregate_output(&stdout, &stderr);
// Best-effort aggregate: stdout then stderr (capped).
let mut aggregated = Vec::with_capacity(
stdout
.text
.len()
.saturating_add(stderr.text.len())
.min(EXEC_OUTPUT_MAX_BYTES),
);
append_capped(&mut aggregated, &stdout.text, EXEC_OUTPUT_MAX_BYTES);
append_capped(&mut aggregated, &stderr.text, EXEC_OUTPUT_MAX_BYTES);
let aggregated_output = StreamOutput {
text: aggregated,
truncated_after_lines: None,
};
Ok(RawExecToolCallOutput {
exit_status,
@@ -589,39 +519,6 @@ fn append_capped(dst: &mut Vec<u8>, src: &[u8], max_bytes: usize) {
dst.extend_from_slice(&src[..take]);
}
fn aggregate_output(
stdout: &StreamOutput<Vec<u8>>,
stderr: &StreamOutput<Vec<u8>>,
) -> StreamOutput<Vec<u8>> {
let total_len = stdout.text.len().saturating_add(stderr.text.len());
let max_bytes = EXEC_OUTPUT_MAX_BYTES;
let mut aggregated = Vec::with_capacity(total_len.min(max_bytes));
if total_len <= max_bytes {
aggregated.extend_from_slice(&stdout.text);
aggregated.extend_from_slice(&stderr.text);
return StreamOutput {
text: aggregated,
truncated_after_lines: None,
};
}
// Under contention, reserve 1/3 of the cap for stdout and the rest for stderr;
// any capacity stderr leaves unused is handed back to stdout.
let want_stdout = stdout.text.len().min(max_bytes / 3);
let want_stderr = stderr.text.len();
let stderr_take = want_stderr.min(max_bytes.saturating_sub(want_stdout));
let remaining = max_bytes.saturating_sub(want_stdout + stderr_take);
let stdout_take = want_stdout + remaining.min(stdout.text.len().saturating_sub(want_stdout));
aggregated.extend_from_slice(&stdout.text[..stdout_take]);
aggregated.extend_from_slice(&stderr.text[..stderr_take]);
StreamOutput {
text: aggregated,
truncated_after_lines: None,
}
}
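// Worked example of the split above, assuming EXEC_OUTPUT_MAX_BYTES = 300:
//   stdout = 300B, stderr = 300B: want_stdout = 100, stderr_take = 200,
//   remaining = 0, so the aggregate is 100B of stdout then 200B of stderr.
//   stdout = 300B, stderr = 1B: want_stdout = 100, stderr_take = 1,
//   remaining = 199, so stdout_take grows to 299 and stderr keeps its 1B.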
#[derive(Clone, Debug)]
pub struct ExecToolCallOutput {
pub exit_code: i32,
@@ -667,7 +564,6 @@ async fn exec(
env,
arg0,
expiration,
windows_sandbox_level: _,
..
} = params;
@@ -787,7 +683,20 @@ async fn consume_truncated_output(
Duration::from_millis(IO_DRAIN_TIMEOUT_MS),
)
.await?;
let aggregated_output = aggregate_output(&stdout, &stderr);
// Best-effort aggregate: stdout then stderr (capped).
let mut aggregated = Vec::with_capacity(
stdout
.text
.len()
.saturating_add(stderr.text.len())
.min(EXEC_OUTPUT_MAX_BYTES),
);
append_capped(&mut aggregated, &stdout.text, EXEC_OUTPUT_MAX_BYTES);
append_capped(&mut aggregated, &stderr.text, EXEC_OUTPUT_MAX_BYTES * 2);
let aggregated_output = StreamOutput {
text: aggregated,
truncated_after_lines: None,
};
Ok(RawExecToolCallOutput {
exit_status,
@@ -862,7 +771,6 @@ fn synthetic_exit_status(code: i32) -> ExitStatus {
#[cfg(test)]
mod tests {
use super::*;
use pretty_assertions::assert_eq;
use std::time::Duration;
use tokio::io::AsyncWriteExt;
@@ -938,85 +846,6 @@ mod tests {
assert_eq!(out.text.len(), EXEC_OUTPUT_MAX_BYTES);
}
#[test]
fn aggregate_output_prefers_stderr_on_contention() {
let stdout = StreamOutput {
text: vec![b'a'; EXEC_OUTPUT_MAX_BYTES],
truncated_after_lines: None,
};
let stderr = StreamOutput {
text: vec![b'b'; EXEC_OUTPUT_MAX_BYTES],
truncated_after_lines: None,
};
let aggregated = aggregate_output(&stdout, &stderr);
let stdout_cap = EXEC_OUTPUT_MAX_BYTES / 3;
let stderr_cap = EXEC_OUTPUT_MAX_BYTES.saturating_sub(stdout_cap);
assert_eq!(aggregated.text.len(), EXEC_OUTPUT_MAX_BYTES);
assert_eq!(aggregated.text[..stdout_cap], vec![b'a'; stdout_cap]);
assert_eq!(aggregated.text[stdout_cap..], vec![b'b'; stderr_cap]);
}
#[test]
fn aggregate_output_fills_remaining_capacity_with_stderr() {
let stdout_len = EXEC_OUTPUT_MAX_BYTES / 10;
let stdout = StreamOutput {
text: vec![b'a'; stdout_len],
truncated_after_lines: None,
};
let stderr = StreamOutput {
text: vec![b'b'; EXEC_OUTPUT_MAX_BYTES],
truncated_after_lines: None,
};
let aggregated = aggregate_output(&stdout, &stderr);
let stderr_cap = EXEC_OUTPUT_MAX_BYTES.saturating_sub(stdout_len);
assert_eq!(aggregated.text.len(), EXEC_OUTPUT_MAX_BYTES);
assert_eq!(aggregated.text[..stdout_len], vec![b'a'; stdout_len]);
assert_eq!(aggregated.text[stdout_len..], vec![b'b'; stderr_cap]);
}
#[test]
fn aggregate_output_rebalances_when_stderr_is_small() {
let stdout = StreamOutput {
text: vec![b'a'; EXEC_OUTPUT_MAX_BYTES],
truncated_after_lines: None,
};
let stderr = StreamOutput {
text: vec![b'b'; 1],
truncated_after_lines: None,
};
let aggregated = aggregate_output(&stdout, &stderr);
let stdout_len = EXEC_OUTPUT_MAX_BYTES.saturating_sub(1);
assert_eq!(aggregated.text.len(), EXEC_OUTPUT_MAX_BYTES);
assert_eq!(aggregated.text[..stdout_len], vec![b'a'; stdout_len]);
assert_eq!(aggregated.text[stdout_len..], vec![b'b'; 1]);
}
#[test]
fn aggregate_output_keeps_stdout_then_stderr_when_under_cap() {
let stdout = StreamOutput {
text: vec![b'a'; 4],
truncated_after_lines: None,
};
let stderr = StreamOutput {
text: vec![b'b'; 3],
truncated_after_lines: None,
};
let aggregated = aggregate_output(&stdout, &stderr);
let mut expected = Vec::new();
expected.extend_from_slice(&stdout.text);
expected.extend_from_slice(&stderr.text);
assert_eq!(aggregated.text, expected);
assert_eq!(aggregated.truncated_after_lines, None);
}
#[cfg(unix)]
#[test]
fn sandbox_detection_flags_sigsys_exit_code() {
@@ -1049,7 +878,6 @@ mod tests {
expiration: 500.into(),
env,
sandbox_permissions: SandboxPermissions::UseDefault,
windows_sandbox_level: codex_protocol::config_types::WindowsSandboxLevel::Disabled,
justification: None,
arg0: None,
};
@@ -1095,7 +923,6 @@ mod tests {
expiration: ExecExpiration::Cancellation(cancel_token),
env,
sandbox_permissions: SandboxPermissions::UseDefault,
windows_sandbox_level: codex_protocol::config_types::WindowsSandboxLevel::Disabled,
justification: None,
arg0: None,
};

View File

@@ -87,15 +87,6 @@ pub(crate) struct ExecPolicyManager {
policy: ArcSwap<Policy>,
}
pub(crate) struct ExecApprovalRequest<'a> {
pub(crate) features: &'a Features,
pub(crate) command: &'a [String],
pub(crate) approval_policy: AskForApproval,
pub(crate) sandbox_policy: &'a SandboxPolicy,
pub(crate) sandbox_permissions: SandboxPermissions,
pub(crate) prefix_rule: Option<Vec<String>>,
}
impl ExecPolicyManager {
pub(crate) fn new(policy: Arc<Policy>) -> Self {
Self {
@@ -121,16 +112,12 @@ impl ExecPolicyManager {
pub(crate) async fn create_exec_approval_requirement_for_command(
&self,
req: ExecApprovalRequest<'_>,
features: &Features,
command: &[String],
approval_policy: AskForApproval,
sandbox_policy: &SandboxPolicy,
sandbox_permissions: SandboxPermissions,
) -> ExecApprovalRequirement {
let ExecApprovalRequest {
features,
command,
approval_policy,
sandbox_policy,
sandbox_permissions,
prefix_rule,
} = req;
let exec_policy = self.current();
let commands =
parse_shell_lc_plain_commands(command).unwrap_or_else(|| vec![command.to_vec()]);
@@ -144,12 +131,6 @@ impl ExecPolicyManager {
};
let evaluation = exec_policy.check_multiple(commands.iter(), &exec_policy_fallback);
let requested_amendment = derive_requested_execpolicy_amendment(
features,
prefix_rule.as_ref(),
&evaluation.matched_rules,
);
match evaluation.decision {
Decision::Forbidden => ExecApprovalRequirement::Forbidden {
reason: derive_forbidden_reason(command, &evaluation),
@@ -163,11 +144,9 @@ impl ExecPolicyManager {
ExecApprovalRequirement::NeedsApproval {
reason: derive_prompt_reason(command, &evaluation),
proposed_execpolicy_amendment: if features.enabled(Feature::ExecPolicy) {
requested_amendment.or_else(|| {
try_derive_execpolicy_amendment_for_prompt_rules(
&evaluation.matched_rules,
)
})
try_derive_execpolicy_amendment_for_prompt_rules(
&evaluation.matched_rules,
)
} else {
None
},
@@ -403,30 +382,6 @@ fn try_derive_execpolicy_amendment_for_allow_rules(
})
}
fn derive_requested_execpolicy_amendment(
features: &Features,
prefix_rule: Option<&Vec<String>>,
matched_rules: &[RuleMatch],
) -> Option<ExecPolicyAmendment> {
if !features.enabled(Feature::ExecPolicy) {
return None;
}
let prefix_rule = prefix_rule?;
if prefix_rule.is_empty() {
return None;
}
if matched_rules
.iter()
.any(|rule_match| is_policy_match(rule_match) && rule_match.decision() == Decision::Prompt)
{
return None;
}
Some(ExecPolicyAmendment::new(prefix_rule.clone()))
}
/// Only return a reason when a policy rule drove the prompt decision.
fn derive_prompt_reason(command_args: &[String], evaluation: &Evaluation) -> Option<String> {
let command = render_shlex_command(command_args);
@@ -801,14 +756,13 @@ prefix_rule(pattern=["rm"], decision="forbidden")
let manager = ExecPolicyManager::new(policy);
let requirement = manager
.create_exec_approval_requirement_for_command(ExecApprovalRequest {
features: &Features::with_defaults(),
command: &forbidden_script,
approval_policy: AskForApproval::OnRequest,
sandbox_policy: &SandboxPolicy::DangerFullAccess,
sandbox_permissions: SandboxPermissions::UseDefault,
prefix_rule: None,
})
.create_exec_approval_requirement_for_command(
&Features::with_defaults(),
&forbidden_script,
AskForApproval::OnRequest,
&SandboxPolicy::DangerFullAccess,
SandboxPermissions::UseDefault,
)
.await;
assert_eq!(
@@ -836,18 +790,17 @@ prefix_rule(
let manager = ExecPolicyManager::new(policy);
let requirement = manager
.create_exec_approval_requirement_for_command(ExecApprovalRequest {
features: &Features::with_defaults(),
command: &[
.create_exec_approval_requirement_for_command(
&Features::with_defaults(),
&[
"rm".to_string(),
"-rf".to_string(),
"/some/important/folder".to_string(),
],
approval_policy: AskForApproval::OnRequest,
sandbox_policy: &SandboxPolicy::DangerFullAccess,
sandbox_permissions: SandboxPermissions::UseDefault,
prefix_rule: None,
})
AskForApproval::OnRequest,
&SandboxPolicy::DangerFullAccess,
SandboxPermissions::UseDefault,
)
.await;
assert_eq!(
@@ -870,14 +823,13 @@ prefix_rule(
let manager = ExecPolicyManager::new(policy);
let requirement = manager
.create_exec_approval_requirement_for_command(ExecApprovalRequest {
features: &Features::with_defaults(),
command: &command,
approval_policy: AskForApproval::OnRequest,
sandbox_policy: &SandboxPolicy::DangerFullAccess,
sandbox_permissions: SandboxPermissions::UseDefault,
prefix_rule: None,
})
.create_exec_approval_requirement_for_command(
&Features::with_defaults(),
&command,
AskForApproval::OnRequest,
&SandboxPolicy::DangerFullAccess,
SandboxPermissions::UseDefault,
)
.await;
assert_eq!(
@@ -901,14 +853,13 @@ prefix_rule(
let manager = ExecPolicyManager::new(policy);
let requirement = manager
.create_exec_approval_requirement_for_command(ExecApprovalRequest {
features: &Features::with_defaults(),
command: &command,
approval_policy: AskForApproval::Never,
sandbox_policy: &SandboxPolicy::DangerFullAccess,
sandbox_permissions: SandboxPermissions::UseDefault,
prefix_rule: None,
})
.create_exec_approval_requirement_for_command(
&Features::with_defaults(),
&command,
AskForApproval::Never,
&SandboxPolicy::DangerFullAccess,
SandboxPermissions::UseDefault,
)
.await;
assert_eq!(
@@ -925,14 +876,13 @@ prefix_rule(
let manager = ExecPolicyManager::default();
let requirement = manager
.create_exec_approval_requirement_for_command(ExecApprovalRequest {
features: &Features::with_defaults(),
command: &command,
approval_policy: AskForApproval::UnlessTrusted,
sandbox_policy: &SandboxPolicy::ReadOnly,
sandbox_permissions: SandboxPermissions::UseDefault,
prefix_rule: None,
})
.create_exec_approval_requirement_for_command(
&Features::with_defaults(),
&command,
AskForApproval::UnlessTrusted,
&SandboxPolicy::ReadOnly,
SandboxPermissions::UseDefault,
)
.await;
assert_eq!(
@@ -944,40 +894,6 @@ prefix_rule(
);
}
#[tokio::test]
async fn request_rule_uses_prefix_rule() {
let command = vec![
"cargo".to_string(),
"install".to_string(),
"cargo-insta".to_string(),
];
let manager = ExecPolicyManager::default();
let mut features = Features::with_defaults();
features.enable(Feature::RequestRule);
let requirement = manager
.create_exec_approval_requirement_for_command(ExecApprovalRequest {
features: &features,
command: &command,
approval_policy: AskForApproval::OnRequest,
sandbox_policy: &SandboxPolicy::ReadOnly,
sandbox_permissions: SandboxPermissions::RequireEscalated,
prefix_rule: Some(vec!["cargo".to_string(), "install".to_string()]),
})
.await;
assert_eq!(
requirement,
ExecApprovalRequirement::NeedsApproval {
reason: None,
proposed_execpolicy_amendment: Some(ExecPolicyAmendment::new(vec![
"cargo".to_string(),
"install".to_string(),
])),
}
);
}
#[tokio::test]
async fn heuristics_apply_when_other_commands_match_policy() {
let policy_src = r#"prefix_rule(pattern=["apple"], decision="allow")"#;
@@ -994,14 +910,13 @@ prefix_rule(
assert_eq!(
ExecPolicyManager::new(policy)
.create_exec_approval_requirement_for_command(ExecApprovalRequest {
features: &Features::with_defaults(),
command: &command,
approval_policy: AskForApproval::UnlessTrusted,
sandbox_policy: &SandboxPolicy::DangerFullAccess,
sandbox_permissions: SandboxPermissions::UseDefault,
prefix_rule: None,
})
.create_exec_approval_requirement_for_command(
&Features::with_defaults(),
&command,
AskForApproval::UnlessTrusted,
&SandboxPolicy::DangerFullAccess,
SandboxPermissions::UseDefault,
)
.await,
ExecApprovalRequirement::NeedsApproval {
reason: None,
@@ -1069,14 +984,13 @@ prefix_rule(
let manager = ExecPolicyManager::default();
let requirement = manager
.create_exec_approval_requirement_for_command(ExecApprovalRequest {
features: &Features::with_defaults(),
command: &command,
approval_policy: AskForApproval::UnlessTrusted,
sandbox_policy: &SandboxPolicy::ReadOnly,
sandbox_permissions: SandboxPermissions::UseDefault,
prefix_rule: None,
})
.create_exec_approval_requirement_for_command(
&Features::with_defaults(),
&command,
AskForApproval::UnlessTrusted,
&SandboxPolicy::ReadOnly,
SandboxPermissions::UseDefault,
)
.await;
assert_eq!(
@@ -1097,14 +1011,13 @@ prefix_rule(
let manager = ExecPolicyManager::default();
let requirement = manager
.create_exec_approval_requirement_for_command(ExecApprovalRequest {
features: &features,
command: &command,
approval_policy: AskForApproval::UnlessTrusted,
sandbox_policy: &SandboxPolicy::ReadOnly,
sandbox_permissions: SandboxPermissions::UseDefault,
prefix_rule: None,
})
.create_exec_approval_requirement_for_command(
&features,
&command,
AskForApproval::UnlessTrusted,
&SandboxPolicy::ReadOnly,
SandboxPermissions::UseDefault,
)
.await;
assert_eq!(
@@ -1128,14 +1041,13 @@ prefix_rule(
let manager = ExecPolicyManager::new(policy);
let requirement = manager
.create_exec_approval_requirement_for_command(ExecApprovalRequest {
features: &Features::with_defaults(),
command: &command,
approval_policy: AskForApproval::OnRequest,
sandbox_policy: &SandboxPolicy::DangerFullAccess,
sandbox_permissions: SandboxPermissions::UseDefault,
prefix_rule: None,
})
.create_exec_approval_requirement_for_command(
&Features::with_defaults(),
&command,
AskForApproval::OnRequest,
&SandboxPolicy::DangerFullAccess,
SandboxPermissions::UseDefault,
)
.await;
assert_eq!(
@@ -1156,14 +1068,13 @@ prefix_rule(
];
let manager = ExecPolicyManager::default();
let requirement = manager
.create_exec_approval_requirement_for_command(ExecApprovalRequest {
features: &Features::with_defaults(),
command: &command,
approval_policy: AskForApproval::UnlessTrusted,
sandbox_policy: &SandboxPolicy::ReadOnly,
sandbox_permissions: SandboxPermissions::UseDefault,
prefix_rule: None,
})
.create_exec_approval_requirement_for_command(
&Features::with_defaults(),
&command,
AskForApproval::UnlessTrusted,
&SandboxPolicy::ReadOnly,
SandboxPermissions::UseDefault,
)
.await;
assert_eq!(
@@ -1195,14 +1106,13 @@ prefix_rule(
assert_eq!(
ExecPolicyManager::new(policy)
.create_exec_approval_requirement_for_command(ExecApprovalRequest {
features: &Features::with_defaults(),
command: &command,
approval_policy: AskForApproval::UnlessTrusted,
sandbox_policy: &SandboxPolicy::ReadOnly,
sandbox_permissions: SandboxPermissions::UseDefault,
prefix_rule: None,
})
.create_exec_approval_requirement_for_command(
&Features::with_defaults(),
&command,
AskForApproval::UnlessTrusted,
&SandboxPolicy::ReadOnly,
SandboxPermissions::UseDefault,
)
.await,
ExecApprovalRequirement::NeedsApproval {
reason: None,
@@ -1219,14 +1129,13 @@ prefix_rule(
let manager = ExecPolicyManager::default();
let requirement = manager
.create_exec_approval_requirement_for_command(ExecApprovalRequest {
features: &Features::with_defaults(),
command: &command,
approval_policy: AskForApproval::OnRequest,
sandbox_policy: &SandboxPolicy::ReadOnly,
sandbox_permissions: SandboxPermissions::UseDefault,
prefix_rule: None,
})
.create_exec_approval_requirement_for_command(
&Features::with_defaults(),
&command,
AskForApproval::OnRequest,
&SandboxPolicy::ReadOnly,
SandboxPermissions::UseDefault,
)
.await;
assert_eq!(
@@ -1250,14 +1159,13 @@ prefix_rule(
let manager = ExecPolicyManager::new(policy);
let requirement = manager
.create_exec_approval_requirement_for_command(ExecApprovalRequest {
features: &Features::with_defaults(),
command: &command,
approval_policy: AskForApproval::OnRequest,
sandbox_policy: &SandboxPolicy::ReadOnly,
sandbox_permissions: SandboxPermissions::UseDefault,
prefix_rule: None,
})
.create_exec_approval_requirement_for_command(
&Features::with_defaults(),
&command,
AskForApproval::OnRequest,
&SandboxPolicy::ReadOnly,
SandboxPermissions::UseDefault,
)
.await;
assert_eq!(
@@ -1318,14 +1226,13 @@ prefix_rule(
assert_eq!(
expected_req,
policy
.create_exec_approval_requirement_for_command(ExecApprovalRequest {
features: &features,
command: &sneaky_command,
approval_policy: AskForApproval::OnRequest,
sandbox_policy: &SandboxPolicy::ReadOnly,
sandbox_permissions: permissions,
prefix_rule: None,
})
.create_exec_approval_requirement_for_command(
&features,
&sneaky_command,
AskForApproval::OnRequest,
&SandboxPolicy::ReadOnly,
permissions,
)
.await,
"{pwsh_approval_reason}"
);
@@ -1342,14 +1249,13 @@ prefix_rule(
]))),
},
policy
.create_exec_approval_requirement_for_command(ExecApprovalRequest {
features: &features,
command: &dangerous_command,
approval_policy: AskForApproval::OnRequest,
sandbox_policy: &SandboxPolicy::ReadOnly,
sandbox_permissions: permissions,
prefix_rule: None,
})
.create_exec_approval_requirement_for_command(
&features,
&dangerous_command,
AskForApproval::OnRequest,
&SandboxPolicy::ReadOnly,
permissions,
)
.await,
r#"On all platforms, a forbidden command should require approval
(unless AskForApproval::Never is specified)."#
@@ -1362,14 +1268,13 @@ prefix_rule(
reason: "`rm -rf /important/data` rejected: blocked by policy".to_string(),
},
policy
.create_exec_approval_requirement_for_command(ExecApprovalRequest {
features: &features,
command: &dangerous_command,
approval_policy: AskForApproval::Never,
sandbox_policy: &SandboxPolicy::ReadOnly,
sandbox_permissions: permissions,
prefix_rule: None,
})
.create_exec_approval_requirement_for_command(
&features,
&dangerous_command,
AskForApproval::Never,
&SandboxPolicy::ReadOnly,
permissions,
)
.await,
r#"On all platforms, a forbidden command should require approval
(unless AskForApproval::Never is specified)."#

View File

@@ -5,20 +5,14 @@
//! booleans through multiple types, call sites consult a single `Features`
//! container attached to `Config`.
use crate::config::CONFIG_TOML_FILE;
use crate::config::Config;
use crate::config::ConfigToml;
use crate::config::profile::ConfigProfile;
use crate::protocol::Event;
use crate::protocol::EventMsg;
use crate::protocol::WarningEvent;
use codex_otel::OtelManager;
use schemars::JsonSchema;
use serde::Deserialize;
use serde::Serialize;
use std::collections::BTreeMap;
use std::collections::BTreeSet;
use toml::Value as TomlValue;
mod legacy;
pub(crate) use legacy::LegacyFeatureToggles;
@@ -89,8 +83,6 @@ pub enum Feature {
WebSearchCached,
/// Gate the execpolicy enforcement for shell/unified exec.
ExecPolicy,
/// Allow the model to request approval and propose exec rules.
RequestRule,
/// Enable Windows sandbox (restricted token) on Windows.
WindowsSandbox,
/// Use the elevated Windows sandbox pipeline (setup + runner).
@@ -101,8 +93,6 @@ pub enum Feature {
RemoteModels,
/// Experimental shell snapshotting.
ShellSnapshot,
/// Persist rollout metadata to a local SQLite database.
Sqlite,
/// Append additional AGENTS.md guidance to user instructions.
ChildAgentsMd,
/// Enforce UTF8 output in Powershell.
@@ -111,18 +101,12 @@ pub enum Feature {
EnableRequestCompression,
/// Enable collab tools.
Collab,
/// Enable apps.
Apps,
/// Allow prompting and installing missing MCP dependencies.
SkillMcpDependencyInstall,
/// Prompt for missing skill env var dependencies.
SkillEnvVarDependencyPrompt,
/// Enable connectors (apps).
Connectors,
/// Steer feature flag - when enabled, Enter submits immediately instead of queuing.
Steer,
/// Enable collaboration modes (Plan, Code, Pair Programming, Execute).
CollaborationModes,
/// Enable personality selection in the TUI.
Personality,
/// Use the Responses API WebSocket transport for OpenAI by default.
ResponsesWebsockets,
}
@@ -152,8 +136,6 @@ impl Feature {
pub struct LegacyFeatureUsage {
pub alias: String,
pub feature: Feature,
pub summary: String,
pub details: Option<String>,
}
/// Holds the effective set of enabled features.
@@ -210,12 +192,9 @@ impl Features {
}
pub fn record_legacy_usage_force(&mut self, alias: &str, feature: Feature) {
let (summary, details) = legacy_usage_notice(alias, feature);
self.legacy_usages.insert(LegacyFeatureUsage {
alias: alias.to_string(),
feature,
summary,
details,
});
}
@@ -226,8 +205,10 @@ impl Features {
self.record_legacy_usage_force(alias, feature);
}
pub fn legacy_feature_usages(&self) -> impl Iterator<Item = &LegacyFeatureUsage> + '_ {
self.legacy_usages.iter()
pub fn legacy_feature_usages(&self) -> impl Iterator<Item = (&str, Feature)> + '_ {
self.legacy_usages
.iter()
.map(|usage| (usage.alias.as_str(), usage.feature))
}
pub fn emit_metrics(&self, otel: &OtelManager) {
@@ -248,21 +229,6 @@ impl Features {
/// Apply a table of key -> bool toggles (e.g. from TOML).
pub fn apply_map(&mut self, m: &BTreeMap<String, bool>) {
for (k, v) in m {
match k.as_str() {
"web_search_request" => {
self.record_legacy_usage_force(
"features.web_search_request",
Feature::WebSearchRequest,
);
}
"web_search_cached" => {
self.record_legacy_usage_force(
"features.web_search_cached",
Feature::WebSearchCached,
);
}
_ => {}
}
match feature_for_key(k) {
Some(feat) => {
if k != feat.key() {
@@ -323,42 +289,6 @@ impl Features {
}
}
fn legacy_usage_notice(alias: &str, feature: Feature) -> (String, Option<String>) {
let canonical = feature.key();
match feature {
Feature::WebSearchRequest | Feature::WebSearchCached => {
let label = match alias {
"web_search" => "[features].web_search",
"tools.web_search" => "[tools].web_search",
"features.web_search_request" | "web_search_request" => {
"[features].web_search_request"
}
"features.web_search_cached" | "web_search_cached" => {
"[features].web_search_cached"
}
_ => alias,
};
let summary = format!("`{label}` is deprecated. Use `web_search` instead.");
(summary, Some(web_search_details().to_string()))
}
_ => {
let summary = format!("`{alias}` is deprecated. Use `[features].{canonical}` instead.");
let details = if alias == canonical {
None
} else {
Some(format!(
"Enable it with `--enable {canonical}` or `[features].{canonical}` in config.toml. See https://github.com/openai/codex/blob/main/docs/config.md#feature-flags for details."
))
};
(summary, details)
}
}
}
fn web_search_details() -> &'static str {
"Set `web_search` to `\"live\"`, `\"cached\"`, or `\"disabled\"` in config.toml."
}
/// Keys accepted in `[features]` tables.
fn feature_for_key(key: &str) -> Option<Feature> {
for spec in FEATURES {
@@ -407,13 +337,13 @@ pub const FEATURES: &[FeatureSpec] = &[
FeatureSpec {
id: Feature::WebSearchRequest,
key: "web_search_request",
stage: Stage::Deprecated,
stage: Stage::Stable,
default_enabled: false,
},
FeatureSpec {
id: Feature::WebSearchCached,
key: "web_search_cached",
stage: Stage::Deprecated,
stage: Stage::UnderDevelopment,
default_enabled: false,
},
// Experimental program. Rendered in the `/experimental` menu for users.
@@ -437,12 +367,6 @@ pub const FEATURES: &[FeatureSpec] = &[
},
default_enabled: false,
},
FeatureSpec {
id: Feature::Sqlite,
key: "sqlite",
stage: Stage::UnderDevelopment,
default_enabled: false,
},
FeatureSpec {
id: Feature::ChildAgentsMd,
key: "child_agents_md",
@@ -461,12 +385,6 @@ pub const FEATURES: &[FeatureSpec] = &[
stage: Stage::UnderDevelopment,
default_enabled: true,
},
FeatureSpec {
id: Feature::RequestRule,
key: "request_rule",
stage: Stage::UnderDevelopment,
default_enabled: false,
},
FeatureSpec {
id: Feature::WindowsSandbox,
key: "experimental_windows_sandbox",
@@ -510,8 +428,8 @@ pub const FEATURES: &[FeatureSpec] = &[
FeatureSpec {
id: Feature::EnableRequestCompression,
key: "enable_request_compression",
stage: Stage::Stable,
default_enabled: true,
stage: Stage::UnderDevelopment,
default_enabled: false,
},
FeatureSpec {
id: Feature::Collab,
@@ -520,24 +438,8 @@ pub const FEATURES: &[FeatureSpec] = &[
default_enabled: false,
},
FeatureSpec {
id: Feature::Apps,
key: "apps",
stage: Stage::Experimental {
name: "Apps",
menu_description: "Use a connected ChatGPT App using \"$\". Install Apps via /apps command. Restart Codex after enabling.",
announcement: "NEW: Use ChatGPT Apps (Connectors) in Codex via $ mentions. Enable in /experimental and restart Codex!",
},
default_enabled: false,
},
FeatureSpec {
id: Feature::SkillMcpDependencyInstall,
key: "skill_mcp_dependency_install",
stage: Stage::Stable,
default_enabled: true,
},
FeatureSpec {
id: Feature::SkillEnvVarDependencyPrompt,
key: "skill_env_var_dependency_prompt",
id: Feature::Connectors,
key: "connectors",
stage: Stage::UnderDevelopment,
default_enabled: false,
},
@@ -557,12 +459,6 @@ pub const FEATURES: &[FeatureSpec] = &[
stage: Stage::UnderDevelopment,
default_enabled: false,
},
FeatureSpec {
id: Feature::Personality,
key: "personality",
stage: Stage::UnderDevelopment,
default_enabled: false,
},
FeatureSpec {
id: Feature::ResponsesWebsockets,
key: "responses_websockets",
@@ -570,54 +466,3 @@ pub const FEATURES: &[FeatureSpec] = &[
default_enabled: false,
},
];
/// Push a warning event if any under-development features are enabled.
pub fn maybe_push_unstable_features_warning(
config: &Config,
post_session_configured_events: &mut Vec<Event>,
) {
if config.suppress_unstable_features_warning {
return;
}
let mut under_development_feature_keys = Vec::new();
if let Some(table) = config
.config_layer_stack
.effective_config()
.get("features")
.and_then(TomlValue::as_table)
{
for (key, value) in table {
if value.as_bool() != Some(true) {
continue;
}
let Some(spec) = FEATURES.iter().find(|spec| spec.key == key.as_str()) else {
continue;
};
if !config.features.enabled(spec.id) {
continue;
}
if matches!(spec.stage, Stage::UnderDevelopment) {
under_development_feature_keys.push(spec.key.to_string());
}
}
}
if under_development_feature_keys.is_empty() {
return;
}
let under_development_feature_keys = under_development_feature_keys.join(", ");
let config_path = config
.codex_home
.join(CONFIG_TOML_FILE)
.display()
.to_string();
let message = format!(
"Under-development features enabled: {under_development_feature_keys}. Under-development features are incomplete and may behave unpredictably. To suppress this warning, set `suppress_unstable_features_warning = true` in {config_path}."
);
post_session_configured_events.push(Event {
id: "".to_owned(),
msg: EventMsg::Warning(WarningEvent { message }),
});
}

View File

@@ -9,10 +9,6 @@ struct Alias {
}
const ALIASES: &[Alias] = &[
Alias {
legacy_key: "connectors",
feature: Feature::Apps,
},
Alias {
legacy_key: "enable_experimental_windows_sandbox",
feature: Feature::WindowsSandbox,

View File

@@ -38,12 +38,10 @@ pub mod landlock;
pub mod mcp;
mod mcp_connection_manager;
pub mod models_manager;
mod transport_manager;
pub use mcp_connection_manager::MCP_SANDBOX_STATE_CAPABILITY;
pub use mcp_connection_manager::MCP_SANDBOX_STATE_METHOD;
pub use mcp_connection_manager::SandboxState;
mod mcp_tool_call;
mod mentions;
mod message_history;
mod model_provider_info;
pub mod parse_command;
@@ -71,7 +69,6 @@ mod event_mapping;
pub mod review_format;
pub mod review_prompts;
mod thread_manager;
pub mod web_search;
pub use codex_protocol::protocol::InitialHistory;
pub use thread_manager::NewThread;
pub use thread_manager::ThreadManager;
@@ -93,21 +90,18 @@ pub mod shell;
pub mod shell_snapshot;
pub mod skills;
pub mod spawn;
pub mod state_db;
pub mod terminal;
mod tools;
pub mod turn_diff_tracker;
pub use rollout::ARCHIVED_SESSIONS_SUBDIR;
pub use rollout::INTERACTIVE_SESSION_SOURCES;
pub use rollout::RolloutRecorder;
pub use rollout::RolloutRecorderParams;
pub use rollout::SESSIONS_SUBDIR;
pub use rollout::SessionMeta;
pub use rollout::find_archived_thread_path_by_id_str;
#[deprecated(note = "use find_thread_path_by_id_str")]
pub use rollout::find_conversation_path_by_id_str;
pub use rollout::find_thread_path_by_id_str;
pub use rollout::find_thread_path_by_name_str;
pub use rollout::list::Cursor;
pub use rollout::list::ThreadItem;
pub use rollout::list::ThreadSortKey;
@@ -116,7 +110,6 @@ pub use rollout::list::parse_cursor;
pub use rollout::list::read_head_for_summary;
pub use rollout::list::read_session_meta_line;
pub use rollout::rollout_date_parts;
pub use transport_manager::TransportManager;
mod function_tool;
mod state;
mod tasks;
@@ -132,6 +125,9 @@ pub use exec_policy::ExecPolicyError;
pub use exec_policy::check_execpolicy_for_warnings;
pub use exec_policy::load_exec_policy;
pub use safety::get_platform_sandbox;
pub use safety::is_windows_elevated_sandbox_enabled;
pub use safety::set_windows_elevated_sandbox_enabled;
pub use safety::set_windows_sandbox_enabled;
pub use tools::spec::parse_tool_input_schema;
// Re-export the protocol types from the standalone `codex-protocol` crate so existing
// `codex_core::protocol::...` references continue to work across the workspace.

View File

@@ -4,53 +4,12 @@ use anyhow::Result;
use codex_protocol::protocol::McpAuthStatus;
use codex_rmcp_client::OAuthCredentialsStoreMode;
use codex_rmcp_client::determine_streamable_http_auth_status;
use codex_rmcp_client::supports_oauth_login;
use futures::future::join_all;
use tracing::warn;
use crate::config::types::McpServerConfig;
use crate::config::types::McpServerTransportConfig;
#[derive(Debug, Clone)]
pub struct McpOAuthLoginConfig {
pub url: String,
pub http_headers: Option<HashMap<String, String>>,
pub env_http_headers: Option<HashMap<String, String>>,
}
#[derive(Debug)]
pub enum McpOAuthLoginSupport {
Supported(McpOAuthLoginConfig),
Unsupported,
Unknown(anyhow::Error),
}
pub async fn oauth_login_support(transport: &McpServerTransportConfig) -> McpOAuthLoginSupport {
let McpServerTransportConfig::StreamableHttp {
url,
bearer_token_env_var,
http_headers,
env_http_headers,
} = transport
else {
return McpOAuthLoginSupport::Unsupported;
};
if bearer_token_env_var.is_some() {
return McpOAuthLoginSupport::Unsupported;
}
match supports_oauth_login(url).await {
Ok(true) => McpOAuthLoginSupport::Supported(McpOAuthLoginConfig {
url: url.clone(),
http_headers: http_headers.clone(),
env_http_headers: env_http_headers.clone(),
}),
Ok(false) => McpOAuthLoginSupport::Unsupported,
Err(err) => McpOAuthLoginSupport::Unknown(err),
}
}
#[derive(Debug, Clone)]
pub struct McpAuthStatusEntry {
pub config: McpServerConfig,

View File

@@ -1,12 +1,7 @@
pub mod auth;
mod skill_dependencies;
pub(crate) use skill_dependencies::maybe_prompt_and_install_mcp_dependencies;
use std::collections::HashMap;
use std::env;
use std::path::PathBuf;
use std::time::Duration;
use async_channel::unbounded;
use codex_protocol::protocol::McpListToolsResponseEvent;
@@ -26,7 +21,7 @@ use crate::mcp_connection_manager::SandboxState;
const MCP_TOOL_NAME_PREFIX: &str = "mcp";
const MCP_TOOL_NAME_DELIMITER: &str = "__";
pub(crate) const CODEX_APPS_MCP_SERVER_NAME: &str = "codex_apps";
pub(crate) const CODEX_APPS_MCP_SERVER_NAME: &str = "codex_apps_mcp";
const CODEX_CONNECTORS_TOKEN_ENV_VAR: &str = "CODEX_CONNECTORS_TOKEN";
fn codex_apps_mcp_bearer_token_env_var() -> Option<String> {
@@ -98,7 +93,7 @@ fn codex_apps_mcp_server_config(config: &Config, auth: Option<&CodexAuth>) -> Mc
},
enabled: true,
disabled_reason: None,
startup_timeout_sec: Some(Duration::from_secs(30)),
startup_timeout_sec: None,
tool_timeout_sec: None,
enabled_tools: None,
disabled_tools: None,
@@ -129,7 +124,7 @@ pub(crate) fn effective_mcp_servers(
) -> HashMap<String, McpServerConfig> {
with_codex_apps_mcp(
config.mcp_servers.get().clone(),
config.features.enabled(Feature::Apps),
config.features.enabled(Feature::Connectors),
auth,
config,
)

View File

@@ -1,519 +0,0 @@
use std::collections::HashMap;
use std::collections::HashSet;
use codex_protocol::protocol::AskForApproval;
use codex_protocol::protocol::SandboxPolicy;
use codex_protocol::request_user_input::RequestUserInputArgs;
use codex_protocol::request_user_input::RequestUserInputQuestion;
use codex_protocol::request_user_input::RequestUserInputQuestionOption;
use codex_protocol::request_user_input::RequestUserInputResponse;
use codex_rmcp_client::perform_oauth_login;
use tokio_util::sync::CancellationToken;
use tracing::warn;
use super::auth::McpOAuthLoginSupport;
use super::auth::oauth_login_support;
use super::effective_mcp_servers;
use crate::codex::Session;
use crate::codex::TurnContext;
use crate::config::Config;
use crate::config::edit::ConfigEditsBuilder;
use crate::config::load_global_mcp_servers;
use crate::config::types::McpServerConfig;
use crate::config::types::McpServerTransportConfig;
use crate::default_client::is_first_party_originator;
use crate::default_client::originator;
use crate::features::Feature;
use crate::skills::SkillMetadata;
use crate::skills::model::SkillToolDependency;
const SKILL_MCP_DEPENDENCY_PROMPT_ID: &str = "skill_mcp_dependency_install";
const MCP_DEPENDENCY_OPTION_INSTALL: &str = "Install";
const MCP_DEPENDENCY_OPTION_SKIP: &str = "Continue anyway";
fn is_full_access_mode(turn_context: &TurnContext) -> bool {
matches!(turn_context.approval_policy, AskForApproval::Never)
&& matches!(
turn_context.sandbox_policy,
SandboxPolicy::DangerFullAccess | SandboxPolicy::ExternalSandbox { .. }
)
}
fn format_missing_mcp_dependencies(missing: &HashMap<String, McpServerConfig>) -> String {
let mut names = missing.keys().cloned().collect::<Vec<_>>();
names.sort();
names.join(", ")
}
async fn filter_prompted_mcp_dependencies(
sess: &Session,
missing: &HashMap<String, McpServerConfig>,
) -> HashMap<String, McpServerConfig> {
let prompted = sess.mcp_dependency_prompted().await;
if prompted.is_empty() {
return missing.clone();
}
missing
.iter()
.filter(|(name, config)| !prompted.contains(&canonical_mcp_server_key(name, config)))
.map(|(name, config)| (name.clone(), config.clone()))
.collect()
}
async fn should_install_mcp_dependencies(
sess: &Session,
turn_context: &TurnContext,
missing: &HashMap<String, McpServerConfig>,
cancellation_token: &CancellationToken,
) -> bool {
if is_full_access_mode(turn_context) {
return true;
}
let server_list = format_missing_mcp_dependencies(missing);
let question = RequestUserInputQuestion {
id: SKILL_MCP_DEPENDENCY_PROMPT_ID.to_string(),
header: "Install MCP servers?".to_string(),
question: format!(
"The following MCP servers are required by the selected skills but are not installed yet: {server_list}. Install them now?"
),
is_other: false,
is_secret: false,
options: Some(vec![
RequestUserInputQuestionOption {
label: MCP_DEPENDENCY_OPTION_INSTALL.to_string(),
description:
"Install and enable the missing MCP servers in your global config."
.to_string(),
},
RequestUserInputQuestionOption {
label: MCP_DEPENDENCY_OPTION_SKIP.to_string(),
description: "Skip installation for now and do not show again for these MCP servers in this session."
.to_string(),
},
]),
};
let args = RequestUserInputArgs {
questions: vec![question],
};
let sub_id = &turn_context.sub_id;
let call_id = format!("mcp-deps-{sub_id}");
let response_fut = sess.request_user_input(turn_context, call_id, args);
let response = tokio::select! {
biased;
_ = cancellation_token.cancelled() => {
let empty = RequestUserInputResponse {
answers: HashMap::new(),
};
sess.notify_user_input_response(sub_id, empty.clone()).await;
empty
}
response = response_fut => response.unwrap_or_else(|| RequestUserInputResponse {
answers: HashMap::new(),
}),
};
let install = response
.answers
.get(SKILL_MCP_DEPENDENCY_PROMPT_ID)
.is_some_and(|answer| {
answer
.answers
.iter()
.any(|entry| entry == MCP_DEPENDENCY_OPTION_INSTALL)
});
let prompted_keys = missing
.iter()
.map(|(name, config)| canonical_mcp_server_key(name, config));
sess.record_mcp_dependency_prompted(prompted_keys).await;
install
}
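
The `biased;` keyword above makes the select poll its branches in written order, so a fired cancellation token always beats a simultaneously ready prompt response. A self-contained sketch of the pattern (values here are illustrative, not from the codebase):

```rust
use tokio_util::sync::CancellationToken;

#[tokio::main]
async fn main() {
    let token = CancellationToken::new();
    token.cancel();
    let answer = tokio::select! {
        biased; // poll in order: the cancellation branch wins ties
        _ = token.cancelled() => None,
        v = async { 42 } => Some(v),
    };
    // Both branches are ready, but `biased` guarantees cancellation is seen first.
    assert_eq!(answer, None);
}
```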
pub(crate) async fn maybe_prompt_and_install_mcp_dependencies(
sess: &Session,
turn_context: &TurnContext,
cancellation_token: &CancellationToken,
mentioned_skills: &[SkillMetadata],
) {
let originator_value = originator().value;
if !is_first_party_originator(originator_value.as_str()) {
// Only support first-party clients for now.
return;
}
let config = turn_context.client.config();
if mentioned_skills.is_empty() || !config.features.enabled(Feature::SkillMcpDependencyInstall) {
return;
}
let installed = config.mcp_servers.get().clone();
let missing = collect_missing_mcp_dependencies(mentioned_skills, &installed);
if missing.is_empty() {
return;
}
let unprompted_missing = filter_prompted_mcp_dependencies(sess, &missing).await;
if unprompted_missing.is_empty() {
return;
}
if should_install_mcp_dependencies(sess, turn_context, &unprompted_missing, cancellation_token)
.await
{
maybe_install_mcp_dependencies(sess, turn_context, config.as_ref(), mentioned_skills).await;
}
}
pub(crate) async fn maybe_install_mcp_dependencies(
sess: &Session,
turn_context: &TurnContext,
config: &Config,
mentioned_skills: &[SkillMetadata],
) {
if mentioned_skills.is_empty() || !config.features.enabled(Feature::SkillMcpDependencyInstall) {
return;
}
let codex_home = config.codex_home.clone();
let installed = config.mcp_servers.get().clone();
let missing = collect_missing_mcp_dependencies(mentioned_skills, &installed);
if missing.is_empty() {
return;
}
let mut servers = match load_global_mcp_servers(&codex_home).await {
Ok(servers) => servers,
Err(err) => {
warn!("failed to load MCP servers while installing skill dependencies: {err}");
return;
}
};
let mut updated = false;
let mut added = Vec::new();
for (name, config) in missing {
if servers.contains_key(&name) {
continue;
}
servers.insert(name.clone(), config.clone());
added.push((name, config));
updated = true;
}
if !updated {
return;
}
if let Err(err) = ConfigEditsBuilder::new(&codex_home)
.replace_mcp_servers(&servers)
.apply()
.await
{
warn!("failed to persist MCP dependencies for mentioned skills: {err}");
return;
}
for (name, server_config) in added {
let oauth_config = match oauth_login_support(&server_config.transport).await {
McpOAuthLoginSupport::Supported(config) => config,
McpOAuthLoginSupport::Unsupported => continue,
McpOAuthLoginSupport::Unknown(err) => {
warn!("MCP server may or may not require login for dependency {name}: {err}");
continue;
}
};
sess.notify_background_event(
turn_context,
format!(
"Authenticating MCP {name}... Follow instructions in your browser if prompted."
),
)
.await;
if let Err(err) = perform_oauth_login(
&name,
&oauth_config.url,
config.mcp_oauth_credentials_store_mode,
oauth_config.http_headers,
oauth_config.env_http_headers,
&[],
config.mcp_oauth_callback_port,
)
.await
{
warn!("failed to login to MCP dependency {name}: {err}");
}
}
// Refresh from the effective merged MCP map (global + repo + managed) and
// overlay the updated global servers so we don't drop repo-scoped servers.
let auth = sess.services.auth_manager.auth().await;
let mut refresh_servers = effective_mcp_servers(config, auth.as_ref());
for (name, server_config) in &servers {
refresh_servers
.entry(name.clone())
.or_insert_with(|| server_config.clone());
}
sess.refresh_mcp_servers_now(
turn_context,
refresh_servers,
config.mcp_oauth_credentials_store_mode,
)
.await;
}
fn canonical_mcp_key(transport: &str, identifier: &str, fallback: &str) -> String {
let identifier = identifier.trim();
if identifier.is_empty() {
fallback.to_string()
} else {
format!("mcp__{transport}__{identifier}")
}
}
fn canonical_mcp_server_key(name: &str, config: &McpServerConfig) -> String {
match &config.transport {
McpServerTransportConfig::Stdio { command, .. } => {
canonical_mcp_key("stdio", command, name)
}
McpServerTransportConfig::StreamableHttp { url, .. } => {
canonical_mcp_key("streamable_http", url, name)
}
}
}
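
Keying on transport plus identifier (rather than the user-chosen alias) is what lets `collect_missing_mcp_dependencies` treat differently named entries for the same server as one install. Illustrative expectations for the helper above (URLs and aliases are made up):

```rust
#[test]
fn canonical_key_examples() {
    assert_eq!(
        canonical_mcp_key("streamable_http", "https://example.com/mcp", "alias"),
        "mcp__streamable_http__https://example.com/mcp"
    );
    // A blank identifier falls back to the configured alias.
    assert_eq!(canonical_mcp_key("stdio", "  ", "alias"), "alias");
}
```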
fn canonical_mcp_dependency_key(dependency: &SkillToolDependency) -> Result<String, String> {
let transport = dependency.transport.as_deref().unwrap_or("streamable_http");
if transport.eq_ignore_ascii_case("streamable_http") {
let url = dependency
.url
.as_ref()
.ok_or_else(|| "missing url for streamable_http dependency".to_string())?;
return Ok(canonical_mcp_key("streamable_http", url, &dependency.value));
}
if transport.eq_ignore_ascii_case("stdio") {
let command = dependency
.command
.as_ref()
.ok_or_else(|| "missing command for stdio dependency".to_string())?;
return Ok(canonical_mcp_key("stdio", command, &dependency.value));
}
Err(format!("unsupported transport {transport}"))
}
pub(crate) fn collect_missing_mcp_dependencies(
mentioned_skills: &[SkillMetadata],
installed: &HashMap<String, McpServerConfig>,
) -> HashMap<String, McpServerConfig> {
let mut missing = HashMap::new();
let installed_keys: HashSet<String> = installed
.iter()
.map(|(name, config)| canonical_mcp_server_key(name, config))
.collect();
let mut seen_canonical_keys = HashSet::new();
for skill in mentioned_skills {
let Some(dependencies) = skill.dependencies.as_ref() else {
continue;
};
for tool in &dependencies.tools {
if !tool.r#type.eq_ignore_ascii_case("mcp") {
continue;
}
let dependency_key = match canonical_mcp_dependency_key(tool) {
Ok(key) => key,
Err(err) => {
let dependency = tool.value.as_str();
let skill_name = skill.name.as_str();
warn!(
"unable to auto-install MCP dependency {dependency} for skill {skill_name}: {err}",
);
continue;
}
};
if installed_keys.contains(&dependency_key)
|| seen_canonical_keys.contains(&dependency_key)
{
continue;
}
let config = match mcp_dependency_to_server_config(tool) {
Ok(config) => config,
Err(err) => {
let dependency = dependency_key.as_str();
let skill_name = skill.name.as_str();
warn!(
"unable to auto-install MCP dependency {dependency} for skill {skill_name}: {err}",
);
continue;
}
};
missing.insert(tool.value.clone(), config);
seen_canonical_keys.insert(dependency_key);
}
}
missing
}
fn mcp_dependency_to_server_config(
dependency: &SkillToolDependency,
) -> Result<McpServerConfig, String> {
let transport = dependency.transport.as_deref().unwrap_or("streamable_http");
if transport.eq_ignore_ascii_case("streamable_http") {
let url = dependency
.url
.as_ref()
.ok_or_else(|| "missing url for streamable_http dependency".to_string())?;
return Ok(McpServerConfig {
transport: McpServerTransportConfig::StreamableHttp {
url: url.clone(),
bearer_token_env_var: None,
http_headers: None,
env_http_headers: None,
},
enabled: true,
disabled_reason: None,
startup_timeout_sec: None,
tool_timeout_sec: None,
enabled_tools: None,
disabled_tools: None,
scopes: None,
});
}
if transport.eq_ignore_ascii_case("stdio") {
let command = dependency
.command
.as_ref()
.ok_or_else(|| "missing command for stdio dependency".to_string())?;
return Ok(McpServerConfig {
transport: McpServerTransportConfig::Stdio {
command: command.clone(),
args: Vec::new(),
env: None,
env_vars: Vec::new(),
cwd: None,
},
enabled: true,
disabled_reason: None,
startup_timeout_sec: None,
tool_timeout_sec: None,
enabled_tools: None,
disabled_tools: None,
scopes: None,
});
}
Err(format!("unsupported transport {transport}"))
}
#[cfg(test)]
mod tests {
use super::*;
use crate::skills::model::SkillDependencies;
use codex_protocol::protocol::SkillScope;
use pretty_assertions::assert_eq;
use std::path::PathBuf;
fn skill_with_tools(tools: Vec<SkillToolDependency>) -> SkillMetadata {
SkillMetadata {
name: "skill".to_string(),
description: "skill".to_string(),
short_description: None,
interface: None,
dependencies: Some(SkillDependencies { tools }),
path: PathBuf::from("skill"),
scope: SkillScope::User,
}
}
#[test]
fn collect_missing_respects_canonical_installed_key() {
let url = "https://example.com/mcp".to_string();
let skills = vec![skill_with_tools(vec![SkillToolDependency {
r#type: "mcp".to_string(),
value: "github".to_string(),
description: None,
transport: Some("streamable_http".to_string()),
command: None,
url: Some(url.clone()),
}])];
let installed = HashMap::from([(
"alias".to_string(),
McpServerConfig {
transport: McpServerTransportConfig::StreamableHttp {
url,
bearer_token_env_var: None,
http_headers: None,
env_http_headers: None,
},
enabled: true,
disabled_reason: None,
startup_timeout_sec: None,
tool_timeout_sec: None,
enabled_tools: None,
disabled_tools: None,
scopes: None,
},
)]);
assert_eq!(
collect_missing_mcp_dependencies(&skills, &installed),
HashMap::new()
);
}
#[test]
fn collect_missing_dedupes_by_canonical_key_but_preserves_original_name() {
let url = "https://example.com/one".to_string();
let skills = vec![skill_with_tools(vec![
SkillToolDependency {
r#type: "mcp".to_string(),
value: "alias-one".to_string(),
description: None,
transport: Some("streamable_http".to_string()),
command: None,
url: Some(url.clone()),
},
SkillToolDependency {
r#type: "mcp".to_string(),
value: "alias-two".to_string(),
description: None,
transport: Some("streamable_http".to_string()),
command: None,
url: Some(url.clone()),
},
])];
let expected = HashMap::from([(
"alias-one".to_string(),
McpServerConfig {
transport: McpServerTransportConfig::StreamableHttp {
url,
bearer_token_env_var: None,
http_headers: None,
env_http_headers: None,
},
enabled: true,
disabled_reason: None,
startup_timeout_sec: None,
tool_timeout_sec: None,
enabled_tools: None,
disabled_tools: None,
scopes: None,
},
)]);
assert_eq!(
collect_missing_mcp_dependencies(&skills, &HashMap::new()),
expected
);
}
}

View File

@@ -14,7 +14,6 @@ use std::path::PathBuf;
use std::sync::Arc;
use std::time::Duration;
use crate::mcp::CODEX_APPS_MCP_SERVER_NAME;
use crate::mcp::auth::McpAuthStatusEntry;
use anyhow::Context;
use anyhow::Result;
@@ -437,33 +436,13 @@ impl McpConnectionManager {
.await
}
pub(crate) async fn wait_for_server_ready(&self, server_name: &str, timeout: Duration) -> bool {
let Some(async_managed_client) = self.clients.get(server_name) else {
return false;
};
match tokio::time::timeout(timeout, async_managed_client.client()).await {
Ok(Ok(_)) => true,
Ok(Err(_)) | Err(_) => false,
}
}
/// Returns a single map that contains all tools. Each key is the
/// fully-qualified name for the tool.
#[instrument(level = "trace", skip_all)]
pub async fn list_all_tools(&self) -> HashMap<String, ToolInfo> {
let mut tools = HashMap::new();
for (server_name, managed_client) in &self.clients {
let client = if server_name == CODEX_APPS_MCP_SERVER_NAME {
// Avoid blocking on codex_apps_mcp startup; use tools only when ready.
match managed_client.client.clone().now_or_never() {
Some(Ok(client)) => Some(client),
_ => None,
}
} else {
managed_client.client().await.ok()
};
if let Some(client) = client {
for managed_client in self.clients.values() {
if let Ok(client) = managed_client.client().await {
tools.extend(qualify_tools(filter_tools(
client.tools,
client.tool_filter,
@@ -769,32 +748,6 @@ fn filter_tools(tools: Vec<ToolInfo>, filter: ToolFilter) -> Vec<ToolInfo> {
.collect()
}
fn normalize_codex_apps_tool_title(
server_name: &str,
connector_name: Option<&str>,
value: &str,
) -> String {
if server_name != CODEX_APPS_MCP_SERVER_NAME {
return value.to_string();
}
let Some(connector_name) = connector_name
.map(str::trim)
.filter(|name| !name.is_empty())
else {
return value.to_string();
};
let prefix = format!("{connector_name}_");
if let Some(stripped) = value.strip_prefix(&prefix)
&& !stripped.is_empty()
{
return stripped.to_string();
}
value.to_string()
}
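
In other words, the removed helper stripped a `{connector_name}_` prefix from tool titles exposed by the apps server and left everything else untouched. Illustrative expectations (connector and tool names are made up):

```rust
#[test]
fn normalize_title_examples() {
    let apps = CODEX_APPS_MCP_SERVER_NAME;
    assert_eq!(
        normalize_codex_apps_tool_title(apps, Some("GitHub"), "GitHub_create_issue"),
        "create_issue"
    );
    // Other servers and non-matching prefixes pass through unchanged.
    assert_eq!(
        normalize_codex_apps_tool_title("other_server", Some("GitHub"), "GitHub_create_issue"),
        "GitHub_create_issue"
    );
}
```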
fn resolve_bearer_token(
server_name: &str,
bearer_token_env_var: Option<&str>,
@@ -952,23 +905,12 @@ async fn list_tools_for_client(
Ok(resp
.tools
.into_iter()
.map(|tool| {
let connector_name = tool.connector_name;
let mut tool_def = tool.tool;
if let Some(title) = tool_def.title.as_deref() {
let normalized_title =
normalize_codex_apps_tool_title(server_name, connector_name.as_deref(), title);
if tool_def.title.as_deref() != Some(normalized_title.as_str()) {
tool_def.title = Some(normalized_title);
}
}
ToolInfo {
server_name: server_name.to_owned(),
tool_name: tool_def.name.clone(),
tool: tool_def,
connector_id: tool.connector_id,
connector_name,
}
.map(|tool| ToolInfo {
server_name: server_name.to_owned(),
tool_name: tool.tool.name.clone(),
tool: tool.tool,
connector_id: tool.connector_id,
connector_name: tool.connector_name,
})
.collect())
}

View File

@@ -1,30 +1,20 @@
use std::time::Duration;
use std::time::Instant;
use tracing::error;
use crate::codex::Session;
use crate::codex::TurnContext;
use crate::mcp::CODEX_APPS_MCP_SERVER_NAME;
use crate::protocol::EventMsg;
use crate::protocol::McpInvocation;
use crate::protocol::McpToolCallBeginEvent;
use crate::protocol::McpToolCallEndEvent;
use codex_protocol::models::FunctionCallOutputPayload;
use codex_protocol::models::ResponseInputItem;
use codex_protocol::protocol::AskForApproval;
use codex_protocol::protocol::SandboxPolicy;
use codex_protocol::request_user_input::RequestUserInputArgs;
use codex_protocol::request_user_input::RequestUserInputQuestion;
use codex_protocol::request_user_input::RequestUserInputQuestionOption;
use codex_protocol::request_user_input::RequestUserInputResponse;
use mcp_types::ToolAnnotations;
use std::sync::Arc;
/// Handles the specified tool call and dispatches the appropriate
/// `McpToolCallBegin` and `McpToolCallEnd` events to the `Session`.
pub(crate) async fn handle_mcp_tool_call(
sess: Arc<Session>,
sess: &Session,
turn_context: &TurnContext,
call_id: String,
server: String,
@@ -58,79 +48,11 @@ pub(crate) async fn handle_mcp_tool_call(
arguments: arguments_value.clone(),
};
if let Some(decision) =
maybe_request_mcp_tool_approval(sess.as_ref(), turn_context, &call_id, &server, &tool_name)
.await
{
let result = match decision {
McpToolApprovalDecision::Accept => {
let tool_call_begin_event = EventMsg::McpToolCallBegin(McpToolCallBeginEvent {
call_id: call_id.clone(),
invocation: invocation.clone(),
});
notify_mcp_tool_call_event(sess.as_ref(), turn_context, tool_call_begin_event)
.await;
let start = Instant::now();
let result = sess
.call_tool(&server, &tool_name, arguments_value.clone())
.await
.map_err(|e| format!("tool call error: {e:?}"));
if let Err(e) = &result {
tracing::warn!("MCP tool call error: {e:?}");
}
let tool_call_end_event = EventMsg::McpToolCallEnd(McpToolCallEndEvent {
call_id: call_id.clone(),
invocation,
duration: start.elapsed(),
result: result.clone(),
});
notify_mcp_tool_call_event(
sess.as_ref(),
turn_context,
tool_call_end_event.clone(),
)
.await;
result
}
McpToolApprovalDecision::Decline => {
let message = "user rejected MCP tool call".to_string();
notify_mcp_tool_call_skip(
sess.as_ref(),
turn_context,
&call_id,
invocation,
message,
)
.await
}
McpToolApprovalDecision::Cancel => {
let message = "user cancelled MCP tool call".to_string();
notify_mcp_tool_call_skip(
sess.as_ref(),
turn_context,
&call_id,
invocation,
message,
)
.await
}
};
let status = if result.is_ok() { "ok" } else { "error" };
turn_context
.client
.get_otel_manager()
.counter("codex.mcp.call", 1, &[("status", status)]);
return ResponseInputItem::McpToolCallOutput { call_id, result };
}
let tool_call_begin_event = EventMsg::McpToolCallBegin(McpToolCallBeginEvent {
call_id: call_id.clone(),
invocation: invocation.clone(),
});
notify_mcp_tool_call_event(sess.as_ref(), turn_context, tool_call_begin_event).await;
notify_mcp_tool_call_event(sess, turn_context, tool_call_begin_event).await;
let start = Instant::now();
// Perform the tool call.
@@ -148,7 +70,7 @@ pub(crate) async fn handle_mcp_tool_call(
result: result.clone(),
});
notify_mcp_tool_call_event(sess.as_ref(), turn_context, tool_call_end_event.clone()).await;
notify_mcp_tool_call_event(sess, turn_context, tool_call_end_event.clone()).await;
let status = if result.is_ok() { "ok" } else { "error" };
turn_context
@@ -162,236 +84,3 @@ pub(crate) async fn handle_mcp_tool_call(
async fn notify_mcp_tool_call_event(sess: &Session, turn_context: &TurnContext, event: EventMsg) {
sess.send_event(turn_context, event).await;
}
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum McpToolApprovalDecision {
Accept,
Decline,
Cancel,
}
struct McpToolApprovalMetadata {
annotations: ToolAnnotations,
connector_name: Option<String>,
tool_title: Option<String>,
}
const MCP_TOOL_APPROVAL_QUESTION_ID_PREFIX: &str = "mcp_tool_call_approval";
const MCP_TOOL_APPROVAL_ACCEPT: &str = "Accept";
const MCP_TOOL_APPROVAL_DECLINE: &str = "Decline";
const MCP_TOOL_APPROVAL_CANCEL: &str = "Cancel";
async fn maybe_request_mcp_tool_approval(
sess: &Session,
turn_context: &TurnContext,
call_id: &str,
server: &str,
tool_name: &str,
) -> Option<McpToolApprovalDecision> {
if is_full_access_mode(turn_context) {
return None;
}
if server != CODEX_APPS_MCP_SERVER_NAME {
return None;
}
let metadata = lookup_mcp_tool_metadata(sess, server, tool_name).await?;
if !requires_mcp_tool_approval(&metadata.annotations) {
return None;
}
let question_id = format!("{MCP_TOOL_APPROVAL_QUESTION_ID_PREFIX}_{call_id}");
let question = build_mcp_tool_approval_question(
question_id.clone(),
tool_name,
metadata.tool_title.as_deref(),
metadata.connector_name.as_deref(),
&metadata.annotations,
);
let args = RequestUserInputArgs {
questions: vec![question],
};
let response = sess
.request_user_input(turn_context, call_id.to_string(), args)
.await;
Some(parse_mcp_tool_approval_response(response, &question_id))
}
fn is_full_access_mode(turn_context: &TurnContext) -> bool {
matches!(turn_context.approval_policy, AskForApproval::Never)
&& matches!(
turn_context.sandbox_policy,
SandboxPolicy::DangerFullAccess | SandboxPolicy::ExternalSandbox { .. }
)
}
async fn lookup_mcp_tool_metadata(
sess: &Session,
server: &str,
tool_name: &str,
) -> Option<McpToolApprovalMetadata> {
let tools = sess
.services
.mcp_connection_manager
.read()
.await
.list_all_tools()
.await;
tools.into_values().find_map(|tool_info| {
if tool_info.server_name == server && tool_info.tool_name == tool_name {
tool_info
.tool
.annotations
.map(|annotations| McpToolApprovalMetadata {
annotations,
connector_name: tool_info.connector_name,
tool_title: tool_info.tool.title,
})
} else {
None
}
})
}
fn build_mcp_tool_approval_question(
question_id: String,
tool_name: &str,
tool_title: Option<&str>,
connector_name: Option<&str>,
annotations: &ToolAnnotations,
) -> RequestUserInputQuestion {
let destructive = annotations.destructive_hint == Some(true);
let open_world = annotations.open_world_hint == Some(true);
let reason = match (destructive, open_world) {
(true, true) => "may modify data and access external systems",
(true, false) => "may modify or delete data",
(false, true) => "may access external systems",
(false, false) => "may have side effects",
};
let tool_label = tool_title.unwrap_or(tool_name);
let app_label = connector_name
.map(|name| format!("The {name} app"))
.unwrap_or_else(|| "This app".to_string());
let question = format!(
"{app_label} wants to run the tool \"{tool_label}\", which {reason}. Allow this action?"
);
RequestUserInputQuestion {
id: question_id,
header: "Approve app tool call?".to_string(),
question,
is_other: false,
is_secret: false,
options: Some(vec![
RequestUserInputQuestionOption {
label: MCP_TOOL_APPROVAL_ACCEPT.to_string(),
description: "Run the tool and continue.".to_string(),
},
RequestUserInputQuestionOption {
label: MCP_TOOL_APPROVAL_DECLINE.to_string(),
description: "Decline this tool call and continue.".to_string(),
},
RequestUserInputQuestionOption {
label: MCP_TOOL_APPROVAL_CANCEL.to_string(),
description: "Cancel this tool call".to_string(),
},
]),
}
}
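
Concretely, for a destructive but not open-world tool from a "GitHub" connector with no display title, the generated prompt would read as below (a sketch using the `annotations` helper from this file's tests; all names are illustrative):

```rust
let q = build_mcp_tool_approval_question(
    "mcp_tool_call_approval_call-1".to_string(),
    "delete_repo",
    None,           // no display title, so the tool name is used
    Some("GitHub"),
    &annotations(Some(false), Some(true), None),
);
assert_eq!(
    q.question,
    "The GitHub app wants to run the tool \"delete_repo\", which may modify or delete data. Allow this action?"
);
```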
fn parse_mcp_tool_approval_response(
response: Option<RequestUserInputResponse>,
question_id: &str,
) -> McpToolApprovalDecision {
let Some(response) = response else {
return McpToolApprovalDecision::Cancel;
};
let answers = response
.answers
.get(question_id)
.map(|answer| answer.answers.as_slice());
let Some(answers) = answers else {
return McpToolApprovalDecision::Cancel;
};
if answers
.iter()
.any(|answer| answer == MCP_TOOL_APPROVAL_ACCEPT)
{
McpToolApprovalDecision::Accept
} else if answers
.iter()
.any(|answer| answer == MCP_TOOL_APPROVAL_CANCEL)
{
McpToolApprovalDecision::Cancel
} else {
McpToolApprovalDecision::Decline
}
}
fn requires_mcp_tool_approval(annotations: &ToolAnnotations) -> bool {
annotations.read_only_hint == Some(false)
&& (annotations.destructive_hint == Some(true) || annotations.open_world_hint == Some(true))
}
async fn notify_mcp_tool_call_skip(
sess: &Session,
turn_context: &TurnContext,
call_id: &str,
invocation: McpInvocation,
message: String,
) -> Result<mcp_types::CallToolResult, String> {
let tool_call_begin_event = EventMsg::McpToolCallBegin(McpToolCallBeginEvent {
call_id: call_id.to_string(),
invocation: invocation.clone(),
});
notify_mcp_tool_call_event(sess, turn_context, tool_call_begin_event).await;
let tool_call_end_event = EventMsg::McpToolCallEnd(McpToolCallEndEvent {
call_id: call_id.to_string(),
invocation,
duration: Duration::ZERO,
result: Err(message.clone()),
});
notify_mcp_tool_call_event(sess, turn_context, tool_call_end_event).await;
Err(message)
}
#[cfg(test)]
mod tests {
use super::*;
use pretty_assertions::assert_eq;
fn annotations(
read_only: Option<bool>,
destructive: Option<bool>,
open_world: Option<bool>,
) -> ToolAnnotations {
ToolAnnotations {
destructive_hint: destructive,
idempotent_hint: None,
open_world_hint: open_world,
read_only_hint: read_only,
title: None,
}
}
#[test]
fn approval_required_when_read_only_false_and_destructive() {
let annotations = annotations(Some(false), Some(true), None);
assert_eq!(requires_mcp_tool_approval(&annotations), true);
}
#[test]
fn approval_required_when_read_only_false_and_open_world() {
let annotations = annotations(Some(false), None, Some(true));
assert_eq!(requires_mcp_tool_approval(&annotations), true);
}
#[test]
fn approval_not_required_when_read_only_true() {
let annotations = annotations(Some(true), Some(true), Some(true));
assert_eq!(requires_mcp_tool_approval(&annotations), false);
}
}

View File

@@ -1,64 +0,0 @@
use std::collections::HashMap;
use std::collections::HashSet;
use std::path::PathBuf;
use codex_protocol::user_input::UserInput;
use crate::connectors;
use crate::skills::SkillMetadata;
use crate::skills::injection::extract_tool_mentions;
pub(crate) struct CollectedToolMentions {
pub(crate) plain_names: HashSet<String>,
pub(crate) paths: HashSet<String>,
}
pub(crate) fn collect_tool_mentions_from_messages(messages: &[String]) -> CollectedToolMentions {
let mut plain_names = HashSet::new();
let mut paths = HashSet::new();
for message in messages {
let mentions = extract_tool_mentions(message);
plain_names.extend(mentions.plain_names().map(str::to_string));
paths.extend(mentions.paths().map(str::to_string));
}
CollectedToolMentions { plain_names, paths }
}
pub(crate) fn collect_explicit_app_paths(input: &[UserInput]) -> Vec<String> {
input
.iter()
.filter_map(|item| match item {
UserInput::Mention { path, .. } => Some(path.clone()),
_ => None,
})
.collect()
}
pub(crate) fn build_skill_name_counts(
skills: &[SkillMetadata],
disabled_paths: &HashSet<PathBuf>,
) -> (HashMap<String, usize>, HashMap<String, usize>) {
let mut exact_counts: HashMap<String, usize> = HashMap::new();
let mut lower_counts: HashMap<String, usize> = HashMap::new();
for skill in skills {
if disabled_paths.contains(&skill.path) {
continue;
}
*exact_counts.entry(skill.name.clone()).or_insert(0) += 1;
*lower_counts
.entry(skill.name.to_ascii_lowercase())
.or_insert(0) += 1;
}
(exact_counts, lower_counts)
}
pub(crate) fn build_connector_slug_counts(
connectors: &[connectors::AppInfo],
) -> HashMap<String, usize> {
let mut counts: HashMap<String, usize> = HashMap::new();
for connector in connectors {
let slug = connectors::connector_mention_slug(connector);
*counts.entry(slug).or_insert(0) += 1;
}
counts
}

View File

@@ -5,11 +5,10 @@
//! 2. User-defined entries inside `~/.codex/config.toml` under the `model_providers`
//! key. These override or extend the defaults at runtime.
use crate::auth::AuthMode;
use crate::error::EnvVarError;
use codex_api::Provider as ApiProvider;
use codex_api::WireApi as ApiWireApi;
use codex_api::provider::RetryConfig as ApiRetryConfig;
use codex_app_server_protocol::AuthMode;
use http::HeaderMap;
use http::header::HeaderName;
use http::header::HeaderValue;
@@ -20,6 +19,7 @@ use std::collections::HashMap;
use std::env::VarError;
use std::time::Duration;
use crate::error::EnvVarError;
const DEFAULT_STREAM_IDLE_TIMEOUT_MS: u64 = 300_000;
const DEFAULT_STREAM_MAX_RETRIES: u64 = 5;
const DEFAULT_REQUEST_MAX_RETRIES: u64 = 4;
@@ -43,6 +43,10 @@ pub enum WireApi {
/// The Responses API exposed by OpenAI at `/v1/responses`.
Responses,
/// Experimental: Responses API over WebSocket transport.
#[serde(rename = "responses_websocket")]
ResponsesWebsocket,
/// Regular Chat Completions compatible with `/v1/chat/completions`.
#[default]
Chat,
@@ -101,10 +105,6 @@ pub struct ModelProviderInfo {
/// and API key (if needed) comes from the "env_key" environment variable.
#[serde(default)]
pub requires_openai_auth: bool,
/// Whether this provider supports the Responses API WebSocket transport.
#[serde(default)]
pub supports_websockets: bool,
}
impl ModelProviderInfo {
@@ -162,6 +162,7 @@ impl ModelProviderInfo {
query_params: self.query_params.clone(),
wire: match self.wire_api {
WireApi::Responses => ApiWireApi::Responses,
WireApi::ResponsesWebsocket => ApiWireApi::Responses,
WireApi::Chat => ApiWireApi::Chat,
},
headers,
@@ -253,7 +254,6 @@ impl ModelProviderInfo {
stream_max_retries: None,
stream_idle_timeout_ms: None,
requires_openai_auth: true,
supports_websockets: true,
}
}
@@ -332,7 +332,6 @@ pub fn create_oss_provider_with_base_url(base_url: &str, wire_api: WireApi) -> M
stream_max_retries: None,
stream_idle_timeout_ms: None,
requires_openai_auth: false,
supports_websockets: false,
}
}
@@ -361,7 +360,6 @@ base_url = "http://localhost:11434/v1"
stream_max_retries: None,
stream_idle_timeout_ms: None,
requires_openai_auth: false,
supports_websockets: false,
};
let provider: ModelProviderInfo = toml::from_str(azure_provider_toml).unwrap();
@@ -392,7 +390,6 @@ query_params = { api-version = "2025-04-01-preview" }
stream_max_retries: None,
stream_idle_timeout_ms: None,
requires_openai_auth: false,
supports_websockets: false,
};
let provider: ModelProviderInfo = toml::from_str(azure_provider_toml).unwrap();
@@ -426,7 +423,6 @@ env_http_headers = { "X-Example-Env-Header" = "EXAMPLE_ENV_VAR" }
stream_max_retries: None,
stream_idle_timeout_ms: None,
requires_openai_auth: false,
supports_websockets: false,
};
let provider: ModelProviderInfo = toml::from_str(azure_provider_toml).unwrap();
@@ -458,7 +454,6 @@ env_http_headers = { "X-Example-Env-Header" = "EXAMPLE_ENV_VAR" }
stream_max_retries: None,
stream_idle_timeout_ms: None,
requires_openai_auth: false,
supports_websockets: false,
};
let api = provider.to_api_provider(None).expect("api provider");
assert!(
@@ -481,7 +476,6 @@ env_http_headers = { "X-Example-Env-Header" = "EXAMPLE_ENV_VAR" }
stream_max_retries: None,
stream_idle_timeout_ms: None,
requires_openai_auth: false,
supports_websockets: false,
};
let named_api = named_provider.to_api_provider(None).expect("api provider");
assert!(named_api.is_azure_responses_endpoint());
@@ -506,7 +500,6 @@ env_http_headers = { "X-Example-Env-Header" = "EXAMPLE_ENV_VAR" }
stream_max_retries: None,
stream_idle_timeout_ms: None,
requires_openai_auth: false,
supports_websockets: false,
};
let api = provider.to_api_provider(None).expect("api provider");
assert!(
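These fixtures deserialize TOML directly into `ModelProviderInfo`. For orientation, a hedged sketch of a user-defined entry in the same shape; the `name` field and the serde casing of `wire_api` are assumptions not visible in this diff:

```rust
// Hypothetical user-defined provider, mirroring the test fixtures above.
let provider_toml = r#"
name = "Example Azure"
base_url = "https://example.openai.azure.com/openai"
env_key = "AZURE_OPENAI_API_KEY"  # API key env var, per the doc comment
wire_api = "responses"            # assumed serde casing
query_params = { api-version = "2025-04-01-preview" }
"#;
let provider: ModelProviderInfo = toml::from_str(provider_toml).unwrap();
assert!(!provider.requires_openai_auth); // #[serde(default)] => false
```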

View File

@@ -28,7 +28,7 @@ fn plan_preset() -> CollaborationModeMask {
name: "Plan".to_string(),
mode: Some(ModeKind::Plan),
model: None,
reasoning_effort: Some(Some(ReasoningEffort::Medium)),
reasoning_effort: Some(Some(ReasoningEffort::High)),
developer_instructions: Some(Some(COLLABORATION_MODE_PLAN.to_string())),
}
}

View File

@@ -2,7 +2,6 @@ use super::cache::ModelsCacheManager;
use crate::api_bridge::auth_provider_from_auth;
use crate::api_bridge::map_api_error;
use crate::auth::AuthManager;
use crate::auth::AuthMode;
use crate::config::Config;
use crate::default_client::build_reqwest_client;
use crate::error::CodexErr;
@@ -14,6 +13,7 @@ use crate::models_manager::model_info;
use crate::models_manager::model_presets::builtin_model_presets;
use codex_api::ModelsClient;
use codex_api::ReqwestTransport;
use codex_app_server_protocol::AuthMode;
use codex_protocol::config_types::CollaborationModeMask;
use codex_protocol::openai_models::ModelInfo;
use codex_protocol::openai_models::ModelPreset;
@@ -61,7 +61,7 @@ impl ModelsManager {
let cache_path = codex_home.join(MODEL_CACHE_FILE);
let cache_manager = ModelsCacheManager::new(cache_path, DEFAULT_MODEL_CACHE_TTL);
Self {
local_models: builtin_model_presets(auth_manager.get_internal_auth_mode()),
local_models: builtin_model_presets(auth_manager.get_auth_mode()),
remote_models: RwLock::new(Self::load_remote_models_from_file().unwrap_or_default()),
auth_manager,
etag: RwLock::new(None),
@@ -175,7 +175,7 @@ impl ModelsManager {
refresh_strategy: RefreshStrategy,
) -> CoreResult<()> {
if !config.features.enabled(Feature::RemoteModels)
|| self.auth_manager.get_internal_auth_mode() == Some(AuthMode::ApiKey)
|| self.auth_manager.get_auth_mode() == Some(AuthMode::ApiKey)
{
return Ok(());
}
@@ -204,8 +204,7 @@ impl ModelsManager {
let _timer =
codex_otel::start_global_timer("codex.remote_models.fetch_update.duration_ms", &[]);
let auth = self.auth_manager.auth().await;
let auth_mode = self.auth_manager.get_internal_auth_mode();
let api_provider = self.provider.to_api_provider(auth_mode)?;
let api_provider = self.provider.to_api_provider(Some(AuthMode::ChatGPT))?;
let api_auth = auth_provider_from_auth(auth.clone(), &self.provider)?;
let transport = ReqwestTransport::new(build_reqwest_client());
let client = ModelsClient::new(transport, api_provider, api_auth);
@@ -272,10 +271,7 @@ impl ModelsManager {
let remote_presets: Vec<ModelPreset> = remote_models.into_iter().map(Into::into).collect();
let existing_presets = self.local_models.clone();
let mut merged_presets = ModelPreset::merge(remote_presets, existing_presets);
let chatgpt_mode = matches!(
self.auth_manager.get_internal_auth_mode(),
Some(AuthMode::ChatGPT)
);
let chatgpt_mode = self.auth_manager.get_auth_mode() == Some(AuthMode::ChatGPT);
merged_presets = ModelPreset::filter_by_auth(merged_presets, chatgpt_mode);
for preset in &mut merged_presets {
@@ -319,7 +315,7 @@ impl ModelsManager {
let cache_path = codex_home.join(MODEL_CACHE_FILE);
let cache_manager = ModelsCacheManager::new(cache_path, DEFAULT_MODEL_CACHE_TTL);
Self {
local_models: builtin_model_presets(auth_manager.get_internal_auth_mode()),
local_models: builtin_model_presets(auth_manager.get_auth_mode()),
remote_models: RwLock::new(Self::load_remote_models_from_file().unwrap_or_default()),
auth_manager,
etag: RwLock::new(None),
@@ -436,7 +432,6 @@ mod tests {
stream_max_retries: Some(0),
stream_idle_timeout_ms: Some(5_000),
requires_openai_auth: false,
supports_websockets: false,
}
}

View File

@@ -1,17 +1,19 @@
use std::collections::BTreeMap;
use codex_protocol::config_types::Personality;
use codex_protocol::config_types::Verbosity;
use codex_protocol::openai_models::ApplyPatchToolType;
use codex_protocol::openai_models::ConfigShellToolType;
use codex_protocol::openai_models::ModelInfo;
use codex_protocol::openai_models::ModelInstructionsVariables;
use codex_protocol::openai_models::ModelMessages;
use codex_protocol::openai_models::ModelInstructionsTemplate;
use codex_protocol::openai_models::ModelVisibility;
use codex_protocol::openai_models::PersonalityMessages;
use codex_protocol::openai_models::ReasoningEffort;
use codex_protocol::openai_models::ReasoningEffortPreset;
use codex_protocol::openai_models::TruncationMode;
use codex_protocol::openai_models::TruncationPolicyConfig;
use crate::config::Config;
use crate::features::Feature;
use crate::truncate::approx_bytes_for_tokens;
use tracing::warn;
@@ -27,11 +29,8 @@ const GPT_5_1_CODEX_MAX_INSTRUCTIONS: &str = include_str!("../../gpt-5.1-codex-m
const GPT_5_2_CODEX_INSTRUCTIONS: &str = include_str!("../../gpt-5.2-codex_prompt.md");
const GPT_5_2_CODEX_INSTRUCTIONS_TEMPLATE: &str =
include_str!("../../templates/model_instructions/gpt-5.2-codex_instructions_template.md");
const GPT_5_2_CODEX_PERSONALITY_FRIENDLY: &str =
include_str!("../../templates/personalities/gpt-5.2-codex_friendly.md");
const GPT_5_2_CODEX_PERSONALITY_PRAGMATIC: &str =
include_str!("../../templates/personalities/gpt-5.2-codex_pragmatic.md");
const PERSONALITY_FRIENDLY: &str = include_str!("../../templates/personalities/friendly.md");
const PERSONALITY_PRAGMATIC: &str = include_str!("../../templates/personalities/pragmatic.md");
pub(crate) const CONTEXT_WINDOW_272K: i64 = 272_000;
@@ -55,7 +54,7 @@ macro_rules! model_info {
priority: 99,
upgrade: None,
base_instructions: BASE_INSTRUCTIONS.to_string(),
model_messages: None,
model_instructions_template: None,
supports_reasoning_summaries: false,
support_verbosity: false,
default_verbosity: None,
@@ -101,11 +100,8 @@ pub(crate) fn with_config_overrides(mut model: ModelInfo, config: &Config) -> Mo
if let Some(base_instructions) = &config.base_instructions {
model.base_instructions = base_instructions.clone();
model.model_messages = None;
} else if !config.features.enabled(Feature::Personality) {
model.model_messages = None;
model.model_instructions_template = None;
}
model
}
@@ -173,13 +169,15 @@ pub(crate) fn find_model_info_for_slug(slug: &str) -> ModelInfo {
model_info!(
slug,
base_instructions: GPT_5_2_CODEX_INSTRUCTIONS.to_string(),
model_messages: Some(ModelMessages {
instructions_template: Some(GPT_5_2_CODEX_INSTRUCTIONS_TEMPLATE.to_string()),
instructions_variables: Some(ModelInstructionsVariables {
personality_default: Some("".to_string()),
personality_friendly: Some(GPT_5_2_CODEX_PERSONALITY_FRIENDLY.to_string()),
personality_pragmatic: Some(GPT_5_2_CODEX_PERSONALITY_PRAGMATIC.to_string()),
}),
model_instructions_template: Some(ModelInstructionsTemplate {
template: GPT_5_2_CODEX_INSTRUCTIONS_TEMPLATE.to_string(),
personality_messages: Some(PersonalityMessages(BTreeMap::from([(
Personality::Friendly,
PERSONALITY_FRIENDLY.to_string(),
), (
Personality::Pragmatic,
PERSONALITY_PRAGMATIC.to_string(),
)]))),
}),
apply_patch_tool_type: Some(ApplyPatchToolType::Freeform),
shell_type: ConfigShellToolType::ShellCommand,
@@ -215,15 +213,6 @@ pub(crate) fn find_model_info_for_slug(slug: &str) -> ModelInfo {
truncation_policy: TruncationPolicyConfig::tokens(10_000),
context_window: Some(CONTEXT_WINDOW_272K),
supported_reasoning_levels: supported_reasoning_level_low_medium_high_xhigh(),
base_instructions: GPT_5_2_CODEX_INSTRUCTIONS.to_string(),
model_messages: Some(ModelMessages {
instructions_template: Some(GPT_5_2_CODEX_INSTRUCTIONS_TEMPLATE.to_string()),
instructions_variables: Some(ModelInstructionsVariables {
personality_default: Some("".to_string()),
personality_friendly: Some(GPT_5_2_CODEX_PERSONALITY_FRIENDLY.to_string()),
personality_pragmatic: Some(GPT_5_2_CODEX_PERSONALITY_PRAGMATIC.to_string()),
}),
}),
)
} else if slug.starts_with("gpt-5.1-codex-max") {
model_info!(
@@ -270,7 +259,9 @@ pub(crate) fn find_model_info_for_slug(slug: &str) -> ModelInfo {
truncation_policy: TruncationPolicyConfig::tokens(10_000),
context_window: Some(CONTEXT_WINDOW_272K),
)
} else if slug.starts_with("gpt-5.2") || slug.starts_with("boomslang") {
} else if (slug.starts_with("gpt-5.2") || slug.starts_with("boomslang"))
&& !slug.contains("codex")
{
model_info!(
slug,
apply_patch_tool_type: Some(ApplyPatchToolType::Freeform),
@@ -285,7 +276,7 @@ pub(crate) fn find_model_info_for_slug(slug: &str) -> ModelInfo {
context_window: Some(CONTEXT_WINDOW_272K),
supported_reasoning_levels: supported_reasoning_level_low_medium_high_xhigh_non_codex(),
)
} else if slug.starts_with("gpt-5.1") {
} else if slug.starts_with("gpt-5.1") && !slug.contains("codex") {
model_info!(
slug,
apply_patch_tool_type: Some(ApplyPatchToolType::Freeform),

Some files were not shown because too many files have changed in this diff.