Compare commits


1 Commit

Author: Colin Young
SHA1: 8db0700334
Message: app-server: expose image-context tri-state
Date: 2026-02-10 13:03:57 -08:00
330 changed files with 6064 additions and 15662 deletions


@@ -1,6 +1,6 @@
[codespell]
# Ref: https://github.com/codespell-project/codespell#using-a-config-file
skip = .git*,vendor,*-lock.yaml,*.lock,.codespellrc,*test.ts,*.jsonl,frame*.txt,*.snap,*.snap.new,*meriyah.umd.min.js
skip = .git*,vendor,*-lock.yaml,*.lock,.codespellrc,*test.ts,*.jsonl,frame*.txt,*.snap,*.snap.new
check-hidden = true
ignore-regex = ^\s*"image/\S+": ".*|\b(afterAll)\b
ignore-words-list = ratatui,ser,iTerm,iterm2,iterm


@@ -103,42 +103,9 @@ jobs:
CODEX_BWRAP_ENABLE_FFI: ${{ contains(matrix.target, 'unknown-linux') && '1' || '0' }}
shell: bash
run: |
bazel_args=(
test
//...
--build_metadata=REPO_URL=https://github.com/openai/codex.git
--build_metadata=COMMIT_SHA=$(git rev-parse HEAD)
--build_metadata=ROLE=CI
--build_metadata=VISIBILITY=PUBLIC
)
if [[ -n "${BUILDBUDDY_API_KEY:-}" ]]; then
echo "BuildBuddy API key is available; using remote Bazel configuration."
bazel $BAZEL_STARTUP_ARGS \
--bazelrc=.github/workflows/ci.bazelrc \
"${bazel_args[@]}" \
"--remote_header=x-buildbuddy-api-key=$BUILDBUDDY_API_KEY"
else
echo "BuildBuddy API key is not available; using local Bazel configuration."
# Keep fork/community PRs on Bazel but disable remote services that are
# configured in .bazelrc and require auth.
#
# Flag docs:
# - Command-line reference: https://bazel.build/reference/command-line-reference
# - Remote caching overview: https://bazel.build/remote/caching
# - Remote execution overview: https://bazel.build/remote/rbe
# - Build Event Protocol overview: https://bazel.build/remote/bep
#
# --noexperimental_remote_repo_contents_cache:
# disable remote repo contents cache enabled in .bazelrc startup options.
# https://bazel.build/reference/command-line-reference#startup_options-flag--experimental_remote_repo_contents_cache
# --remote_cache= and --remote_executor=:
# clear remote cache/execution endpoints configured in .bazelrc.
# https://bazel.build/reference/command-line-reference#common_options-flag--remote_cache
# https://bazel.build/reference/command-line-reference#common_options-flag--remote_executor
bazel $BAZEL_STARTUP_ARGS \
--noexperimental_remote_repo_contents_cache \
"${bazel_args[@]}" \
--remote_cache= \
--remote_executor=
fi
bazel $BAZEL_STARTUP_ARGS --bazelrc=.github/workflows/ci.bazelrc test //... \
--build_metadata=REPO_URL=https://github.com/openai/codex.git \
--build_metadata=COMMIT_SHA=$(git rev-parse HEAD) \
--build_metadata=ROLE=CI \
--build_metadata=VISIBILITY=PUBLIC \
"--remote_header=x-buildbuddy-api-key=$BUILDBUDDY_API_KEY"


@@ -379,8 +379,22 @@ jobs:
cargo chef cook --recipe-path "$RECIPE" --target ${{ matrix.target }} --release --all-features
- name: cargo clippy
id: clippy
run: cargo clippy --target ${{ matrix.target }} --all-features --tests --profile ${{ matrix.profile }} -- -D warnings
# Running `cargo build` from the workspace root builds the workspace using
# the union of all features from third-party crates. This can mask errors
# where individual crates have underspecified features. To avoid this, we
# run `cargo check` for each crate individually, though because this is
# slower, we only do this for the x86_64-unknown-linux-gnu target.
- name: cargo check individual crates
id: cargo_check_all_crates
if: ${{ matrix.target == 'x86_64-unknown-linux-gnu' && matrix.profile != 'release' }}
continue-on-error: true
run: |
find . -name Cargo.toml -mindepth 2 -maxdepth 2 -print0 \
| xargs -0 -n1 -I{} bash -c 'cd "$(dirname "{}")" && cargo check --profile ${{ matrix.profile }}'
# Save caches explicitly; make non-fatal so cache packaging
# never fails the overall job. Only save when key wasn't hit.
- name: Save cargo home cache
@@ -433,6 +447,15 @@ jobs:
/var/cache/apt
key: apt-${{ matrix.runner }}-${{ matrix.target }}-v1
# Fail the job if any of the previous steps failed.
- name: verify all steps passed
if: |
steps.clippy.outcome == 'failure' ||
steps.cargo_check_all_crates.outcome == 'failure'
run: |
echo "One or more checks failed (clippy or cargo_check_all_crates). See logs for details."
exit 1
tests:
name: Tests — ${{ matrix.runner }} - ${{ matrix.target }}
runs-on: ${{ matrix.runs_on || matrix.runner }}


@@ -1,269 +0,0 @@
name: rust-release-windows
on:
workflow_call:
inputs:
release-lto:
required: true
type: string
secrets:
AZURE_TRUSTED_SIGNING_CLIENT_ID:
required: true
AZURE_TRUSTED_SIGNING_TENANT_ID:
required: true
AZURE_TRUSTED_SIGNING_SUBSCRIPTION_ID:
required: true
AZURE_TRUSTED_SIGNING_ENDPOINT:
required: true
AZURE_TRUSTED_SIGNING_ACCOUNT_NAME:
required: true
AZURE_TRUSTED_SIGNING_CERTIFICATE_PROFILE_NAME:
required: true
jobs:
build-windows-binaries:
name: Build Windows binaries - ${{ matrix.runner }} - ${{ matrix.target }} - ${{ matrix.bundle }}
runs-on: ${{ matrix.runs_on }}
timeout-minutes: 60
permissions:
contents: read
defaults:
run:
working-directory: codex-rs
env:
CARGO_PROFILE_RELEASE_LTO: ${{ inputs.release-lto }}
strategy:
fail-fast: false
matrix:
include:
- runner: windows-x64
target: x86_64-pc-windows-msvc
bundle: primary
build_args: --bin codex --bin codex-responses-api-proxy
runs_on:
group: codex-runners
labels: codex-windows-x64
- runner: windows-arm64
target: aarch64-pc-windows-msvc
bundle: primary
build_args: --bin codex --bin codex-responses-api-proxy
runs_on:
group: codex-runners
labels: codex-windows-arm64
- runner: windows-x64
target: x86_64-pc-windows-msvc
bundle: helpers
build_args: --bin codex-windows-sandbox-setup --bin codex-command-runner
runs_on:
group: codex-runners
labels: codex-windows-x64
- runner: windows-arm64
target: aarch64-pc-windows-msvc
bundle: helpers
build_args: --bin codex-windows-sandbox-setup --bin codex-command-runner
runs_on:
group: codex-runners
labels: codex-windows-arm64
steps:
- uses: actions/checkout@v6
- name: Print runner specs (Windows)
shell: powershell
run: |
$computer = Get-CimInstance Win32_ComputerSystem
$cpu = Get-CimInstance Win32_Processor | Select-Object -First 1
$ramGiB = [math]::Round($computer.TotalPhysicalMemory / 1GB, 1)
Write-Host "Runner: $env:RUNNER_NAME"
Write-Host "OS: $([System.Environment]::OSVersion.VersionString)"
Write-Host "CPU: $($cpu.Name)"
Write-Host "Logical CPUs: $($computer.NumberOfLogicalProcessors)"
Write-Host "Physical CPUs: $($computer.NumberOfProcessors)"
Write-Host "Total RAM: $ramGiB GiB"
Write-Host "Disk usage:"
Get-PSDrive -PSProvider FileSystem | Format-Table -AutoSize Name, @{Name='Size(GB)';Expression={[math]::Round(($_.Used + $_.Free) / 1GB, 1)}}, @{Name='Free(GB)';Expression={[math]::Round($_.Free / 1GB, 1)}}
- uses: dtolnay/rust-toolchain@1.93
with:
targets: ${{ matrix.target }}
- uses: actions/cache@v5
with:
path: |
~/.cargo/bin/
~/.cargo/registry/index/
~/.cargo/registry/cache/
~/.cargo/git/db/
${{ github.workspace }}/codex-rs/target/
key: cargo-${{ matrix.runner }}-${{ matrix.target }}-release-windows-${{ matrix.bundle }}-${{ hashFiles('**/Cargo.lock') }}
- name: Cargo build (Windows binaries)
shell: bash
run: |
cargo build --target ${{ matrix.target }} --release ${{ matrix.build_args }}
- name: Stage Windows binaries
shell: bash
run: |
output_dir="target/${{ matrix.target }}/release/staged-${{ matrix.bundle }}"
mkdir -p "$output_dir"
if [[ "${{ matrix.bundle }}" == "primary" ]]; then
cp target/${{ matrix.target }}/release/codex.exe "$output_dir/codex.exe"
cp target/${{ matrix.target }}/release/codex-responses-api-proxy.exe "$output_dir/codex-responses-api-proxy.exe"
else
cp target/${{ matrix.target }}/release/codex-windows-sandbox-setup.exe "$output_dir/codex-windows-sandbox-setup.exe"
cp target/${{ matrix.target }}/release/codex-command-runner.exe "$output_dir/codex-command-runner.exe"
fi
- name: Upload Windows binaries
uses: actions/upload-artifact@v6
with:
name: windows-binaries-${{ matrix.target }}-${{ matrix.bundle }}
path: |
codex-rs/target/${{ matrix.target }}/release/staged-${{ matrix.bundle }}/*
build-windows:
needs:
- build-windows-binaries
name: Build - ${{ matrix.runner }} - ${{ matrix.target }}
runs-on: ${{ matrix.runs_on }}
timeout-minutes: 60
permissions:
contents: read
id-token: write
defaults:
run:
working-directory: codex-rs
strategy:
fail-fast: false
matrix:
include:
- runner: windows-x64
target: x86_64-pc-windows-msvc
runs_on:
group: codex-runners
labels: codex-windows-x64
- runner: windows-arm64
target: aarch64-pc-windows-msvc
runs_on:
group: codex-runners
labels: codex-windows-arm64
steps:
- uses: actions/checkout@v6
- name: Download prebuilt Windows primary binaries
uses: actions/download-artifact@v7
with:
name: windows-binaries-${{ matrix.target }}-primary
path: codex-rs/target/${{ matrix.target }}/release
- name: Download prebuilt Windows helper binaries
uses: actions/download-artifact@v7
with:
name: windows-binaries-${{ matrix.target }}-helpers
path: codex-rs/target/${{ matrix.target }}/release
- name: Verify binaries
shell: bash
run: |
set -euo pipefail
ls -lh target/${{ matrix.target }}/release/codex.exe
ls -lh target/${{ matrix.target }}/release/codex-responses-api-proxy.exe
ls -lh target/${{ matrix.target }}/release/codex-windows-sandbox-setup.exe
ls -lh target/${{ matrix.target }}/release/codex-command-runner.exe
- name: Sign Windows binaries with Azure Trusted Signing
uses: ./.github/actions/windows-code-sign
with:
target: ${{ matrix.target }}
client-id: ${{ secrets.AZURE_TRUSTED_SIGNING_CLIENT_ID }}
tenant-id: ${{ secrets.AZURE_TRUSTED_SIGNING_TENANT_ID }}
subscription-id: ${{ secrets.AZURE_TRUSTED_SIGNING_SUBSCRIPTION_ID }}
endpoint: ${{ secrets.AZURE_TRUSTED_SIGNING_ENDPOINT }}
account-name: ${{ secrets.AZURE_TRUSTED_SIGNING_ACCOUNT_NAME }}
certificate-profile-name: ${{ secrets.AZURE_TRUSTED_SIGNING_CERTIFICATE_PROFILE_NAME }}
- name: Stage artifacts
shell: bash
run: |
dest="dist/${{ matrix.target }}"
mkdir -p "$dest"
cp target/${{ matrix.target }}/release/codex.exe "$dest/codex-${{ matrix.target }}.exe"
cp target/${{ matrix.target }}/release/codex-responses-api-proxy.exe "$dest/codex-responses-api-proxy-${{ matrix.target }}.exe"
cp target/${{ matrix.target }}/release/codex-windows-sandbox-setup.exe "$dest/codex-windows-sandbox-setup-${{ matrix.target }}.exe"
cp target/${{ matrix.target }}/release/codex-command-runner.exe "$dest/codex-command-runner-${{ matrix.target }}.exe"
- if: ${{ matrix.runner == 'windows-arm64' }}
name: Install zstd
shell: powershell
run: choco install -y zstandard
- name: Compress artifacts
shell: bash
run: |
# Path that contains the uncompressed binaries for the current
# ${{ matrix.target }}
dest="dist/${{ matrix.target }}"
repo_root=$PWD
# For compatibility with environments that lack the `zstd` tool we
# additionally create a `.tar.gz` and `.zip` for every Windows binary.
# The end result is:
# codex-<target>.zst
# codex-<target>.tar.gz
# codex-<target>.zip
for f in "$dest"/*; do
base="$(basename "$f")"
# Skip files that are already archives (shouldn't happen, but be
# safe).
if [[ "$base" == *.tar.gz || "$base" == *.zip || "$base" == *.dmg ]]; then
continue
fi
# Don't try to compress signature bundles.
if [[ "$base" == *.sigstore ]]; then
continue
fi
# Create per-binary tar.gz
tar -C "$dest" -czf "$dest/${base}.tar.gz" "$base"
# Create zip archive for Windows binaries.
# Must run from inside the dest dir so 7z won't embed the
# directory path inside the zip.
if [[ "$base" == "codex-${{ matrix.target }}.exe" ]]; then
# Bundle the sandbox helper binaries into the main codex zip so
# WinGet installs include the required helpers next to codex.exe.
# Fall back to the single-binary zip if the helpers are missing
# to avoid breaking releases.
bundle_dir="$(mktemp -d)"
runner_src="$dest/codex-command-runner-${{ matrix.target }}.exe"
setup_src="$dest/codex-windows-sandbox-setup-${{ matrix.target }}.exe"
if [[ -f "$runner_src" && -f "$setup_src" ]]; then
cp "$dest/$base" "$bundle_dir/$base"
cp "$runner_src" "$bundle_dir/codex-command-runner.exe"
cp "$setup_src" "$bundle_dir/codex-windows-sandbox-setup.exe"
# Use an absolute path so bundle zips land in the real dist
# dir even when 7z runs from a temp directory.
(cd "$bundle_dir" && 7z a "$repo_root/$dest/${base}.zip" .)
else
echo "warning: missing sandbox binaries; falling back to single-binary zip"
echo "warning: expected $runner_src and $setup_src"
(cd "$dest" && 7z a "${base}.zip" "$base")
fi
rm -rf "$bundle_dir"
else
(cd "$dest" && 7z a "${base}.zip" "$base")
fi
# Keep raw executables and produce .zst alongside them.
zstd -T0 -19 "$dest/$base"
done
- uses: actions/upload-artifact@v6
with:
name: ${{ matrix.target }}
path: |
codex-rs/dist/${{ matrix.target }}/*


@@ -48,7 +48,7 @@ jobs:
build:
needs: tag-check
name: Build - ${{ matrix.runner }} - ${{ matrix.target }}
runs-on: ${{ matrix.runs_on || matrix.runner }}
runs-on: ${{ matrix.runner }}
timeout-minutes: 60
permissions:
contents: read
@@ -76,38 +76,13 @@ jobs:
target: aarch64-unknown-linux-musl
- runner: ubuntu-24.04-arm
target: aarch64-unknown-linux-gnu
- runner: windows-latest
target: x86_64-pc-windows-msvc
- runner: windows-11-arm
target: aarch64-pc-windows-msvc
steps:
- uses: actions/checkout@v6
- name: Print runner specs (Linux)
if: ${{ runner.os == 'Linux' }}
shell: bash
run: |
set -euo pipefail
cpu_model="$(lscpu | awk -F: '/Model name/ {gsub(/^[ \t]+/, "", $2); print $2; exit}')"
total_ram="$(awk '/MemTotal/ {printf "%.1f GiB\n", $2 / 1024 / 1024}' /proc/meminfo)"
echo "Runner: ${RUNNER_NAME:-unknown}"
echo "OS: $(uname -a)"
echo "CPU model: ${cpu_model}"
echo "Logical CPUs: $(nproc)"
echo "Total RAM: ${total_ram}"
echo "Disk usage:"
df -h .
- name: Print runner specs (macOS)
if: ${{ runner.os == 'macOS' }}
shell: bash
run: |
set -euo pipefail
total_ram="$(sysctl -n hw.memsize | awk '{printf "%.1f GiB\n", $1 / 1024 / 1024 / 1024}')"
echo "Runner: ${RUNNER_NAME:-unknown}"
echo "OS: $(sw_vers -productName) $(sw_vers -productVersion)"
echo "Hardware model: $(sysctl -n hw.model)"
echo "CPU architecture: $(uname -m)"
echo "Logical CPUs: $(sysctl -n hw.logicalcpu)"
echo "Physical CPUs: $(sysctl -n hw.physicalcpu)"
echo "Total RAM: ${total_ram}"
echo "Disk usage:"
df -h .
- name: Install Linux bwrap build dependencies
if: ${{ runner.os == 'Linux' }}
shell: bash
@@ -220,7 +195,11 @@ jobs:
- name: Cargo build
shell: bash
run: |
cargo build --target ${{ matrix.target }} --release --bin codex --bin codex-responses-api-proxy
if [[ "${{ contains(matrix.target, 'windows') }}" == 'true' ]]; then
cargo build --target ${{ matrix.target }} --release --bin codex --bin codex-responses-api-proxy --bin codex-windows-sandbox-setup --bin codex-command-runner
else
cargo build --target ${{ matrix.target }} --release --bin codex --bin codex-responses-api-proxy
fi
- if: ${{ contains(matrix.target, 'linux') }}
name: Cosign Linux artifacts
@@ -229,6 +208,18 @@ jobs:
target: ${{ matrix.target }}
artifacts-dir: ${{ github.workspace }}/codex-rs/target/${{ matrix.target }}/release
- if: ${{ contains(matrix.target, 'windows') }}
name: Sign Windows binaries with Azure Trusted Signing
uses: ./.github/actions/windows-code-sign
with:
target: ${{ matrix.target }}
client-id: ${{ secrets.AZURE_TRUSTED_SIGNING_CLIENT_ID }}
tenant-id: ${{ secrets.AZURE_TRUSTED_SIGNING_TENANT_ID }}
subscription-id: ${{ secrets.AZURE_TRUSTED_SIGNING_SUBSCRIPTION_ID }}
endpoint: ${{ secrets.AZURE_TRUSTED_SIGNING_ENDPOINT }}
account-name: ${{ secrets.AZURE_TRUSTED_SIGNING_ACCOUNT_NAME }}
certificate-profile-name: ${{ secrets.AZURE_TRUSTED_SIGNING_CERTIFICATE_PROFILE_NAME }}
- if: ${{ runner.os == 'macOS' }}
name: MacOS code signing (binaries)
uses: ./.github/actions/macos-code-sign
@@ -307,8 +298,15 @@ jobs:
dest="dist/${{ matrix.target }}"
mkdir -p "$dest"
cp target/${{ matrix.target }}/release/codex "$dest/codex-${{ matrix.target }}"
cp target/${{ matrix.target }}/release/codex-responses-api-proxy "$dest/codex-responses-api-proxy-${{ matrix.target }}"
if [[ "${{ matrix.runner }}" == windows* ]]; then
cp target/${{ matrix.target }}/release/codex.exe "$dest/codex-${{ matrix.target }}.exe"
cp target/${{ matrix.target }}/release/codex-responses-api-proxy.exe "$dest/codex-responses-api-proxy-${{ matrix.target }}.exe"
cp target/${{ matrix.target }}/release/codex-windows-sandbox-setup.exe "$dest/codex-windows-sandbox-setup-${{ matrix.target }}.exe"
cp target/${{ matrix.target }}/release/codex-command-runner.exe "$dest/codex-command-runner-${{ matrix.target }}.exe"
else
cp target/${{ matrix.target }}/release/codex "$dest/codex-${{ matrix.target }}"
cp target/${{ matrix.target }}/release/codex-responses-api-proxy "$dest/codex-responses-api-proxy-${{ matrix.target }}"
fi
if [[ "${{ matrix.target }}" == *linux* ]]; then
cp target/${{ matrix.target }}/release/codex.sigstore "$dest/codex-${{ matrix.target }}.sigstore"
@@ -319,18 +317,34 @@ jobs:
cp target/${{ matrix.target }}/release/codex-${{ matrix.target }}.dmg "$dest/codex-${{ matrix.target }}.dmg"
fi
- if: ${{ matrix.runner == 'windows-11-arm' }}
name: Install zstd
shell: powershell
run: choco install -y zstandard
- name: Compress artifacts
shell: bash
run: |
# Path that contains the uncompressed binaries for the current
# ${{ matrix.target }}
dest="dist/${{ matrix.target }}"
repo_root=$PWD
# We want to ship the raw Windows executables in the GitHub Release
# in addition to the compressed archives. Keep the originals for
# Windows targets; remove them elsewhere to limit the number of
# artifacts that end up in the GitHub Release.
keep_originals=false
if [[ "${{ matrix.runner }}" == windows* ]]; then
keep_originals=true
fi
# For compatibility with environments that lack the `zstd` tool we
# additionally create a `.tar.gz` alongside every binary we publish.
# The end result is:
# additionally create a `.tar.gz` for all platforms and `.zip` for
# Windows alongside every single binary that we publish. The end result is:
# codex-<target>.zst (existing)
# codex-<target>.tar.gz (new)
# codex-<target>.zip (only for Windows)
# 1. Produce a .tar.gz for every file in the directory *before* we
# run `zstd --rm`, because that flag deletes the original files.
@@ -350,9 +364,43 @@ jobs:
# Create per-binary tar.gz
tar -C "$dest" -czf "$dest/${base}.tar.gz" "$base"
# Also create .zst and remove the uncompressed binaries to keep
# non-Windows artifact directories small.
zstd -T0 -19 --rm "$dest/$base"
# Create zip archive for Windows binaries
# Must run from inside the dest dir so 7z won't
# embed the directory path inside the zip.
if [[ "${{ matrix.runner }}" == windows* ]]; then
if [[ "$base" == "codex-${{ matrix.target }}.exe" ]]; then
# Bundle the sandbox helper binaries into the main codex zip so
# WinGet installs include the required helpers next to codex.exe.
# Fall back to the single-binary zip if the helpers are missing
# to avoid breaking releases.
bundle_dir="$(mktemp -d)"
runner_src="$dest/codex-command-runner-${{ matrix.target }}.exe"
setup_src="$dest/codex-windows-sandbox-setup-${{ matrix.target }}.exe"
if [[ -f "$runner_src" && -f "$setup_src" ]]; then
cp "$dest/$base" "$bundle_dir/$base"
cp "$runner_src" "$bundle_dir/codex-command-runner.exe"
cp "$setup_src" "$bundle_dir/codex-windows-sandbox-setup.exe"
# Use an absolute path so bundle zips land in the real dist
# dir even when 7z runs from a temp directory.
(cd "$bundle_dir" && 7z a "$repo_root/$dest/${base}.zip" .)
else
echo "warning: missing sandbox binaries; falling back to single-binary zip"
echo "warning: expected $runner_src and $setup_src"
(cd "$dest" && 7z a "${base}.zip" "$base")
fi
rm -rf "$bundle_dir"
else
(cd "$dest" && 7z a "${base}.zip" "$base")
fi
fi
# Also create .zst (existing behaviour) *and* remove the original
# uncompressed binary to keep the directory small.
zstd_args=(-T0 -19)
if [[ "${keep_originals}" == false ]]; then
zstd_args+=(--rm)
fi
zstd "${zstd_args[@]}" "$dest/$base"
done
- uses: actions/upload-artifact@v6
@@ -363,13 +411,6 @@ jobs:
path: |
codex-rs/dist/${{ matrix.target }}/*
build-windows:
needs: tag-check
uses: ./.github/workflows/rust-release-windows.yml
with:
release-lto: ${{ contains(github.ref_name, '-alpha') && 'thin' || 'fat' }}
secrets: inherit
shell-tool-mcp:
name: shell-tool-mcp
needs: tag-check
@@ -382,7 +423,6 @@ jobs:
release:
needs:
- build
- build-windows
- shell-tool-mcp
name: release
runs-on: ubuntu-latest
@@ -431,7 +471,6 @@ jobs:
- name: Delete entries from dist/ that should not go in the release
run: |
rm -rf dist/shell-tool-mcp*
rm -rf dist/windows-binaries*
ls -R dist/

MODULE.bazel.lock generated

File diff suppressed because one or more lines are too long

NOTICE

@@ -4,6 +4,3 @@ Copyright 2025 OpenAI
This project includes code derived from [Ratatui](https://github.com/ratatui/ratatui), licensed under the MIT license.
Copyright (c) 2016-2022 Florian Dehau
Copyright (c) 2023-2025 The Ratatui Developers
This project includes Meriyah parser assets from [meriyah](https://github.com/meriyah/meriyah), licensed under the ISC license.
Copyright (c) 2019 and later, KFlash and others.


@@ -76,7 +76,7 @@ PACKAGE_NATIVE_COMPONENTS: dict[str, list[str]] = {
"codex-win32-x64": ["codex", "rg", "codex-windows-sandbox-setup", "codex-command-runner"],
"codex-win32-arm64": ["codex", "rg", "codex-windows-sandbox-setup", "codex-command-runner"],
"codex-responses-api-proxy": ["codex-responses-api-proxy"],
"codex-sdk": [],
"codex-sdk": ["codex"],
}
PACKAGE_TARGET_FILTERS: dict[str, str] = {
@@ -205,6 +205,7 @@ def main() -> int:
f"Staged version {version} for release in {staging_dir_str}\n\n"
"Verify the SDK contents:\n"
f" ls {staging_dir_str}/dist\n"
f" ls {staging_dir_str}/vendor\n"
" node -e \"import('./dist/index.js').then(() => console.log('ok'))\"\n\n"
)
else:
@@ -317,11 +318,12 @@ def stage_sources(staging_dir: Path, version: str, package: str) -> None:
if isinstance(scripts, dict):
scripts.pop("prepare", None)
dependencies = package_json.get("dependencies")
if not isinstance(dependencies, dict):
dependencies = {}
dependencies[CODEX_NPM_NAME] = version
package_json["dependencies"] = dependencies
files = package_json.get("files")
if isinstance(files, list):
if "vendor" not in files:
files.append("vendor")
else:
package_json["files"] = ["dist", "vendor"]
with open(staging_dir / "package.json", "w", encoding="utf-8") as out:
json.dump(package_json, out, indent=2)


@@ -1,3 +1 @@
exports_files([
"node-version.txt",
])

codex-rs/Cargo.lock generated

@@ -1348,12 +1348,12 @@ dependencies = [
"axum",
"base64 0.22.1",
"chrono",
"clap",
"codex-app-server-protocol",
"codex-arg0",
"codex-backend-client",
"codex-chatgpt",
"codex-cloud-requirements",
"codex-common",
"codex-core",
"codex-execpolicy",
"codex-feedback",
@@ -1362,13 +1362,9 @@ dependencies = [
"codex-protocol",
"codex-rmcp-client",
"codex-utils-absolute-path",
"codex-utils-cargo-bin",
"codex-utils-cli",
"codex-utils-json-to-toml",
"core_test_support",
"futures",
"os_info",
"owo-colors",
"pretty_assertions",
"rmcp",
"serde",
@@ -1378,7 +1374,6 @@ dependencies = [
"tempfile",
"time",
"tokio",
"tokio-tungstenite",
"toml 0.9.11+spec-1.1.0",
"tracing",
"tracing-subscriber",
@@ -1490,10 +1485,10 @@ version = "0.0.0"
dependencies = [
"anyhow",
"clap",
"codex-common",
"codex-core",
"codex-git",
"codex-utils-cargo-bin",
"codex-utils-cli",
"pretty_assertions",
"serde",
"serde_json",
@@ -1517,6 +1512,7 @@ dependencies = [
"codex-arg0",
"codex-chatgpt",
"codex-cloud-tasks",
"codex-common",
"codex-core",
"codex-exec",
"codex-execpolicy",
@@ -1528,7 +1524,6 @@ dependencies = [
"codex-stdio-to-uds",
"codex-tui",
"codex-utils-cargo-bin",
"codex-utils-cli",
"codex-windows-sandbox",
"libc",
"owo-colors",
@@ -1572,18 +1567,13 @@ version = "0.0.0"
dependencies = [
"async-trait",
"base64 0.22.1",
"chrono",
"codex-backend-client",
"codex-core",
"codex-otel",
"codex-protocol",
"hmac",
"pretty_assertions",
"serde",
"serde_json",
"sha2",
"tempfile",
"thiserror 2.0.18",
"tokio",
"toml 0.9.11+spec-1.1.0",
"tracing",
@@ -1599,10 +1589,10 @@ dependencies = [
"chrono",
"clap",
"codex-cloud-tasks-client",
"codex-common",
"codex-core",
"codex-login",
"codex-tui",
"codex-utils-cli",
"crossterm",
"owo-colors",
"pretty_assertions",
@@ -1634,22 +1624,17 @@ dependencies = [
]
[[package]]
name = "codex-config"
name = "codex-common"
version = "0.0.0"
dependencies = [
"anyhow",
"codex-app-server-protocol",
"codex-execpolicy",
"clap",
"codex-core",
"codex-lmstudio",
"codex-ollama",
"codex-protocol",
"codex-utils-absolute-path",
"futures",
"multimap",
"pretty_assertions",
"serde",
"serde_json",
"sha2",
"thiserror 2.0.18",
"tokio",
"toml 0.9.11+spec-1.1.0",
]
@@ -1675,7 +1660,7 @@ dependencies = [
"codex-arg0",
"codex-async-utils",
"codex-client",
"codex-config",
"codex-core",
"codex-execpolicy",
"codex-file-search",
"codex-git",
@@ -1685,7 +1670,6 @@ dependencies = [
"codex-otel",
"codex-protocol",
"codex-rmcp-client",
"codex-shell-command",
"codex-state",
"codex-utils-absolute-path",
"codex-utils-cargo-bin",
@@ -1712,6 +1696,7 @@ dependencies = [
"landlock",
"libc",
"maplit",
"multimap",
"notify",
"once_cell",
"openssl-sys",
@@ -1748,6 +1733,8 @@ dependencies = [
"tracing",
"tracing-subscriber",
"tracing-test",
"tree-sitter",
"tree-sitter-bash",
"url",
"uuid",
"walkdir",
@@ -1780,14 +1767,11 @@ dependencies = [
"clap",
"codex-arg0",
"codex-cloud-requirements",
"codex-common",
"codex-core",
"codex-protocol",
"codex-utils-absolute-path",
"codex-utils-cargo-bin",
"codex-utils-cli",
"codex-utils-elapsed",
"codex-utils-oss",
"codex-utils-sandbox-summary",
"core_test_support",
"libc",
"owo-colors",
@@ -2008,9 +1992,9 @@ version = "0.0.0"
dependencies = [
"anyhow",
"codex-arg0",
"codex-common",
"codex-core",
"codex-protocol",
"codex-utils-cli",
"codex-utils-json-to-toml",
"core_test_support",
"mcp_test_support",
@@ -2206,26 +2190,6 @@ dependencies = [
"tracing",
]
[[package]]
name = "codex-shell-command"
version = "0.0.0"
dependencies = [
"anyhow",
"base64 0.22.1",
"codex-protocol",
"codex-utils-absolute-path",
"once_cell",
"pretty_assertions",
"regex",
"serde",
"serde_json",
"shlex",
"tree-sitter",
"tree-sitter-bash",
"url",
"which",
]
[[package]]
name = "codex-state"
version = "0.0.0"
@@ -2277,6 +2241,7 @@ dependencies = [
"codex-chatgpt",
"codex-cli",
"codex-cloud-requirements",
"codex-common",
"codex-core",
"codex-feedback",
"codex-file-search",
@@ -2285,14 +2250,8 @@ dependencies = [
"codex-protocol",
"codex-state",
"codex-utils-absolute-path",
"codex-utils-approval-presets",
"codex-utils-cargo-bin",
"codex-utils-cli",
"codex-utils-elapsed",
"codex-utils-fuzzy-match",
"codex-utils-oss",
"codex-utils-pty",
"codex-utils-sandbox-summary",
"codex-windows-sandbox",
"color-eyre",
"crossterm",
@@ -2358,13 +2317,6 @@ dependencies = [
"ts-rs",
]
[[package]]
name = "codex-utils-approval-presets"
version = "0.0.0"
dependencies = [
"codex-core",
]
[[package]]
name = "codex-utils-cache"
version = "0.0.0"
@@ -2383,26 +2335,6 @@ dependencies = [
"thiserror 2.0.18",
]
[[package]]
name = "codex-utils-cli"
version = "0.0.0"
dependencies = [
"clap",
"codex-core",
"codex-protocol",
"pretty_assertions",
"serde",
"toml 0.9.11+spec-1.1.0",
]
[[package]]
name = "codex-utils-elapsed"
version = "0.0.0"
[[package]]
name = "codex-utils-fuzzy-match"
version = "0.0.0"
[[package]]
name = "codex-utils-home-dir"
version = "0.0.0"
@@ -2433,15 +2365,6 @@ dependencies = [
"toml 0.9.11+spec-1.1.0",
]
[[package]]
name = "codex-utils-oss"
version = "0.0.0"
dependencies = [
"codex-core",
"codex-lmstudio",
"codex-ollama",
]
[[package]]
name = "codex-utils-pty"
version = "0.0.0"
@@ -2476,15 +2399,6 @@ dependencies = [
"rustls",
]
[[package]]
name = "codex-utils-sandbox-summary"
version = "0.0.0"
dependencies = [
"codex-core",
"codex-utils-absolute-path",
"pretty_assertions",
]
[[package]]
name = "codex-utils-string"
version = "0.0.0"
@@ -2706,7 +2620,6 @@ dependencies = [
"codex-protocol",
"codex-utils-absolute-path",
"codex-utils-cargo-bin",
"ctor 0.6.3",
"futures",
"notify",
"pretty_assertions",


@@ -15,8 +15,7 @@ members = [
"cloud-tasks",
"cloud-tasks-client",
"cli",
"config",
"shell-command",
"common",
"core",
"hooks",
"secrets",
@@ -50,12 +49,6 @@ members = [
"utils/readiness",
"utils/rustls-provider",
"utils/string",
"utils/cli",
"utils/elapsed",
"utils/sandbox-summary",
"utils/approval-presets",
"utils/oss",
"utils/fuzzy-match",
"codex-client",
"codex-api",
"state",
@@ -64,7 +57,7 @@ members = [
resolver = "2"
[workspace.package]
version = "0.100.0-alpha.3"
version = "0.0.0"
# Track the edition for all workspace crates in one place. Individual
# crates can still override this value, but keeping it here means new
# crates created with `cargo new -w ...` automatically inherit the 2024
@@ -84,19 +77,20 @@ codex-apply-patch = { path = "apply-patch" }
codex-arg0 = { path = "arg0" }
codex-async-utils = { path = "async-utils" }
codex-backend-client = { path = "backend-client" }
codex-chatgpt = { path = "chatgpt" }
codex-cli = { path = "cli" }
codex-client = { path = "codex-client" }
codex-cloud-requirements = { path = "cloud-requirements" }
codex-config = { path = "config" }
codex-chatgpt = { path = "chatgpt" }
codex-cli = { path = "cli"}
codex-client = { path = "codex-client" }
codex-common = { path = "common" }
codex-core = { path = "core" }
codex-hooks = { path = "hooks" }
codex-secrets = { path = "secrets" }
codex-exec = { path = "exec" }
codex-execpolicy = { path = "execpolicy" }
codex-experimental-api-macros = { path = "codex-experimental-api-macros" }
codex-feedback = { path = "feedback" }
codex-file-search = { path = "file-search" }
codex-git = { path = "utils/git" }
codex-hooks = { path = "hooks" }
codex-keyring-store = { path = "keyring-store" }
codex-linux-sandbox = { path = "linux-sandbox" }
codex-lmstudio = { path = "lmstudio" }
@@ -109,26 +103,18 @@ codex-process-hardening = { path = "process-hardening" }
codex-protocol = { path = "protocol" }
codex-responses-api-proxy = { path = "responses-api-proxy" }
codex-rmcp-client = { path = "rmcp-client" }
codex-secrets = { path = "secrets" }
codex-shell-command = { path = "shell-command" }
codex-state = { path = "state" }
codex-stdio-to-uds = { path = "stdio-to-uds" }
codex-tui = { path = "tui" }
codex-utils-absolute-path = { path = "utils/absolute-path" }
codex-utils-approval-presets = { path = "utils/approval-presets" }
codex-utils-cache = { path = "utils/cache" }
codex-utils-cargo-bin = { path = "utils/cargo-bin" }
codex-utils-cli = { path = "utils/cli" }
codex-utils-elapsed = { path = "utils/elapsed" }
codex-utils-fuzzy-match = { path = "utils/fuzzy-match" }
codex-utils-home-dir = { path = "utils/home-dir" }
codex-utils-image = { path = "utils/image" }
codex-utils-json-to-toml = { path = "utils/json-to-toml" }
codex-utils-oss = { path = "utils/oss" }
codex-utils-home-dir = { path = "utils/home-dir" }
codex-utils-pty = { path = "utils/pty" }
codex-utils-readiness = { path = "utils/readiness" }
codex-utils-rustls-provider = { path = "utils/rustls-provider" }
codex-utils-sandbox-summary = { path = "utils/sandbox-summary" }
codex-utils-string = { path = "utils/string" }
codex-windows-sandbox = { path = "windows-sandbox-rs" }
core_test_support = { path = "core/tests/common" }
@@ -141,12 +127,12 @@ allocative = "0.3.3"
ansi-to-tui = "7.0.0"
anyhow = "1"
arboard = { version = "3", features = ["wayland-data-control"] }
askama = "0.15.4"
assert_cmd = "2"
assert_matches = "1.5.0"
async-channel = "2.3.1"
async-stream = "0.3.6"
async-trait = "0.1.89"
askama = "0.15.4"
axum = { version = "0.8", default-features = false }
base64 = "0.22.1"
bm25 = "2.3.2"
@@ -156,8 +142,8 @@ chrono = "0.4.43"
clap = "4"
clap_complete = "4"
color-eyre = "0.6.3"
crossbeam-channel = "0.5.15"
crossterm = "0.28.1"
crossbeam-channel = "0.5.15"
ctor = "0.6.3"
derive_more = "2"
diffy = "0.4.2"
@@ -175,10 +161,10 @@ icu_decimal = "2.1"
icu_locale_core = "2.1"
icu_provider = { version = "2.1", features = ["sync"] }
ignore = "0.4.23"
indoc = "2.0"
image = { version = "^0.25.9", default-features = false }
include_dir = "0.7.4"
indexmap = "2.12.0"
indoc = "2.0"
insta = "1.46.3"
inventory = "0.3.19"
itertools = "0.14.0"
@@ -200,6 +186,7 @@ opentelemetry-appender-tracing = "0.31.0"
opentelemetry-otlp = "0.31.0"
opentelemetry-semantic-conventions = "0.31.0"
opentelemetry_sdk = "0.31.0"
tracing-opentelemetry = "0.32.0"
os_info = "3.12.0"
owo-colors = "4.2.0"
path-absolutize = "3.1.1"
@@ -215,14 +202,10 @@ regex = "1.12.3"
regex-lite = "0.1.8"
reqwest = "0.12"
rmcp = { version = "0.14.0", default-features = false }
rustls = { version = "0.23", default-features = false, features = ["ring", "std"] }
runfiles = { git = "https://github.com/dzbarsky/rules_rust", rev = "b56cbaa8465e74127f1ea216f813cd377295ad81" }
rustls = { version = "0.23", default-features = false, features = [
"ring",
"std",
] }
schemars = "0.8.22"
seccompiler = "0.5.0"
semver = "1.0"
sentry = "0.46.0"
serde = "1"
serde_json = "1"
@@ -232,19 +215,11 @@ serde_yaml = "0.9"
serial_test = "3.2.0"
sha1 = "0.10.6"
sha2 = "0.10"
semver = "1.0"
shlex = "1.3.0"
similar = "2.7.0"
socket2 = "0.6.1"
sqlx = { version = "0.8.6", default-features = false, features = [
"chrono",
"json",
"macros",
"migrate",
"runtime-tokio-rustls",
"sqlite",
"time",
"uuid",
] }
sqlx = { version = "0.8.6", default-features = false, features = ["chrono", "json", "macros", "migrate", "runtime-tokio-rustls", "sqlite", "time", "uuid"] }
starlark = "0.13.0"
strum = "0.27.2"
strum_macros = "0.27.2"
@@ -259,23 +234,20 @@ tiny_http = "0.12"
tokio = "1"
tokio-stream = "0.1.18"
tokio-test = "0.4"
tokio-tungstenite = { version = "0.28.0", features = [
"proxy",
"rustls-tls-native-roots",
] }
tokio-tungstenite = { version = "0.28.0", features = ["proxy", "rustls-tls-native-roots"] }
tungstenite = { version = "0.27.0", features = ["deflate", "proxy"] }
tokio-util = "0.7.18"
toml = "0.9.5"
toml_edit = "0.24.0"
tracing = "0.1.44"
tracing-appender = "0.2.3"
tracing-opentelemetry = "0.32.0"
tracing-subscriber = "0.3.22"
tracing-test = "0.2.5"
tree-sitter = "0.25.10"
tree-sitter-bash = "0.25"
zstd = "0.13"
tree-sitter-highlight = "0.25.10"
ts-rs = "11"
tungstenite = { version = "0.27.0", features = ["deflate", "proxy"] }
uds_windows = "1.1.0"
unicode-segmentation = "1.12.0"
unicode-width = "0.2"
@@ -288,7 +260,6 @@ webbrowser = "1.0"
which = "8"
wildmatch = "2.6.1"
zip = "2.4.2"
zstd = "0.13"
wiremock = "0.6"
zeroize = "1.8.2"
@@ -334,12 +305,7 @@ unwrap_used = "deny"
# cargo-shear cannot see the platform-specific openssl-sys usage, so we
# silence the false positive here instead of deleting a real dependency.
[workspace.metadata.cargo-shear]
ignored = [
"icu_provider",
"openssl-sys",
"codex-utils-readiness",
"codex-secrets",
]
ignored = ["icu_provider", "openssl-sys", "codex-utils-readiness", "codex-secrets"]
[profile.release]
lto = "fat"


@@ -175,7 +175,6 @@
"enum": [
"context_window_exceeded",
"usage_limit_exceeded",
"server_overloaded",
"internal_server_error",
"unauthorized",
"bad_request",
@@ -185,6 +184,35 @@
],
"type": "string"
},
{
"additionalProperties": false,
"properties": {
"model_cap": {
"properties": {
"model": {
"type": "string"
},
"reset_after_seconds": {
"format": "uint64",
"minimum": 0.0,
"type": [
"integer",
"null"
]
}
},
"required": [
"model"
],
"type": "object"
}
},
"required": [
"model_cap"
],
"title": "ModelCapCodexErrorInfo",
"type": "object"
},
{
"additionalProperties": false,
"properties": {
@@ -532,9 +560,6 @@
"null"
]
},
"turn_id": {
"type": "string"
},
"type": {
"enum": [
"task_started"
@@ -544,7 +569,6 @@
}
},
"required": [
"turn_id",
"type"
],
"title": "TaskStartedEventMsg",
@@ -559,9 +583,6 @@
"null"
]
},
"turn_id": {
"type": "string"
},
"type": {
"enum": [
"task_complete"
@@ -571,7 +592,6 @@
}
},
"required": [
"turn_id",
"type"
],
"title": "TaskCompleteEventMsg",
@@ -2109,12 +2129,6 @@
"reason": {
"$ref": "#/definitions/TurnAbortReason"
},
"turn_id": {
"type": [
"string",
"null"
]
},
"type": {
"enum": [
"turn_aborted"
@@ -3442,18 +3456,6 @@
}
]
},
"limit_id": {
"type": [
"string",
"null"
]
},
"limit_name": {
"type": [
"string",
"null"
]
},
"plan_type": {
"anyOf": [
{
@@ -5301,9 +5303,6 @@
"null"
]
},
"turn_id": {
"type": "string"
},
"type": {
"enum": [
"task_started"
@@ -5313,7 +5312,6 @@
}
},
"required": [
"turn_id",
"type"
],
"title": "TaskStartedEventMsg",
@@ -5328,9 +5326,6 @@
"null"
]
},
"turn_id": {
"type": "string"
},
"type": {
"enum": [
"task_complete"
@@ -5340,7 +5335,6 @@
}
},
"required": [
"turn_id",
"type"
],
"title": "TaskCompleteEventMsg",
@@ -6878,12 +6872,6 @@
"reason": {
"$ref": "#/definitions/TurnAbortReason"
},
"turn_id": {
"type": [
"string",
"null"
]
},
"type": {
"enum": [
"turn_aborted"

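Note: a minimal sketch of the error-info shape implied by the new ModelCapCodexErrorInfo schema entries above, assuming hypothetical Rust names (the actual codex-rs types may differ):

use serde::{Deserialize, Serialize};

// Hypothetical per-model cap payload matching {"model": ..., "reset_after_seconds": ...}.
#[derive(Debug, Serialize, Deserialize)]
pub struct ModelCap {
    /// Model the cap applies to.
    pub model: String,
    /// Seconds until the per-model cap resets, if known.
    pub reset_after_seconds: Option<u64>,
}

// Hypothetical bare string error codes from the existing enum.
#[derive(Debug, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum KnownErrorCode {
    ContextWindowExceeded,
    UsageLimitExceeded,
    InternalServerError,
    Unauthorized,
    BadRequest,
}

// Untagged: bare string codes deserialize via `Code`, while the new
// {"model_cap": {...}} object deserializes via `ModelCap`.
#[derive(Debug, Serialize, Deserialize)]
#[serde(untagged)]
pub enum CodexErrorInfo {
    Code(KnownErrorCode),
    ModelCap { model_cap: ModelCap },
}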

@@ -373,7 +373,6 @@
"enum": [
"contextWindowExceeded",
"usageLimitExceeded",
"serverOverloaded",
"internalServerError",
"unauthorized",
"badRequest",
@@ -383,6 +382,35 @@
],
"type": "string"
},
{
"additionalProperties": false,
"properties": {
"modelCap": {
"properties": {
"model": {
"type": "string"
},
"reset_after_seconds": {
"format": "uint64",
"minimum": 0.0,
"type": [
"integer",
"null"
]
}
},
"required": [
"model"
],
"type": "object"
}
},
"required": [
"modelCap"
],
"title": "ModelCapCodexErrorInfo",
"type": "object"
},
{
"additionalProperties": false,
"properties": {
@@ -487,7 +515,6 @@
"enum": [
"context_window_exceeded",
"usage_limit_exceeded",
"server_overloaded",
"internal_server_error",
"unauthorized",
"bad_request",
@@ -497,6 +524,35 @@
],
"type": "string"
},
{
"additionalProperties": false,
"properties": {
"model_cap": {
"properties": {
"model": {
"type": "string"
},
"reset_after_seconds": {
"format": "uint64",
"minimum": 0.0,
"type": [
"integer",
"null"
]
}
},
"required": [
"model"
],
"type": "object"
}
},
"required": [
"model_cap"
],
"title": "ModelCapCodexErrorInfo2",
"type": "object"
},
{
"additionalProperties": false,
"properties": {
@@ -1148,9 +1204,6 @@
"null"
]
},
"turn_id": {
"type": "string"
},
"type": {
"enum": [
"task_started"
@@ -1160,7 +1213,6 @@
}
},
"required": [
"turn_id",
"type"
],
"title": "TaskStartedEventMsg",
@@ -1175,9 +1227,6 @@
"null"
]
},
"turn_id": {
"type": "string"
},
"type": {
"enum": [
"task_complete"
@@ -1187,7 +1236,6 @@
}
},
"required": [
"turn_id",
"type"
],
"title": "TaskCompleteEventMsg",
@@ -2725,12 +2773,6 @@
"reason": {
"$ref": "#/definitions/TurnAbortReason"
},
"turn_id": {
"type": [
"string",
"null"
]
},
"type": {
"enum": [
"turn_aborted"
@@ -3760,6 +3802,25 @@
],
"type": "object"
},
"InputModality": {
"description": "Canonical user-input modality tags advertised by a model.",
"oneOf": [
{
"description": "Plain text turns and tool payloads.",
"enum": [
"text"
],
"type": "string"
},
{
"description": "Image attachments included in user turns.",
"enum": [
"image"
],
"type": "string"
}
]
},
"ItemCompletedNotification": {
"properties": {
"item": {
@@ -4350,18 +4411,6 @@
}
]
},
"limitId": {
"type": [
"string",
"null"
]
},
"limitName": {
"type": [
"string",
"null"
]
},
"planType": {
"anyOf": [
{
@@ -4407,18 +4456,6 @@
}
]
},
"limit_id": {
"type": [
"string",
"null"
]
},
"limit_name": {
"type": [
"string",
"null"
]
},
"plan_type": {
"anyOf": [
{
@@ -5992,6 +6029,17 @@
"description": "Version of the CLI that created the thread.",
"type": "string"
},
"conversationModalities": {
"default": null,
"description": "Tri-state conversation modalities signal: - `None`: unknown / not yet determined - `Some([Text])`: known to be text-only - `Some([Text, Image])`: images are known to exist in context",
"items": {
"$ref": "#/definitions/InputModality"
},
"type": [
"array",
"null"
]
},
"createdAt": {
"description": "Unix timestamp (in seconds) when the thread was created.",
"format": "int64",

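Note: a minimal sketch of the tri-state conversationModalities field described above, assuming hypothetical Rust type and field names (the real codex-rs definitions may differ):

use serde::{Deserialize, Serialize};

// Mirrors the InputModality schema: "text" and "image" string tags.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum InputModality {
    /// Plain text turns and tool payloads.
    Text,
    /// Image attachments included in user turns.
    Image,
}

#[derive(Debug, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct ThreadMetadata {
    /// Tri-state signal:
    /// - `None`: unknown / not yet determined
    /// - `Some(vec![Text])`: known to be text-only
    /// - `Some(vec![Text, Image])`: images are known to exist in context
    #[serde(default)]
    pub conversation_modalities: Option<Vec<InputModality>>,
}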

@@ -1884,7 +1884,6 @@
"enum": [
"context_window_exceeded",
"usage_limit_exceeded",
"server_overloaded",
"internal_server_error",
"unauthorized",
"bad_request",
@@ -1894,6 +1893,35 @@
],
"type": "string"
},
{
"additionalProperties": false,
"properties": {
"model_cap": {
"properties": {
"model": {
"type": "string"
},
"reset_after_seconds": {
"format": "uint64",
"minimum": 0.0,
"type": [
"integer",
"null"
]
}
},
"required": [
"model"
],
"type": "object"
}
},
"required": [
"model_cap"
],
"title": "ModelCapCodexErrorInfo",
"type": "object"
},
{
"additionalProperties": false,
"properties": {
@@ -2545,9 +2573,6 @@
"null"
]
},
"turn_id": {
"type": "string"
},
"type": {
"enum": [
"task_started"
@@ -2557,7 +2582,6 @@
}
},
"required": [
"turn_id",
"type"
],
"title": "TaskStartedEventMsg",
@@ -2572,9 +2596,6 @@
"null"
]
},
"turn_id": {
"type": "string"
},
"type": {
"enum": [
"task_complete"
@@ -2584,7 +2605,6 @@
}
},
"required": [
"turn_id",
"type"
],
"title": "TaskCompleteEventMsg",
@@ -4122,12 +4142,6 @@
"reason": {
"$ref": "#/definitions/TurnAbortReason"
},
"turn_id": {
"type": [
"string",
"null"
]
},
"type": {
"enum": [
"turn_aborted"
@@ -6527,18 +6541,6 @@
}
]
},
"limit_id": {
"type": [
"string",
"null"
]
},
"limit_name": {
"type": [
"string",
"null"
]
},
"plan_type": {
"anyOf": [
{
@@ -10251,7 +10253,6 @@
"enum": [
"contextWindowExceeded",
"usageLimitExceeded",
"serverOverloaded",
"internalServerError",
"unauthorized",
"badRequest",
@@ -10261,6 +10262,35 @@
],
"type": "string"
},
{
"additionalProperties": false,
"properties": {
"modelCap": {
"properties": {
"model": {
"type": "string"
},
"reset_after_seconds": {
"format": "uint64",
"minimum": 0.0,
"type": [
"integer",
"null"
]
}
},
"required": [
"model"
],
"type": "object"
}
},
"required": [
"modelCap"
],
"title": "ModelCapCodexErrorInfo",
"type": "object"
},
{
"additionalProperties": false,
"properties": {
@@ -11793,22 +11823,7 @@
"$schema": "http://json-schema.org/draft-07/schema#",
"properties": {
"rateLimits": {
"allOf": [
{
"$ref": "#/definitions/v2/RateLimitSnapshot"
}
],
"description": "Backward-compatible single-bucket view; mirrors the historical payload."
},
"rateLimitsByLimitId": {
"additionalProperties": {
"$ref": "#/definitions/v2/RateLimitSnapshot"
},
"description": "Multi-bucket view keyed by metered `limit_id` (for example, `codex`).",
"type": [
"object",
"null"
]
"$ref": "#/definitions/v2/RateLimitSnapshot"
}
},
"required": [
@@ -12823,18 +12838,6 @@
}
]
},
"limitId": {
"type": [
"string",
"null"
]
},
"limitName": {
"type": [
"string",
"null"
]
},
"planType": {
"anyOf": [
{
@@ -14402,6 +14405,17 @@
"description": "Version of the CLI that created the thread.",
"type": "string"
},
"conversationModalities": {
"default": null,
"description": "Tri-state conversation modalities signal: - `None`: unknown / not yet determined - `Some([Text])`: known to be text-only - `Some([Text, Image])`: images are known to exist in context",
"items": {
"$ref": "#/definitions/v2/InputModality"
},
"type": [
"array",
"null"
]
},
"createdAt": {
"description": "Unix timestamp (in seconds) when the thread was created.",
"format": "int64",


@@ -175,7 +175,6 @@
"enum": [
"context_window_exceeded",
"usage_limit_exceeded",
"server_overloaded",
"internal_server_error",
"unauthorized",
"bad_request",
@@ -185,6 +184,35 @@
],
"type": "string"
},
{
"additionalProperties": false,
"properties": {
"model_cap": {
"properties": {
"model": {
"type": "string"
},
"reset_after_seconds": {
"format": "uint64",
"minimum": 0.0,
"type": [
"integer",
"null"
]
}
},
"required": [
"model"
],
"type": "object"
}
},
"required": [
"model_cap"
],
"title": "ModelCapCodexErrorInfo",
"type": "object"
},
{
"additionalProperties": false,
"properties": {
@@ -532,9 +560,6 @@
"null"
]
},
"turn_id": {
"type": "string"
},
"type": {
"enum": [
"task_started"
@@ -544,7 +569,6 @@
}
},
"required": [
"turn_id",
"type"
],
"title": "TaskStartedEventMsg",
@@ -559,9 +583,6 @@
"null"
]
},
"turn_id": {
"type": "string"
},
"type": {
"enum": [
"task_complete"
@@ -571,7 +592,6 @@
}
},
"required": [
"turn_id",
"type"
],
"title": "TaskCompleteEventMsg",
@@ -2109,12 +2129,6 @@
"reason": {
"$ref": "#/definitions/TurnAbortReason"
},
"turn_id": {
"type": [
"string",
"null"
]
},
"type": {
"enum": [
"turn_aborted"
@@ -3442,18 +3456,6 @@
}
]
},
"limit_id": {
"type": [
"string",
"null"
]
},
"limit_name": {
"type": [
"string",
"null"
]
},
"plan_type": {
"anyOf": [
{


@@ -175,7 +175,6 @@
"enum": [
"context_window_exceeded",
"usage_limit_exceeded",
"server_overloaded",
"internal_server_error",
"unauthorized",
"bad_request",
@@ -185,6 +184,35 @@
],
"type": "string"
},
{
"additionalProperties": false,
"properties": {
"model_cap": {
"properties": {
"model": {
"type": "string"
},
"reset_after_seconds": {
"format": "uint64",
"minimum": 0.0,
"type": [
"integer",
"null"
]
}
},
"required": [
"model"
],
"type": "object"
}
},
"required": [
"model_cap"
],
"title": "ModelCapCodexErrorInfo",
"type": "object"
},
{
"additionalProperties": false,
"properties": {
@@ -532,9 +560,6 @@
"null"
]
},
"turn_id": {
"type": "string"
},
"type": {
"enum": [
"task_started"
@@ -544,7 +569,6 @@
}
},
"required": [
"turn_id",
"type"
],
"title": "TaskStartedEventMsg",
@@ -559,9 +583,6 @@
"null"
]
},
"turn_id": {
"type": "string"
},
"type": {
"enum": [
"task_complete"
@@ -571,7 +592,6 @@
}
},
"required": [
"turn_id",
"type"
],
"title": "TaskCompleteEventMsg",
@@ -2109,12 +2129,6 @@
"reason": {
"$ref": "#/definitions/TurnAbortReason"
},
"turn_id": {
"type": [
"string",
"null"
]
},
"type": {
"enum": [
"turn_aborted"
@@ -3442,18 +3456,6 @@
}
]
},
"limit_id": {
"type": [
"string",
"null"
]
},
"limit_name": {
"type": [
"string",
"null"
]
},
"plan_type": {
"anyOf": [
{


@@ -175,7 +175,6 @@
"enum": [
"context_window_exceeded",
"usage_limit_exceeded",
"server_overloaded",
"internal_server_error",
"unauthorized",
"bad_request",
@@ -185,6 +184,35 @@
],
"type": "string"
},
{
"additionalProperties": false,
"properties": {
"model_cap": {
"properties": {
"model": {
"type": "string"
},
"reset_after_seconds": {
"format": "uint64",
"minimum": 0.0,
"type": [
"integer",
"null"
]
}
},
"required": [
"model"
],
"type": "object"
}
},
"required": [
"model_cap"
],
"title": "ModelCapCodexErrorInfo",
"type": "object"
},
{
"additionalProperties": false,
"properties": {
@@ -532,9 +560,6 @@
"null"
]
},
"turn_id": {
"type": "string"
},
"type": {
"enum": [
"task_started"
@@ -544,7 +569,6 @@
}
},
"required": [
"turn_id",
"type"
],
"title": "TaskStartedEventMsg",
@@ -559,9 +583,6 @@
"null"
]
},
"turn_id": {
"type": "string"
},
"type": {
"enum": [
"task_complete"
@@ -571,7 +592,6 @@
}
},
"required": [
"turn_id",
"type"
],
"title": "TaskCompleteEventMsg",
@@ -2109,12 +2129,6 @@
"reason": {
"$ref": "#/definitions/TurnAbortReason"
},
"turn_id": {
"type": [
"string",
"null"
]
},
"type": {
"enum": [
"turn_aborted"
@@ -3442,18 +3456,6 @@
}
]
},
"limit_id": {
"type": [
"string",
"null"
]
},
"limit_name": {
"type": [
"string",
"null"
]
},
"plan_type": {
"anyOf": [
{


@@ -48,18 +48,6 @@
}
]
},
"limitId": {
"type": [
"string",
"null"
]
},
"limitName": {
"type": [
"string",
"null"
]
},
"planType": {
"anyOf": [
{


@@ -8,7 +8,6 @@
"enum": [
"contextWindowExceeded",
"usageLimitExceeded",
"serverOverloaded",
"internalServerError",
"unauthorized",
"badRequest",
@@ -18,6 +17,35 @@
],
"type": "string"
},
{
"additionalProperties": false,
"properties": {
"modelCap": {
"properties": {
"model": {
"type": "string"
},
"reset_after_seconds": {
"format": "uint64",
"minimum": 0.0,
"type": [
"integer",
"null"
]
}
},
"required": [
"model"
],
"type": "object"
}
},
"required": [
"modelCap"
],
"title": "ModelCapCodexErrorInfo",
"type": "object"
},
{
"additionalProperties": false,
"properties": {


@@ -48,18 +48,6 @@
}
]
},
"limitId": {
"type": [
"string",
"null"
]
},
"limitName": {
"type": [
"string",
"null"
]
},
"planType": {
"anyOf": [
{
@@ -122,22 +110,7 @@
},
"properties": {
"rateLimits": {
"allOf": [
{
"$ref": "#/definitions/RateLimitSnapshot"
}
],
"description": "Backward-compatible single-bucket view; mirrors the historical payload."
},
"rateLimitsByLimitId": {
"additionalProperties": {
"$ref": "#/definitions/RateLimitSnapshot"
},
"description": "Multi-bucket view keyed by metered `limit_id` (for example, `codex`).",
"type": [
"object",
"null"
]
"$ref": "#/definitions/RateLimitSnapshot"
}
},
"required": [


@@ -27,7 +27,6 @@
"enum": [
"contextWindowExceeded",
"usageLimitExceeded",
"serverOverloaded",
"internalServerError",
"unauthorized",
"badRequest",
@@ -37,6 +36,35 @@
],
"type": "string"
},
{
"additionalProperties": false,
"properties": {
"modelCap": {
"properties": {
"model": {
"type": "string"
},
"reset_after_seconds": {
"format": "uint64",
"minimum": 0.0,
"type": [
"integer",
"null"
]
}
},
"required": [
"model"
],
"type": "object"
}
},
"required": [
"modelCap"
],
"title": "ModelCapCodexErrorInfo",
"type": "object"
},
{
"additionalProperties": false,
"properties": {


@@ -40,7 +40,6 @@
"enum": [
"contextWindowExceeded",
"usageLimitExceeded",
"serverOverloaded",
"internalServerError",
"unauthorized",
"badRequest",
@@ -50,6 +49,35 @@
],
"type": "string"
},
{
"additionalProperties": false,
"properties": {
"modelCap": {
"properties": {
"model": {
"type": "string"
},
"reset_after_seconds": {
"format": "uint64",
"minimum": 0.0,
"type": [
"integer",
"null"
]
}
},
"required": [
"model"
],
"type": "object"
}
},
"required": [
"modelCap"
],
"title": "ModelCapCodexErrorInfo",
"type": "object"
},
{
"additionalProperties": false,
"properties": {
@@ -354,6 +382,25 @@
},
"type": "object"
},
"InputModality": {
"description": "Canonical user-input modality tags advertised by a model.",
"oneOf": [
{
"description": "Plain text turns and tool payloads.",
"enum": [
"text"
],
"type": "string"
},
{
"description": "Image attachments included in user turns.",
"enum": [
"image"
],
"type": "string"
}
]
},
"McpToolCallError": {
"properties": {
"message": {
@@ -673,6 +720,17 @@
"description": "Version of the CLI that created the thread.",
"type": "string"
},
"conversationModalities": {
"default": null,
"description": "Tri-state conversation modalities signal: - `None`: unknown / not yet determined - `Some([Text])`: known to be text-only - `Some([Text, Image])`: images are known to exist in context",
"items": {
"$ref": "#/definitions/InputModality"
},
"type": [
"array",
"null"
]
},
"createdAt": {
"description": "Unix timestamp (in seconds) when the thread was created.",
"format": "int64",


@@ -27,7 +27,6 @@
"enum": [
"contextWindowExceeded",
"usageLimitExceeded",
"serverOverloaded",
"internalServerError",
"unauthorized",
"badRequest",
@@ -37,6 +36,35 @@
],
"type": "string"
},
{
"additionalProperties": false,
"properties": {
"modelCap": {
"properties": {
"model": {
"type": "string"
},
"reset_after_seconds": {
"format": "uint64",
"minimum": 0.0,
"type": [
"integer",
"null"
]
}
},
"required": [
"model"
],
"type": "object"
}
},
"required": [
"modelCap"
],
"title": "ModelCapCodexErrorInfo",
"type": "object"
},
{
"additionalProperties": false,
"properties": {
@@ -341,6 +369,25 @@
},
"type": "object"
},
"InputModality": {
"description": "Canonical user-input modality tags advertised by a model.",
"oneOf": [
{
"description": "Plain text turns and tool payloads.",
"enum": [
"text"
],
"type": "string"
},
{
"description": "Image attachments included in user turns.",
"enum": [
"image"
],
"type": "string"
}
]
},
"McpToolCallError": {
"properties": {
"message": {
@@ -546,6 +593,17 @@
"description": "Version of the CLI that created the thread.",
"type": "string"
},
"conversationModalities": {
"default": null,
"description": "Tri-state conversation modalities signal: - `None`: unknown / not yet determined - `Some([Text])`: known to be text-only - `Some([Text, Image])`: images are known to exist in context",
"items": {
"$ref": "#/definitions/InputModality"
},
"type": [
"array",
"null"
]
},
"createdAt": {
"description": "Unix timestamp (in seconds) when the thread was created.",
"format": "int64",


@@ -27,7 +27,6 @@
"enum": [
"contextWindowExceeded",
"usageLimitExceeded",
"serverOverloaded",
"internalServerError",
"unauthorized",
"badRequest",
@@ -37,6 +36,35 @@
],
"type": "string"
},
{
"additionalProperties": false,
"properties": {
"modelCap": {
"properties": {
"model": {
"type": "string"
},
"reset_after_seconds": {
"format": "uint64",
"minimum": 0.0,
"type": [
"integer",
"null"
]
}
},
"required": [
"model"
],
"type": "object"
}
},
"required": [
"modelCap"
],
"title": "ModelCapCodexErrorInfo",
"type": "object"
},
{
"additionalProperties": false,
"properties": {
@@ -341,6 +369,25 @@
},
"type": "object"
},
"InputModality": {
"description": "Canonical user-input modality tags advertised by a model.",
"oneOf": [
{
"description": "Plain text turns and tool payloads.",
"enum": [
"text"
],
"type": "string"
},
{
"description": "Image attachments included in user turns.",
"enum": [
"image"
],
"type": "string"
}
]
},
"McpToolCallError": {
"properties": {
"message": {
@@ -546,6 +593,17 @@
"description": "Version of the CLI that created the thread.",
"type": "string"
},
"conversationModalities": {
"default": null,
"description": "Tri-state conversation modalities signal: - `None`: unknown / not yet determined - `Some([Text])`: known to be text-only - `Some([Text, Image])`: images are known to exist in context",
"items": {
"$ref": "#/definitions/InputModality"
},
"type": [
"array",
"null"
]
},
"createdAt": {
"description": "Unix timestamp (in seconds) when the thread was created.",
"format": "int64",


@@ -40,7 +40,6 @@
"enum": [
"contextWindowExceeded",
"usageLimitExceeded",
"serverOverloaded",
"internalServerError",
"unauthorized",
"badRequest",
@@ -50,6 +49,35 @@
],
"type": "string"
},
{
"additionalProperties": false,
"properties": {
"modelCap": {
"properties": {
"model": {
"type": "string"
},
"reset_after_seconds": {
"format": "uint64",
"minimum": 0.0,
"type": [
"integer",
"null"
]
}
},
"required": [
"model"
],
"type": "object"
}
},
"required": [
"modelCap"
],
"title": "ModelCapCodexErrorInfo",
"type": "object"
},
{
"additionalProperties": false,
"properties": {
@@ -354,6 +382,25 @@
},
"type": "object"
},
"InputModality": {
"description": "Canonical user-input modality tags advertised by a model.",
"oneOf": [
{
"description": "Plain text turns and tool payloads.",
"enum": [
"text"
],
"type": "string"
},
{
"description": "Image attachments included in user turns.",
"enum": [
"image"
],
"type": "string"
}
]
},
"McpToolCallError": {
"properties": {
"message": {
@@ -673,6 +720,17 @@
"description": "Version of the CLI that created the thread.",
"type": "string"
},
"conversationModalities": {
"default": null,
"description": "Tri-state conversation modalities signal: - `None`: unknown / not yet determined - `Some([Text])`: known to be text-only - `Some([Text, Image])`: images are known to exist in context",
"items": {
"$ref": "#/definitions/InputModality"
},
"type": [
"array",
"null"
]
},
"createdAt": {
"description": "Unix timestamp (in seconds) when the thread was created.",
"format": "int64",


@@ -27,7 +27,6 @@
"enum": [
"contextWindowExceeded",
"usageLimitExceeded",
"serverOverloaded",
"internalServerError",
"unauthorized",
"badRequest",
@@ -37,6 +36,35 @@
],
"type": "string"
},
{
"additionalProperties": false,
"properties": {
"modelCap": {
"properties": {
"model": {
"type": "string"
},
"reset_after_seconds": {
"format": "uint64",
"minimum": 0.0,
"type": [
"integer",
"null"
]
}
},
"required": [
"model"
],
"type": "object"
}
},
"required": [
"modelCap"
],
"title": "ModelCapCodexErrorInfo",
"type": "object"
},
{
"additionalProperties": false,
"properties": {
@@ -341,6 +369,25 @@
},
"type": "object"
},
"InputModality": {
"description": "Canonical user-input modality tags advertised by a model.",
"oneOf": [
{
"description": "Plain text turns and tool payloads.",
"enum": [
"text"
],
"type": "string"
},
{
"description": "Image attachments included in user turns.",
"enum": [
"image"
],
"type": "string"
}
]
},
"McpToolCallError": {
"properties": {
"message": {
@@ -546,6 +593,17 @@
"description": "Version of the CLI that created the thread.",
"type": "string"
},
"conversationModalities": {
"default": null,
"description": "Tri-state conversation modalities signal: - `None`: unknown / not yet determined - `Some([Text])`: known to be text-only - `Some([Text, Image])`: images are known to exist in context",
"items": {
"$ref": "#/definitions/InputModality"
},
"type": [
"array",
"null"
]
},
"createdAt": {
"description": "Unix timestamp (in seconds) when the thread was created.",
"format": "int64",


@@ -40,7 +40,6 @@
"enum": [
"contextWindowExceeded",
"usageLimitExceeded",
"serverOverloaded",
"internalServerError",
"unauthorized",
"badRequest",
@@ -50,6 +49,35 @@
],
"type": "string"
},
{
"additionalProperties": false,
"properties": {
"modelCap": {
"properties": {
"model": {
"type": "string"
},
"reset_after_seconds": {
"format": "uint64",
"minimum": 0.0,
"type": [
"integer",
"null"
]
}
},
"required": [
"model"
],
"type": "object"
}
},
"required": [
"modelCap"
],
"title": "ModelCapCodexErrorInfo",
"type": "object"
},
{
"additionalProperties": false,
"properties": {
@@ -354,6 +382,25 @@
},
"type": "object"
},
"InputModality": {
"description": "Canonical user-input modality tags advertised by a model.",
"oneOf": [
{
"description": "Plain text turns and tool payloads.",
"enum": [
"text"
],
"type": "string"
},
{
"description": "Image attachments included in user turns.",
"enum": [
"image"
],
"type": "string"
}
]
},
"McpToolCallError": {
"properties": {
"message": {
@@ -673,6 +720,17 @@
"description": "Version of the CLI that created the thread.",
"type": "string"
},
"conversationModalities": {
"default": null,
"description": "Tri-state conversation modalities signal: - `None`: unknown / not yet determined - `Some([Text])`: known to be text-only - `Some([Text, Image])`: images are known to exist in context",
"items": {
"$ref": "#/definitions/InputModality"
},
"type": [
"array",
"null"
]
},
"createdAt": {
"description": "Unix timestamp (in seconds) when the thread was created.",
"format": "int64",

View File

@@ -27,7 +27,6 @@
"enum": [
"contextWindowExceeded",
"usageLimitExceeded",
"serverOverloaded",
"internalServerError",
"unauthorized",
"badRequest",
@@ -37,6 +36,35 @@
],
"type": "string"
},
{
"additionalProperties": false,
"properties": {
"modelCap": {
"properties": {
"model": {
"type": "string"
},
"reset_after_seconds": {
"format": "uint64",
"minimum": 0.0,
"type": [
"integer",
"null"
]
}
},
"required": [
"model"
],
"type": "object"
}
},
"required": [
"modelCap"
],
"title": "ModelCapCodexErrorInfo",
"type": "object"
},
{
"additionalProperties": false,
"properties": {
@@ -341,6 +369,25 @@
},
"type": "object"
},
"InputModality": {
"description": "Canonical user-input modality tags advertised by a model.",
"oneOf": [
{
"description": "Plain text turns and tool payloads.",
"enum": [
"text"
],
"type": "string"
},
{
"description": "Image attachments included in user turns.",
"enum": [
"image"
],
"type": "string"
}
]
},
"McpToolCallError": {
"properties": {
"message": {
@@ -546,6 +593,17 @@
"description": "Version of the CLI that created the thread.",
"type": "string"
},
"conversationModalities": {
"default": null,
"description": "Tri-state conversation modalities signal: - `None`: unknown / not yet determined - `Some([Text])`: known to be text-only - `Some([Text, Image])`: images are known to exist in context",
"items": {
"$ref": "#/definitions/InputModality"
},
"type": [
"array",
"null"
]
},
"createdAt": {
"description": "Unix timestamp (in seconds) when the thread was created.",
"format": "int64",

View File

@@ -27,7 +27,6 @@
"enum": [
"contextWindowExceeded",
"usageLimitExceeded",
"serverOverloaded",
"internalServerError",
"unauthorized",
"badRequest",
@@ -37,6 +36,35 @@
],
"type": "string"
},
{
"additionalProperties": false,
"properties": {
"modelCap": {
"properties": {
"model": {
"type": "string"
},
"reset_after_seconds": {
"format": "uint64",
"minimum": 0.0,
"type": [
"integer",
"null"
]
}
},
"required": [
"model"
],
"type": "object"
}
},
"required": [
"modelCap"
],
"title": "ModelCapCodexErrorInfo",
"type": "object"
},
{
"additionalProperties": false,
"properties": {
@@ -341,6 +369,25 @@
},
"type": "object"
},
"InputModality": {
"description": "Canonical user-input modality tags advertised by a model.",
"oneOf": [
{
"description": "Plain text turns and tool payloads.",
"enum": [
"text"
],
"type": "string"
},
{
"description": "Image attachments included in user turns.",
"enum": [
"image"
],
"type": "string"
}
]
},
"McpToolCallError": {
"properties": {
"message": {
@@ -546,6 +593,17 @@
"description": "Version of the CLI that created the thread.",
"type": "string"
},
"conversationModalities": {
"default": null,
"description": "Tri-state conversation modalities signal: - `None`: unknown / not yet determined - `Some([Text])`: known to be text-only - `Some([Text, Image])`: images are known to exist in context",
"items": {
"$ref": "#/definitions/InputModality"
},
"type": [
"array",
"null"
]
},
"createdAt": {
"description": "Unix timestamp (in seconds) when the thread was created.",
"format": "int64",

View File

@@ -27,7 +27,6 @@
"enum": [
"contextWindowExceeded",
"usageLimitExceeded",
"serverOverloaded",
"internalServerError",
"unauthorized",
"badRequest",
@@ -37,6 +36,35 @@
],
"type": "string"
},
{
"additionalProperties": false,
"properties": {
"modelCap": {
"properties": {
"model": {
"type": "string"
},
"reset_after_seconds": {
"format": "uint64",
"minimum": 0.0,
"type": [
"integer",
"null"
]
}
},
"required": [
"model"
],
"type": "object"
}
},
"required": [
"modelCap"
],
"title": "ModelCapCodexErrorInfo",
"type": "object"
},
{
"additionalProperties": false,
"properties": {

View File

@@ -27,7 +27,6 @@
"enum": [
"contextWindowExceeded",
"usageLimitExceeded",
"serverOverloaded",
"internalServerError",
"unauthorized",
"badRequest",
@@ -37,6 +36,35 @@
],
"type": "string"
},
{
"additionalProperties": false,
"properties": {
"modelCap": {
"properties": {
"model": {
"type": "string"
},
"reset_after_seconds": {
"format": "uint64",
"minimum": 0.0,
"type": [
"integer",
"null"
]
}
},
"required": [
"model"
],
"type": "object"
}
},
"required": [
"modelCap"
],
"title": "ModelCapCodexErrorInfo",
"type": "object"
},
{
"additionalProperties": false,
"properties": {

View File

@@ -27,7 +27,6 @@
"enum": [
"contextWindowExceeded",
"usageLimitExceeded",
"serverOverloaded",
"internalServerError",
"unauthorized",
"badRequest",
@@ -37,6 +36,35 @@
],
"type": "string"
},
{
"additionalProperties": false,
"properties": {
"modelCap": {
"properties": {
"model": {
"type": "string"
},
"reset_after_seconds": {
"format": "uint64",
"minimum": 0.0,
"type": [
"integer",
"null"
]
}
},
"required": [
"model"
],
"type": "object"
}
},
"required": [
"modelCap"
],
"title": "ModelCapCodexErrorInfo",
"type": "object"
},
{
"additionalProperties": false,
"properties": {

View File

@@ -5,4 +5,4 @@
/**
* Codex errors that we expose to clients.
*/
export type CodexErrorInfo = "context_window_exceeded" | "usage_limit_exceeded" | "server_overloaded" | { "http_connection_failed": { http_status_code: number | null, } } | { "response_stream_connection_failed": { http_status_code: number | null, } } | "internal_server_error" | "unauthorized" | "bad_request" | "sandbox_error" | { "response_stream_disconnected": { http_status_code: number | null, } } | { "response_too_many_failed_attempts": { http_status_code: number | null, } } | "thread_rollback_failed" | "other";
export type CodexErrorInfo = "context_window_exceeded" | "usage_limit_exceeded" | { "model_cap": { model: string, reset_after_seconds: bigint | null, } } | { "http_connection_failed": { http_status_code: number | null, } } | { "response_stream_connection_failed": { http_status_code: number | null, } } | "internal_server_error" | "unauthorized" | "bad_request" | "sandbox_error" | { "response_stream_disconnected": { http_status_code: number | null, } } | { "response_too_many_failed_attempts": { http_status_code: number | null, } } | "thread_rollback_failed" | "other";

View File

@@ -5,4 +5,4 @@ import type { CreditsSnapshot } from "./CreditsSnapshot";
import type { PlanType } from "./PlanType";
import type { RateLimitWindow } from "./RateLimitWindow";
export type RateLimitSnapshot = { limit_id: string | null, limit_name: string | null, primary: RateLimitWindow | null, secondary: RateLimitWindow | null, credits: CreditsSnapshot | null, plan_type: PlanType | null, };
export type RateLimitSnapshot = { primary: RateLimitWindow | null, secondary: RateLimitWindow | null, credits: CreditsSnapshot | null, plan_type: PlanType | null, };

View File

@@ -3,4 +3,4 @@
// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.
import type { TurnAbortReason } from "./TurnAbortReason";
export type TurnAbortedEvent = { turn_id: string | null, reason: TurnAbortReason, };
export type TurnAbortedEvent = { reason: TurnAbortReason, };

View File

@@ -2,4 +2,4 @@
// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.
export type TurnCompleteEvent = { turn_id: string, last_agent_message: string | null, };
export type TurnCompleteEvent = { last_agent_message: string | null, };

View File

@@ -3,4 +3,4 @@
// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.
import type { ModeKind } from "./ModeKind";
export type TurnStartedEvent = { turn_id: string, model_context_window: bigint | null, collaboration_mode_kind: ModeKind, };
export type TurnStartedEvent = { model_context_window: bigint | null, collaboration_mode_kind: ModeKind, };

View File

@@ -8,4 +8,4 @@
* When an upstream HTTP status is available (for example, from the Responses API or a provider),
* it is forwarded in `httpStatusCode` on the relevant `codexErrorInfo` variant.
*/
export type CodexErrorInfo = "contextWindowExceeded" | "usageLimitExceeded" | "serverOverloaded" | { "httpConnectionFailed": { httpStatusCode: number | null, } } | { "responseStreamConnectionFailed": { httpStatusCode: number | null, } } | "internalServerError" | "unauthorized" | "badRequest" | "threadRollbackFailed" | "sandboxError" | { "responseStreamDisconnected": { httpStatusCode: number | null, } } | { "responseTooManyFailedAttempts": { httpStatusCode: number | null, } } | "other";
export type CodexErrorInfo = "contextWindowExceeded" | "usageLimitExceeded" | { "modelCap": { model: string, reset_after_seconds: bigint | null, } } | { "httpConnectionFailed": { httpStatusCode: number | null, } } | { "responseStreamConnectionFailed": { httpStatusCode: number | null, } } | "internalServerError" | "unauthorized" | "badRequest" | "threadRollbackFailed" | "sandboxError" | { "responseStreamDisconnected": { httpStatusCode: number | null, } } | { "responseTooManyFailedAttempts": { httpStatusCode: number | null, } } | "other";
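As a rough illustration of consuming the new variant, here is a minimal Rust sketch; the enum below only mirrors the v2 `CodexErrorInfo` shape shown in this change and is not an import of the real crate type, and the model name is a placeholder:

// Illustrative mirror of the relevant v2 CodexErrorInfo variants; not the real crate type.
enum CodexErrorInfo {
    UsageLimitExceeded,
    ModelCap { model: String, reset_after_seconds: Option<u64> },
    Other,
}

fn describe(info: &CodexErrorInfo) -> String {
    match info {
        CodexErrorInfo::ModelCap { model, reset_after_seconds: Some(secs) } => {
            format!("{model} is capped; resets in {secs}s")
        }
        CodexErrorInfo::ModelCap { model, reset_after_seconds: None } => {
            format!("{model} is capped")
        }
        CodexErrorInfo::UsageLimitExceeded => "usage limit exceeded".to_string(),
        CodexErrorInfo::Other => "unrecognized error".to_string(),
    }
}

fn main() {
    let err = CodexErrorInfo::ModelCap { model: "example-model".into(), reset_after_seconds: Some(3600) };
    println!("{}", describe(&err)); // example-model is capped; resets in 3600s
}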

View File

@@ -3,12 +3,4 @@
// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.
import type { RateLimitSnapshot } from "./RateLimitSnapshot";
export type GetAccountRateLimitsResponse = {
/**
* Backward-compatible single-bucket view; mirrors the historical payload.
*/
rateLimits: RateLimitSnapshot,
/**
* Multi-bucket view keyed by metered `limit_id` (for example, `codex`).
*/
rateLimitsByLimitId: { [key in string]?: RateLimitSnapshot } | null, };
export type GetAccountRateLimitsResponse = { rateLimits: RateLimitSnapshot, };

View File

@@ -5,4 +5,4 @@ import type { PlanType } from "../PlanType";
import type { CreditsSnapshot } from "./CreditsSnapshot";
import type { RateLimitWindow } from "./RateLimitWindow";
export type RateLimitSnapshot = { limitId: string | null, limitName: string | null, primary: RateLimitWindow | null, secondary: RateLimitWindow | null, credits: CreditsSnapshot | null, planType: PlanType | null, };
export type RateLimitSnapshot = { primary: RateLimitWindow | null, secondary: RateLimitWindow | null, credits: CreditsSnapshot | null, planType: PlanType | null, };

View File

@@ -1,6 +1,7 @@
// GENERATED CODE! DO NOT MODIFY BY HAND!
// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.
import type { InputModality } from "../InputModality";
import type { GitInfo } from "./GitInfo";
import type { SessionSource } from "./SessionSource";
import type { Turn } from "./Turn";
@@ -10,6 +11,13 @@ export type Thread = { id: string,
* Usually the first user message in the thread, if available.
*/
preview: string,
/**
* Tri-state conversation modalities signal:
* - `None`: unknown / not yet determined
* - `Some([Text])`: known to be text-only
* - `Some([Text, Image])`: images are known to exist in context
*/
conversationModalities: Array<InputModality> | null,
/**
* Model provider used for this thread (for example, 'openai').
*/
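To make the tri-state concrete, a small standalone Rust sketch of how a consumer might branch on it; `InputModality` is mirrored locally here purely for illustration:

// Local mirror of the protocol's InputModality (Text | Image); illustration only.
#[derive(PartialEq)]
enum InputModality { Text, Image }

// None                  => unknown / not yet determined
// Some without Image    => known to be text-only
// Some containing Image => images are known to exist in context
fn thread_has_images(conversation_modalities: Option<&[InputModality]>) -> Option<bool> {
    conversation_modalities.map(|m| m.contains(&InputModality::Image))
}

fn main() {
    assert_eq!(thread_has_images(None), None);
    assert_eq!(thread_has_images(Some(&[InputModality::Text])), Some(false));
    assert_eq!(
        thread_has_images(Some(&[InputModality::Text, InputModality::Image])),
        Some(true)
    );
}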

View File

@@ -5,25 +5,21 @@ use crate::protocol::v2::TurnStatus;
use crate::protocol::v2::UserInput;
use codex_protocol::protocol::AgentReasoningEvent;
use codex_protocol::protocol::AgentReasoningRawContentEvent;
use codex_protocol::protocol::CompactedItem;
use codex_protocol::protocol::EventMsg;
use codex_protocol::protocol::ItemCompletedEvent;
use codex_protocol::protocol::RolloutItem;
use codex_protocol::protocol::ThreadRolledBackEvent;
use codex_protocol::protocol::TurnAbortedEvent;
use codex_protocol::protocol::TurnCompleteEvent;
use codex_protocol::protocol::TurnStartedEvent;
use codex_protocol::protocol::UserMessageEvent;
use uuid::Uuid;
/// Convert persisted [`RolloutItem`] entries into a sequence of [`Turn`] values.
/// Convert persisted [`EventMsg`] entries into a sequence of [`Turn`] values.
///
/// When available, this uses `TurnContext.turn_id` as the canonical turn id so
/// resumed/rebuilt thread history preserves the original turn identifiers.
pub fn build_turns_from_rollout_items(items: &[RolloutItem]) -> Vec<Turn> {
/// The purpose of this is to convert the EventMsgs persisted in a rollout file
/// into a sequence of Turns and ThreadItems, which allows the client to render
/// the historical messages when resuming a thread.
pub fn build_turns_from_event_msgs(events: &[EventMsg]) -> Vec<Turn> {
let mut builder = ThreadHistoryBuilder::new();
for item in items {
builder.handle_rollout_item(item);
for event in events {
builder.handle_event(event);
}
builder.finish()
}
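For orientation, a compact sketch of the resume path this helper serves, mirroring the rollback/resume call sites elsewhere in this change; the surrounding async context, `rollout_path`, `thread`, and error handling are assumed here:

// Rebuild renderable history from a persisted rollout (illustrative, not a verbatim call site).
let events = read_event_msgs_from_rollout(rollout_path.as_path()).await?; // Vec<EventMsg>
let turns = build_turns_from_event_msgs(&events);                         // Vec<Turn> with synthetic "turn-N"/"item-N" ids
thread.turns = turns;                                                     // ready for the client to render on resume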
@@ -31,6 +27,7 @@ pub fn build_turns_from_rollout_items(items: &[RolloutItem]) -> Vec<Turn> {
struct ThreadHistoryBuilder {
turns: Vec<Turn>,
current_turn: Option<PendingTurn>,
next_turn_index: i64,
next_item_index: i64,
}
@@ -39,6 +36,7 @@ impl ThreadHistoryBuilder {
Self {
turns: Vec::new(),
current_turn: None,
next_turn_index: 1,
next_item_index: 1,
}
}
@@ -65,36 +63,13 @@ impl ThreadHistoryBuilder {
EventMsg::ThreadRolledBack(payload) => self.handle_thread_rollback(payload),
EventMsg::UndoCompleted(_) => {}
EventMsg::TurnAborted(payload) => self.handle_turn_aborted(payload),
EventMsg::TurnStarted(payload) => self.handle_turn_started(payload),
EventMsg::TurnComplete(payload) => self.handle_turn_complete(payload),
_ => {}
}
}
fn handle_rollout_item(&mut self, item: &RolloutItem) {
match item {
RolloutItem::EventMsg(event) => self.handle_event(event),
RolloutItem::Compacted(payload) => self.handle_compacted(payload),
RolloutItem::TurnContext(_)
| RolloutItem::SessionMeta(_)
| RolloutItem::ResponseItem(_) => {}
}
}
fn handle_user_message(&mut self, payload: &UserMessageEvent) {
// User messages should stay in explicitly opened turns. For backward
// compatibility with older streams that did not open turns explicitly,
// close any implicit/inactive turn and start a fresh one for this input.
if let Some(turn) = self.current_turn.as_ref()
&& !turn.opened_explicitly
&& !(turn.saw_compaction && turn.items.is_empty())
{
self.finish_current_turn();
}
let mut turn = self
.current_turn
.take()
.unwrap_or_else(|| self.new_turn(None));
self.finish_current_turn();
let mut turn = self.new_turn();
let id = self.next_item_id();
let content = self.build_user_inputs(payload);
turn.items.push(ThreadItem::UserMessage { id, content });
@@ -172,30 +147,6 @@ impl ThreadHistoryBuilder {
turn.status = TurnStatus::Interrupted;
}
fn handle_turn_started(&mut self, payload: &TurnStartedEvent) {
self.finish_current_turn();
self.current_turn = Some(
self.new_turn(Some(payload.turn_id.clone()))
.opened_explicitly(),
);
}
fn handle_turn_complete(&mut self, _payload: &TurnCompleteEvent) {
if let Some(current_turn) = self.current_turn.as_mut() {
current_turn.status = TurnStatus::Completed;
self.finish_current_turn();
}
}
/// Marks the current turn as containing a persisted compaction marker.
///
/// This keeps compaction-only legacy turns from being dropped by
/// `finish_current_turn` when they have no renderable items and were not
/// explicitly opened.
fn handle_compacted(&mut self, _payload: &CompactedItem) {
self.ensure_turn().saw_compaction = true;
}
fn handle_thread_rollback(&mut self, payload: &ThreadRolledBackEvent) {
self.finish_current_turn();
@@ -206,33 +157,34 @@ impl ThreadHistoryBuilder {
self.turns.truncate(self.turns.len().saturating_sub(n));
}
// Re-number subsequent synthetic ids so the pruned history is consistent.
self.next_turn_index =
i64::try_from(self.turns.len().saturating_add(1)).unwrap_or(i64::MAX);
let item_count: usize = self.turns.iter().map(|t| t.items.len()).sum();
self.next_item_index = i64::try_from(item_count.saturating_add(1)).unwrap_or(i64::MAX);
}
fn finish_current_turn(&mut self) {
if let Some(turn) = self.current_turn.take() {
if turn.items.is_empty() && !turn.opened_explicitly && !turn.saw_compaction {
if turn.items.is_empty() {
return;
}
self.turns.push(turn.into());
}
}
fn new_turn(&mut self, id: Option<String>) -> PendingTurn {
fn new_turn(&mut self) -> PendingTurn {
PendingTurn {
id: id.unwrap_or_else(|| Uuid::now_v7().to_string()),
id: self.next_turn_id(),
items: Vec::new(),
error: None,
status: TurnStatus::Completed,
opened_explicitly: false,
saw_compaction: false,
}
}
fn ensure_turn(&mut self) -> &mut PendingTurn {
if self.current_turn.is_none() {
let turn = self.new_turn(None);
let turn = self.new_turn();
return self.current_turn.insert(turn);
}
@@ -243,6 +195,12 @@ impl ThreadHistoryBuilder {
unreachable!("current turn must exist after initialization");
}
fn next_turn_id(&mut self) -> String {
let id = format!("turn-{}", self.next_turn_index);
self.next_turn_index += 1;
id
}
fn next_item_id(&mut self) -> String {
let id = format!("item-{}", self.next_item_index);
self.next_item_index += 1;
@@ -279,19 +237,6 @@ struct PendingTurn {
items: Vec<ThreadItem>,
error: Option<TurnError>,
status: TurnStatus,
/// True when this turn originated from an explicit `turn_started`/`turn_complete`
/// boundary, so we preserve it even if it has no renderable items.
opened_explicitly: bool,
/// True when this turn includes a persisted `RolloutItem::Compacted`, which
/// should keep the turn from being dropped even without normal items.
saw_compaction: bool,
}
impl PendingTurn {
fn opened_explicitly(mut self) -> Self {
self.opened_explicitly = true;
self
}
}
impl From<PendingTurn> for Turn {
@@ -311,15 +256,11 @@ mod tests {
use codex_protocol::protocol::AgentMessageEvent;
use codex_protocol::protocol::AgentReasoningEvent;
use codex_protocol::protocol::AgentReasoningRawContentEvent;
use codex_protocol::protocol::CompactedItem;
use codex_protocol::protocol::ThreadRolledBackEvent;
use codex_protocol::protocol::TurnAbortReason;
use codex_protocol::protocol::TurnAbortedEvent;
use codex_protocol::protocol::TurnCompleteEvent;
use codex_protocol::protocol::TurnStartedEvent;
use codex_protocol::protocol::UserMessageEvent;
use pretty_assertions::assert_eq;
use uuid::Uuid;
#[test]
fn builds_multiple_turns_with_reasoning_items() {
@@ -350,15 +291,11 @@ mod tests {
}),
];
let items = events
.into_iter()
.map(RolloutItem::EventMsg)
.collect::<Vec<_>>();
let turns = build_turns_from_rollout_items(&items);
let turns = build_turns_from_event_msgs(&events);
assert_eq!(turns.len(), 2);
let first = &turns[0];
assert!(Uuid::parse_str(&first.id).is_ok());
assert_eq!(first.id, "turn-1");
assert_eq!(first.status, TurnStatus::Completed);
assert_eq!(first.items.len(), 3);
assert_eq!(
@@ -393,8 +330,7 @@ mod tests {
);
let second = &turns[1];
assert!(Uuid::parse_str(&second.id).is_ok());
assert_ne!(first.id, second.id);
assert_eq!(second.id, "turn-2");
assert_eq!(second.items.len(), 2);
assert_eq!(
second.items[0],
@@ -438,11 +374,7 @@ mod tests {
}),
];
let items = events
.into_iter()
.map(RolloutItem::EventMsg)
.collect::<Vec<_>>();
let turns = build_turns_from_rollout_items(&items);
let turns = build_turns_from_event_msgs(&events);
assert_eq!(turns.len(), 1);
let turn = &turns[0];
assert_eq!(turn.items.len(), 4);
@@ -478,7 +410,6 @@ mod tests {
message: "Working...".into(),
}),
EventMsg::TurnAborted(TurnAbortedEvent {
turn_id: Some("turn-1".into()),
reason: TurnAbortReason::Replaced,
}),
EventMsg::UserMessage(UserMessageEvent {
@@ -492,11 +423,7 @@ mod tests {
}),
];
let items = events
.into_iter()
.map(RolloutItem::EventMsg)
.collect::<Vec<_>>();
let turns = build_turns_from_rollout_items(&items);
let turns = build_turns_from_event_msgs(&events);
assert_eq!(turns.len(), 2);
let first_turn = &turns[0];
@@ -575,49 +502,46 @@ mod tests {
}),
];
let items = events
.into_iter()
.map(RolloutItem::EventMsg)
.collect::<Vec<_>>();
let turns = build_turns_from_rollout_items(&items);
assert_eq!(turns.len(), 2);
assert!(Uuid::parse_str(&turns[0].id).is_ok());
assert!(Uuid::parse_str(&turns[1].id).is_ok());
assert_ne!(turns[0].id, turns[1].id);
assert_eq!(turns[0].status, TurnStatus::Completed);
assert_eq!(turns[1].status, TurnStatus::Completed);
assert_eq!(
turns[0].items,
vec![
ThreadItem::UserMessage {
id: "item-1".into(),
content: vec![UserInput::Text {
text: "First".into(),
text_elements: Vec::new(),
}],
},
ThreadItem::AgentMessage {
id: "item-2".into(),
text: "A1".into(),
},
]
);
assert_eq!(
turns[1].items,
vec![
ThreadItem::UserMessage {
id: "item-3".into(),
content: vec![UserInput::Text {
text: "Third".into(),
text_elements: Vec::new(),
}],
},
ThreadItem::AgentMessage {
id: "item-4".into(),
text: "A3".into(),
},
]
);
let turns = build_turns_from_event_msgs(&events);
let expected = vec![
Turn {
id: "turn-1".into(),
status: TurnStatus::Completed,
error: None,
items: vec![
ThreadItem::UserMessage {
id: "item-1".into(),
content: vec![UserInput::Text {
text: "First".into(),
text_elements: Vec::new(),
}],
},
ThreadItem::AgentMessage {
id: "item-2".into(),
text: "A1".into(),
},
],
},
Turn {
id: "turn-2".into(),
status: TurnStatus::Completed,
error: None,
items: vec![
ThreadItem::UserMessage {
id: "item-3".into(),
content: vec![UserInput::Text {
text: "Third".into(),
text_elements: Vec::new(),
}],
},
ThreadItem::AgentMessage {
id: "item-4".into(),
text: "A3".into(),
},
],
},
];
assert_eq!(turns, expected);
}
#[test]
@@ -644,95 +568,7 @@ mod tests {
EventMsg::ThreadRolledBack(ThreadRolledBackEvent { num_turns: 99 }),
];
let items = events
.into_iter()
.map(RolloutItem::EventMsg)
.collect::<Vec<_>>();
let turns = build_turns_from_rollout_items(&items);
let turns = build_turns_from_event_msgs(&events);
assert_eq!(turns, Vec::<Turn>::new());
}
#[test]
fn uses_explicit_turn_boundaries_for_mid_turn_steering() {
let events = vec![
EventMsg::TurnStarted(TurnStartedEvent {
turn_id: "turn-a".into(),
model_context_window: None,
collaboration_mode_kind: Default::default(),
}),
EventMsg::UserMessage(UserMessageEvent {
message: "Start".into(),
images: None,
text_elements: Vec::new(),
local_images: Vec::new(),
}),
EventMsg::UserMessage(UserMessageEvent {
message: "Steer".into(),
images: None,
text_elements: Vec::new(),
local_images: Vec::new(),
}),
EventMsg::TurnComplete(TurnCompleteEvent {
turn_id: "turn-a".into(),
last_agent_message: None,
}),
];
let items = events
.into_iter()
.map(RolloutItem::EventMsg)
.collect::<Vec<_>>();
let turns = build_turns_from_rollout_items(&items);
assert_eq!(turns.len(), 1);
assert_eq!(turns[0].id, "turn-a");
assert_eq!(
turns[0].items,
vec![
ThreadItem::UserMessage {
id: "item-1".into(),
content: vec![UserInput::Text {
text: "Start".into(),
text_elements: Vec::new(),
}],
},
ThreadItem::UserMessage {
id: "item-2".into(),
content: vec![UserInput::Text {
text: "Steer".into(),
text_elements: Vec::new(),
}],
},
]
);
}
#[test]
fn preserves_compaction_only_turn() {
let items = vec![
RolloutItem::EventMsg(EventMsg::TurnStarted(TurnStartedEvent {
turn_id: "turn-compact".into(),
model_context_window: None,
collaboration_mode_kind: Default::default(),
})),
RolloutItem::Compacted(CompactedItem {
message: String::new(),
replacement_history: None,
}),
RolloutItem::EventMsg(EventMsg::TurnComplete(TurnCompleteEvent {
turn_id: "turn-compact".into(),
last_agent_message: None,
})),
];
let turns = build_turns_from_rollout_items(&items);
assert_eq!(
turns,
vec![Turn {
id: "turn-compact".into(),
status: TurnStatus::Completed,
error: None,
items: Vec::new(),
}]
);
}
}

View File

@@ -88,7 +88,10 @@ macro_rules! v2_enum_from_core {
pub enum CodexErrorInfo {
ContextWindowExceeded,
UsageLimitExceeded,
ServerOverloaded,
ModelCap {
model: String,
reset_after_seconds: Option<u64>,
},
HttpConnectionFailed {
#[serde(rename = "httpStatusCode")]
#[ts(rename = "httpStatusCode")]
@@ -125,7 +128,13 @@ impl From<CoreCodexErrorInfo> for CodexErrorInfo {
match value {
CoreCodexErrorInfo::ContextWindowExceeded => CodexErrorInfo::ContextWindowExceeded,
CoreCodexErrorInfo::UsageLimitExceeded => CodexErrorInfo::UsageLimitExceeded,
CoreCodexErrorInfo::ServerOverloaded => CodexErrorInfo::ServerOverloaded,
CoreCodexErrorInfo::ModelCap {
model,
reset_after_seconds,
} => CodexErrorInfo::ModelCap {
model,
reset_after_seconds,
},
CoreCodexErrorInfo::HttpConnectionFailed { http_status_code } => {
CodexErrorInfo::HttpConnectionFailed { http_status_code }
}
@@ -1000,10 +1009,7 @@ pub struct ChatgptAuthTokensRefreshResponse {
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct GetAccountRateLimitsResponse {
/// Backward-compatible single-bucket view; mirrors the historical payload.
pub rate_limits: RateLimitSnapshot,
/// Multi-bucket view keyed by metered `limit_id` (for example, `codex`).
pub rate_limits_by_limit_id: Option<HashMap<String, RateLimitSnapshot>>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
@@ -1952,6 +1958,12 @@ pub struct Thread {
pub id: String,
/// Usually the first user message in the thread, if available.
pub preview: String,
/// Tri-state conversation modalities signal:
/// - `None`: unknown / not yet determined
/// - `Some([Text])`: known to be text-only
/// - `Some([Text, Image])`: images are known to exist in context
#[serde(default)]
pub conversation_modalities: Option<Vec<InputModality>>,
/// Model provider used for this thread (for example, 'openai').
pub model_provider: String,
/// Unix timestamp (in seconds) when the thread was created.
@@ -3097,8 +3109,6 @@ pub struct AccountRateLimitsUpdatedNotification {
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct RateLimitSnapshot {
pub limit_id: Option<String>,
pub limit_name: Option<String>,
pub primary: Option<RateLimitWindow>,
pub secondary: Option<RateLimitWindow>,
pub credits: Option<CreditsSnapshot>,
@@ -3108,8 +3118,6 @@ pub struct RateLimitSnapshot {
impl From<CoreRateLimitSnapshot> for RateLimitSnapshot {
fn from(value: CoreRateLimitSnapshot) -> Self {
Self {
limit_id: value.limit_id,
limit_name: value.limit_name,
primary: value.primary.map(RateLimitWindow::from),
secondary: value.secondary.map(RateLimitWindow::from),
credits: value.credits.map(CreditsSnapshot::from),

View File

@@ -52,8 +52,6 @@ use codex_app_server_protocol::SendUserMessageParams;
use codex_app_server_protocol::SendUserMessageResponse;
use codex_app_server_protocol::ServerNotification;
use codex_app_server_protocol::ServerRequest;
use codex_app_server_protocol::ThreadResumeParams;
use codex_app_server_protocol::ThreadResumeResponse;
use codex_app_server_protocol::ThreadStartParams;
use codex_app_server_protocol::ThreadStartResponse;
use codex_app_server_protocol::TurnStartParams;
@@ -114,13 +112,6 @@ enum CliCommand {
/// User message to send to Codex.
user_message: String,
},
/// Resume a V2 thread by id, then send a user message.
ResumeMessageV2 {
/// Existing thread id to resume.
thread_id: String,
/// User message to send to Codex.
user_message: String,
},
/// Start a V2 turn that elicits an ExecCommand approval.
#[command(name = "trigger-cmd-approval")]
TriggerCmdApproval {
@@ -170,16 +161,6 @@ pub fn run() -> Result<()> {
CliCommand::SendMessageV2 { user_message } => {
send_message_v2(&codex_bin, &config_overrides, user_message, &dynamic_tools)
}
CliCommand::ResumeMessageV2 {
thread_id,
user_message,
} => resume_message_v2(
&codex_bin,
&config_overrides,
thread_id,
user_message,
&dynamic_tools,
),
CliCommand::TriggerCmdApproval { user_message } => {
trigger_cmd_approval(&codex_bin, &config_overrides, user_message, &dynamic_tools)
}
@@ -252,41 +233,6 @@ pub fn send_message_v2(
)
}
fn resume_message_v2(
codex_bin: &Path,
config_overrides: &[String],
thread_id: String,
user_message: String,
dynamic_tools: &Option<Vec<DynamicToolSpec>>,
) -> Result<()> {
ensure_dynamic_tools_unused(dynamic_tools, "resume-message-v2")?;
let mut client = CodexClient::spawn(codex_bin, config_overrides)?;
let initialize = client.initialize()?;
println!("< initialize response: {initialize:?}");
let resume_response = client.thread_resume(ThreadResumeParams {
thread_id,
..Default::default()
})?;
println!("< thread/resume response: {resume_response:?}");
let turn_response = client.turn_start(TurnStartParams {
thread_id: resume_response.thread.id.clone(),
input: vec![V2UserInput::Text {
text: user_message,
text_elements: Vec::new(),
}],
..Default::default()
})?;
println!("< turn/start response: {turn_response:?}");
client.stream_turn(&resume_response.thread.id, &turn_response.turn.id)?;
Ok(())
}
fn trigger_cmd_approval(
codex_bin: &Path,
config_overrides: &[String],
@@ -646,16 +592,6 @@ impl CodexClient {
self.send_request(request, request_id, "thread/start")
}
fn thread_resume(&mut self, params: ThreadResumeParams) -> Result<ThreadResumeResponse> {
let request_id = self.request_id();
let request = ClientRequest::ThreadResume {
request_id: request_id.clone(),
params,
};
self.send_request(request, request_id, "thread/resume")
}
fn turn_start(&mut self, params: TurnStartParams) -> Result<TurnStartResponse> {
let request_id = self.request_id();
let request = ClientRequest::TurnStart {

View File

@@ -20,8 +20,8 @@ anyhow = { workspace = true }
async-trait = { workspace = true }
codex-arg0 = { workspace = true }
codex-cloud-requirements = { workspace = true }
codex-common = { workspace = true, features = ["cli"] }
codex-core = { workspace = true }
codex-utils-cli = { workspace = true }
codex-backend-client = { workspace = true }
codex-file-search = { workspace = true }
codex-chatgpt = { workspace = true }
@@ -30,12 +30,8 @@ codex-protocol = { workspace = true }
codex-app-server-protocol = { workspace = true }
codex-feedback = { workspace = true }
codex-rmcp-client = { workspace = true }
codex-utils-absolute-path = { workspace = true }
codex-utils-json-to-toml = { workspace = true }
chrono = { workspace = true }
clap = { workspace = true, features = ["derive"] }
futures = { workspace = true }
owo-colors = { workspace = true, features = ["supports-colors"] }
serde = { workspace = true, features = ["derive"] }
serde_json = { workspace = true }
tempfile = { workspace = true }
@@ -48,7 +44,6 @@ tokio = { workspace = true, features = [
"rt-multi-thread",
"signal",
] }
tokio-tungstenite = { workspace = true }
tracing = { workspace = true, features = ["log"] }
tracing-subscriber = { workspace = true, features = ["env-filter", "fmt"] }
uuid = { workspace = true, features = ["serde", "v7"] }
@@ -62,8 +57,8 @@ axum = { workspace = true, default-features = false, features = [
] }
base64 = { workspace = true }
codex-execpolicy = { workspace = true }
codex-utils-absolute-path = { workspace = true }
core_test_support = { workspace = true }
codex-utils-cargo-bin = { workspace = true }
os_info = { workspace = true }
pretty_assertions = { workspace = true }
rmcp = { workspace = true, default-features = false, features = [
@@ -71,6 +66,5 @@ rmcp = { workspace = true, default-features = false, features = [
"transport-streamable-http-server",
] }
serial_test = { workspace = true }
tokio-tungstenite = { workspace = true }
wiremock = { workspace = true }
shlex = { workspace = true }

View File

@@ -19,20 +19,7 @@
## Protocol
Similar to [MCP](https://modelcontextprotocol.io/), `codex app-server` supports bidirectional communication using JSON-RPC 2.0 messages (with the `"jsonrpc":"2.0"` header omitted on the wire).
Supported transports:
- stdio (`--listen stdio://`, default): newline-delimited JSON (JSONL)
- websocket (`--listen ws://IP:PORT`): one JSON-RPC message per websocket text frame (**experimental / unsupported**)
Websocket transport is currently experimental and unsupported. Do not rely on it for production workloads.
Backpressure behavior:
- The server uses bounded queues between transport ingress, request processing, and outbound writes.
- When request ingress is saturated, new requests are rejected with a JSON-RPC error code `-32001` and message `"Server overloaded; retry later."`.
- Clients should treat this as retryable and use exponential backoff with jitter.
Similar to [MCP](https://modelcontextprotocol.io/), `codex app-server` supports bidirectional communication, streaming JSONL over stdio. The protocol is JSON-RPC 2.0, though the `"jsonrpc":"2.0"` header is omitted.
## Message Schema
@@ -55,7 +42,7 @@ Use the thread APIs to create, list, or archive conversations. Drive a conversat
## Lifecycle Overview
- Initialize once per connection: Immediately after opening a transport connection, send an `initialize` request with your client metadata, then emit an `initialized` notification. Any other request on that connection before this handshake gets rejected.
- Initialize once: Immediately after launching the codex app-server process, send an `initialize` request with your client metadata, then emit an `initialized` notification. Any other request before this handshake gets rejected.
- Start (or resume) a thread: Call `thread/start` to open a fresh conversation. The response returns the thread object and you'll also get a `thread/started` notification. If you're continuing an existing conversation, call `thread/resume` with its ID instead. If you want to branch from an existing conversation, call `thread/fork` to create a new thread id with copied history.
- Begin a turn: To send user input, call `turn/start` with the target `threadId` and the user's input. Optional fields let you override model, cwd, sandbox policy, etc. This immediately returns the new turn object and triggers a `turn/started` notification.
- Stream events: After `turn/start`, keep reading JSON-RPC notifications on stdout. You'll see `item/started`, `item/completed`, deltas like `item/agentMessage/delta`, tool progress, etc. These represent streaming model output plus any side effects (commands, tool calls, reasoning notes).
@@ -63,7 +50,7 @@ Use the thread APIs to create, list, or archive conversations. Drive a conversat
## Initialization
Clients must send a single `initialize` request per transport connection before invoking any other method on that connection, then acknowledge with an `initialized` notification. The server returns the user agent string it will present to upstream services; subsequent requests issued before initialization receive a `"Not initialized"` error, and repeated `initialize` calls on the same connection receive an `"Already initialized"` error.
Clients must send a single `initialize` request before invoking any other method, then acknowledge with an `initialized` notification. The server returns the user agent string it will present to upstream services; subsequent requests issued before initialization receive a `"Not initialized"` error, and repeated `initialize` calls receive an `"Already initialized"` error.
`initialize.params.capabilities` also supports per-connection notification opt-out via `optOutNotificationMethods`, which is a list of exact method names to suppress for that connection. Matching is exact (no wildcards/prefixes). Unknown method names are accepted and ignored.
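For example, a minimal Rust sketch of the first few frames a client could write, one JSON message per line; only the method names come from this document, and the `params` shape is illustrative:

use serde_json::json;

fn main() {
    // JSON-RPC 2.0 payloads with the "jsonrpc" header omitted, newline-delimited.
    let frames = [
        // params/clientInfo fields are placeholders, not the documented schema
        json!({ "id": 1, "method": "initialize",
                "params": { "clientInfo": { "name": "example-client", "version": "0.1.0" } } }),
        json!({ "method": "initialized" }),
        json!({ "id": 2, "method": "thread/start", "params": {} }),
    ];
    for frame in frames {
        println!("{frame}"); // in practice, written to the app-server's stdin
    }
}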

View File

@@ -1,12 +1,15 @@
use crate::codex_message_processor::ApiVersion;
use crate::codex_message_processor::read_rollout_items_from_rollout;
use crate::codex_message_processor::PendingInterrupts;
use crate::codex_message_processor::PendingRollbacks;
use crate::codex_message_processor::TurnSummary;
use crate::codex_message_processor::TurnSummaryStore;
use crate::codex_message_processor::read_event_msgs_from_rollout;
use crate::codex_message_processor::read_summary_from_rollout;
use crate::codex_message_processor::fetch_state_db_conversation_modalities;
use crate::codex_message_processor::summary_to_thread;
use crate::error_code::INTERNAL_ERROR_CODE;
use crate::error_code::INVALID_REQUEST_ERROR_CODE;
use crate::outgoing_message::ThreadScopedOutgoingMessageSender;
use crate::thread_state::ThreadState;
use crate::thread_state::TurnSummary;
use crate::outgoing_message::OutgoingMessageSender;
use codex_app_server_protocol::AccountRateLimitsUpdatedNotification;
use codex_app_server_protocol::AgentMessageDeltaNotification;
use codex_app_server_protocol::ApplyPatchApprovalParams;
@@ -44,6 +47,8 @@ use codex_app_server_protocol::PatchApplyStatus;
use codex_app_server_protocol::PatchChangeKind as V2PatchChangeKind;
use codex_app_server_protocol::PlanDeltaNotification;
use codex_app_server_protocol::RawResponseItemCompletedNotification;
use codex_protocol::openai_models::InputModality;
use codex_protocol::openai_models::input_modalities_to_mask;
use codex_app_server_protocol::ReasoningSummaryPartAddedNotification;
use codex_app_server_protocol::ReasoningSummaryTextDeltaNotification;
use codex_app_server_protocol::ReasoningTextDeltaNotification;
@@ -67,7 +72,7 @@ use codex_app_server_protocol::TurnInterruptResponse;
use codex_app_server_protocol::TurnPlanStep;
use codex_app_server_protocol::TurnPlanUpdatedNotification;
use codex_app_server_protocol::TurnStatus;
use codex_app_server_protocol::build_turns_from_rollout_items;
use codex_app_server_protocol::build_turns_from_event_msgs;
use codex_core::CodexThread;
use codex_core::parse_command::shlex_join;
use codex_core::protocol::ApplyPatchApprovalRequestEvent;
@@ -96,9 +101,9 @@ use std::collections::HashMap;
use std::convert::TryFrom;
use std::path::PathBuf;
use std::sync::Arc;
use tokio::sync::Mutex;
use tokio::sync::oneshot;
use tracing::error;
use tracing::warn;
type JsonValue = serde_json::Value;
@@ -107,8 +112,10 @@ pub(crate) async fn apply_bespoke_event_handling(
event: Event,
conversation_id: ThreadId,
conversation: Arc<CodexThread>,
outgoing: ThreadScopedOutgoingMessageSender,
thread_state: Arc<tokio::sync::Mutex<ThreadState>>,
outgoing: Arc<OutgoingMessageSender>,
pending_interrupts: PendingInterrupts,
pending_rollbacks: PendingRollbacks,
turn_summary_store: TurnSummaryStore,
api_version: ApiVersion,
fallback_model_provider: String,
) {
@@ -119,7 +126,13 @@ pub(crate) async fn apply_bespoke_event_handling(
match msg {
EventMsg::TurnStarted(_) => {}
EventMsg::TurnComplete(_ev) => {
handle_turn_complete(conversation_id, event_turn_id, &outgoing, &thread_state).await;
handle_turn_complete(
conversation_id,
event_turn_id,
&outgoing,
&turn_summary_store,
)
.await;
}
EventMsg::ApplyPatchApprovalRequest(ApplyPatchApprovalRequestEvent {
call_id,
@@ -150,11 +163,9 @@ pub(crate) async fn apply_bespoke_event_handling(
let patch_changes = convert_patch_changes(&changes);
let first_start = {
let mut state = thread_state.lock().await;
state
.turn_summary
.file_change_started
.insert(item_id.clone())
let mut map = turn_summary_store.lock().await;
let summary = map.entry(conversation_id).or_default();
summary.file_change_started.insert(item_id.clone())
};
if first_start {
let item = ThreadItem::FileChange {
@@ -191,7 +202,7 @@ pub(crate) async fn apply_bespoke_event_handling(
rx,
conversation,
outgoing,
thread_state.clone(),
turn_summary_store,
)
.await;
});
@@ -711,7 +722,7 @@ pub(crate) async fn apply_bespoke_event_handling(
return handle_thread_rollback_failed(
conversation_id,
message,
&thread_state,
&pending_rollbacks,
&outgoing,
)
.await;
@@ -722,7 +733,7 @@ pub(crate) async fn apply_bespoke_event_handling(
codex_error_info: ev.codex_error_info.map(V2CodexErrorInfo::from),
additional_details: None,
};
handle_error(conversation_id, turn_error.clone(), &thread_state).await;
handle_error(conversation_id, turn_error.clone(), &turn_summary_store).await;
outgoing
.send_server_notification(ServerNotification::Error(ErrorNotification {
error: turn_error.clone(),
@@ -845,12 +856,27 @@ pub(crate) async fn apply_bespoke_event_handling(
.await;
}
EventMsg::RawResponseItem(raw_response_item_event) => {
if raw_response_item_event.item.has_input_image() {
let modalities = [InputModality::Text, InputModality::Image];
if let Some(ctx) = conversation.state_db()
&& let Err(err) = ctx
.set_thread_conversation_modalities(
conversation_id,
input_modalities_to_mask(&modalities),
)
.await
{
warn!(
"failed to persist conversation modalities for thread {conversation_id}: {err}"
);
}
}
maybe_emit_raw_response_item_completed(
api_version,
conversation_id,
&event_turn_id,
raw_response_item_event.item,
&outgoing,
outgoing.as_ref(),
)
.await;
}
@@ -860,11 +886,9 @@ pub(crate) async fn apply_bespoke_event_handling(
let item_id = patch_begin_event.call_id.clone();
let first_start = {
let mut state = thread_state.lock().await;
state
.turn_summary
.file_change_started
.insert(item_id.clone())
let mut map = turn_summary_store.lock().await;
let summary = map.entry(conversation_id).or_default();
summary.file_change_started.insert(item_id.clone())
};
if first_start {
let item = ThreadItem::FileChange {
@@ -899,8 +923,8 @@ pub(crate) async fn apply_bespoke_event_handling(
changes,
status,
event_turn_id.clone(),
&outgoing,
&thread_state,
outgoing.as_ref(),
&turn_summary_store,
)
.await;
}
@@ -945,8 +969,9 @@ pub(crate) async fn apply_bespoke_event_handling(
// We need to detect which item type it is so we can emit the right notification.
// We already have state tracking FileChange items on item/started, so let's use that.
let is_file_change = {
let state = thread_state.lock().await;
state.turn_summary.file_change_started.contains(&item_id)
let map = turn_summary_store.lock().await;
map.get(&conversation_id)
.is_some_and(|summary| summary.file_change_started.contains(&item_id))
};
if is_file_change {
let notification = FileChangeOutputDeltaNotification {
@@ -1043,8 +1068,8 @@ pub(crate) async fn apply_bespoke_event_handling(
// If this is a TurnAborted, reply to any pending interrupt requests.
EventMsg::TurnAborted(turn_aborted_event) => {
let pending = {
let mut state = thread_state.lock().await;
std::mem::take(&mut state.pending_interrupts)
let mut map = pending_interrupts.lock().await;
map.remove(&conversation_id).unwrap_or_default()
};
if !pending.is_empty() {
for (rid, ver) in pending {
@@ -1063,12 +1088,18 @@ pub(crate) async fn apply_bespoke_event_handling(
}
}
handle_turn_interrupted(conversation_id, event_turn_id, &outgoing, &thread_state).await;
handle_turn_interrupted(
conversation_id,
event_turn_id,
&outgoing,
&turn_summary_store,
)
.await;
}
EventMsg::ThreadRolledBack(_rollback_event) => {
let pending = {
let mut state = thread_state.lock().await;
state.pending_rollbacks.take()
let mut map = pending_rollbacks.lock().await;
map.remove(&conversation_id)
};
if let Some(request_id) = pending {
@@ -1088,10 +1119,15 @@ pub(crate) async fn apply_bespoke_event_handling(
.await
{
Ok(summary) => {
let mut thread = summary_to_thread(summary);
match read_rollout_items_from_rollout(rollout_path.as_path()).await {
Ok(items) => {
thread.turns = build_turns_from_rollout_items(&items);
let conversation_modalities = fetch_state_db_conversation_modalities(
conversation.state_db().as_ref(),
conversation_id,
)
.await;
let mut thread = summary_to_thread(summary, conversation_modalities);
match read_event_msgs_from_rollout(rollout_path.as_path()).await {
Ok(events) => {
thread.turns = build_turns_from_event_msgs(&events);
ThreadRollbackResponse { thread }
}
Err(err) => {
@@ -1103,7 +1139,7 @@ pub(crate) async fn apply_bespoke_event_handling(
),
data: None,
};
outgoing.send_error(request_id.clone(), error).await;
outgoing.send_error(request_id, error).await;
return;
}
}
@@ -1117,7 +1153,7 @@ pub(crate) async fn apply_bespoke_event_handling(
),
data: None,
};
outgoing.send_error(request_id.clone(), error).await;
outgoing.send_error(request_id, error).await;
return;
}
};
@@ -1142,7 +1178,7 @@ pub(crate) async fn apply_bespoke_event_handling(
&event_turn_id,
turn_diff_event,
api_version,
&outgoing,
outgoing.as_ref(),
)
.await;
}
@@ -1152,7 +1188,7 @@ pub(crate) async fn apply_bespoke_event_handling(
&event_turn_id,
plan_update_event,
api_version,
&outgoing,
outgoing.as_ref(),
)
.await;
}
@@ -1166,7 +1202,7 @@ async fn handle_turn_diff(
event_turn_id: &str,
turn_diff_event: TurnDiffEvent,
api_version: ApiVersion,
outgoing: &ThreadScopedOutgoingMessageSender,
outgoing: &OutgoingMessageSender,
) {
if let ApiVersion::V2 = api_version {
let notification = TurnDiffUpdatedNotification {
@@ -1185,7 +1221,7 @@ async fn handle_turn_plan_update(
event_turn_id: &str,
plan_update_event: UpdatePlanArgs,
api_version: ApiVersion,
outgoing: &ThreadScopedOutgoingMessageSender,
outgoing: &OutgoingMessageSender,
) {
// `update_plan` is a todo/checklist tool; it is not related to plan-mode updates
if let ApiVersion::V2 = api_version {
@@ -1210,7 +1246,7 @@ async fn emit_turn_completed_with_status(
event_turn_id: String,
status: TurnStatus,
error: Option<TurnError>,
outgoing: &ThreadScopedOutgoingMessageSender,
outgoing: &OutgoingMessageSender,
) {
let notification = TurnCompletedNotification {
thread_id: conversation_id.to_string(),
@@ -1232,12 +1268,15 @@ async fn complete_file_change_item(
changes: Vec<FileUpdateChange>,
status: PatchApplyStatus,
turn_id: String,
outgoing: &ThreadScopedOutgoingMessageSender,
thread_state: &Arc<Mutex<ThreadState>>,
outgoing: &OutgoingMessageSender,
turn_summary_store: &TurnSummaryStore,
) {
let mut state = thread_state.lock().await;
state.turn_summary.file_change_started.remove(&item_id);
drop(state);
{
let mut map = turn_summary_store.lock().await;
if let Some(summary) = map.get_mut(&conversation_id) {
summary.file_change_started.remove(&item_id);
}
}
let item = ThreadItem::FileChange {
id: item_id,
@@ -1264,7 +1303,7 @@ async fn complete_command_execution_item(
process_id: Option<String>,
command_actions: Vec<V2ParsedCommand>,
status: CommandExecutionStatus,
outgoing: &ThreadScopedOutgoingMessageSender,
outgoing: &OutgoingMessageSender,
) {
let item = ThreadItem::CommandExecution {
id: item_id,
@@ -1292,7 +1331,7 @@ async fn maybe_emit_raw_response_item_completed(
conversation_id: ThreadId,
turn_id: &str,
item: codex_protocol::models::ResponseItem,
outgoing: &ThreadScopedOutgoingMessageSender,
outgoing: &OutgoingMessageSender,
) {
let ApiVersion::V2 = api_version else {
return;
@@ -1309,20 +1348,20 @@ async fn maybe_emit_raw_response_item_completed(
}
async fn find_and_remove_turn_summary(
_conversation_id: ThreadId,
thread_state: &Arc<Mutex<ThreadState>>,
conversation_id: ThreadId,
turn_summary_store: &TurnSummaryStore,
) -> TurnSummary {
let mut state = thread_state.lock().await;
std::mem::take(&mut state.turn_summary)
let mut map = turn_summary_store.lock().await;
map.remove(&conversation_id).unwrap_or_default()
}
async fn handle_turn_complete(
conversation_id: ThreadId,
event_turn_id: String,
outgoing: &ThreadScopedOutgoingMessageSender,
thread_state: &Arc<Mutex<ThreadState>>,
outgoing: &OutgoingMessageSender,
turn_summary_store: &TurnSummaryStore,
) {
let turn_summary = find_and_remove_turn_summary(conversation_id, thread_state).await;
let turn_summary = find_and_remove_turn_summary(conversation_id, turn_summary_store).await;
let (status, error) = match turn_summary.last_error {
Some(error) => (TurnStatus::Failed, Some(error)),
@@ -1335,10 +1374,10 @@ async fn handle_turn_complete(
async fn handle_turn_interrupted(
conversation_id: ThreadId,
event_turn_id: String,
outgoing: &ThreadScopedOutgoingMessageSender,
thread_state: &Arc<Mutex<ThreadState>>,
outgoing: &OutgoingMessageSender,
turn_summary_store: &TurnSummaryStore,
) {
find_and_remove_turn_summary(conversation_id, thread_state).await;
find_and_remove_turn_summary(conversation_id, turn_summary_store).await;
emit_turn_completed_with_status(
conversation_id,
@@ -1351,12 +1390,15 @@ async fn handle_turn_interrupted(
}
async fn handle_thread_rollback_failed(
_conversation_id: ThreadId,
conversation_id: ThreadId,
message: String,
thread_state: &Arc<Mutex<ThreadState>>,
outgoing: &ThreadScopedOutgoingMessageSender,
pending_rollbacks: &PendingRollbacks,
outgoing: &OutgoingMessageSender,
) {
let pending_rollback = thread_state.lock().await.pending_rollbacks.take();
let pending_rollback = {
let mut map = pending_rollbacks.lock().await;
map.remove(&conversation_id)
};
if let Some(request_id) = pending_rollback {
outgoing
@@ -1376,7 +1418,7 @@ async fn handle_token_count_event(
conversation_id: ThreadId,
turn_id: String,
token_count_event: TokenCountEvent,
outgoing: &ThreadScopedOutgoingMessageSender,
outgoing: &OutgoingMessageSender,
) {
let TokenCountEvent { info, rate_limits } = token_count_event;
if let Some(token_usage) = info.map(ThreadTokenUsage::from) {
@@ -1401,12 +1443,12 @@ async fn handle_token_count_event(
}
async fn handle_error(
_conversation_id: ThreadId,
conversation_id: ThreadId,
error: TurnError,
thread_state: &Arc<Mutex<ThreadState>>,
turn_summary_store: &TurnSummaryStore,
) {
let mut state = thread_state.lock().await;
state.turn_summary.last_error = Some(error);
let mut map = turn_summary_store.lock().await;
map.entry(conversation_id).or_default().last_error = Some(error);
}
async fn on_patch_approval_response(
@@ -1633,8 +1675,8 @@ async fn on_file_change_request_approval_response(
changes: Vec<FileUpdateChange>,
receiver: oneshot::Receiver<JsonValue>,
codex: Arc<CodexThread>,
outgoing: ThreadScopedOutgoingMessageSender,
thread_state: Arc<Mutex<ThreadState>>,
outgoing: Arc<OutgoingMessageSender>,
turn_summary_store: TurnSummaryStore,
) {
let response = receiver.await;
let (decision, completion_status) = match response {
@@ -1666,8 +1708,8 @@ async fn on_file_change_request_approval_response(
changes,
status,
event_turn_id.clone(),
&outgoing,
&thread_state,
outgoing.as_ref(),
&turn_summary_store,
)
.await;
}
@@ -1693,7 +1735,7 @@ async fn on_command_execution_request_approval_response(
command_actions: Vec<V2ParsedCommand>,
receiver: oneshot::Receiver<JsonValue>,
conversation: Arc<CodexThread>,
outgoing: ThreadScopedOutgoingMessageSender,
outgoing: Arc<OutgoingMessageSender>,
) {
let response = receiver.await;
let (decision, completion_status) = match response {
@@ -1748,7 +1790,7 @@ async fn on_command_execution_request_approval_response(
None,
command_actions.clone(),
status,
&outgoing,
outgoing.as_ref(),
)
.await;
}
@@ -1876,8 +1918,6 @@ async fn construct_mcp_tool_call_end_notification(
mod tests {
use super::*;
use crate::CHANNEL_CAPACITY;
use crate::outgoing_message::ConnectionId;
use crate::outgoing_message::OutgoingEnvelope;
use crate::outgoing_message::OutgoingMessage;
use crate::outgoing_message::OutgoingMessageSender;
use anyhow::Result;
@@ -1898,25 +1938,13 @@ mod tests {
use pretty_assertions::assert_eq;
use rmcp::model::Content;
use serde_json::Value as JsonValue;
use std::collections::HashMap;
use std::time::Duration;
use tokio::sync::Mutex;
use tokio::sync::mpsc;
fn new_thread_state() -> Arc<Mutex<ThreadState>> {
Arc::new(Mutex::new(ThreadState::default()))
}
async fn recv_broadcast_message(
rx: &mut mpsc::Receiver<OutgoingEnvelope>,
) -> Result<OutgoingMessage> {
let envelope = rx
.recv()
.await
.ok_or_else(|| anyhow!("should send one message"))?;
match envelope {
OutgoingEnvelope::Broadcast { message } => Ok(message),
OutgoingEnvelope::ToConnection { message, .. } => Ok(message),
}
fn new_turn_summary_store() -> TurnSummaryStore {
Arc::new(Mutex::new(HashMap::new()))
}
#[test]
@@ -1979,7 +2007,7 @@ mod tests {
#[tokio::test]
async fn test_handle_error_records_message() -> Result<()> {
let conversation_id = ThreadId::new();
let thread_state = new_thread_state();
let turn_summary_store = new_turn_summary_store();
handle_error(
conversation_id,
@@ -1988,11 +2016,11 @@ mod tests {
codex_error_info: Some(V2CodexErrorInfo::InternalServerError),
additional_details: None,
},
&thread_state,
&turn_summary_store,
)
.await;
let turn_summary = find_and_remove_turn_summary(conversation_id, &thread_state).await;
let turn_summary = find_and_remove_turn_summary(conversation_id, &turn_summary_store).await;
assert_eq!(
turn_summary.last_error,
Some(TurnError {
@@ -2010,18 +2038,20 @@ mod tests {
let event_turn_id = "complete1".to_string();
let (tx, mut rx) = mpsc::channel(CHANNEL_CAPACITY);
let outgoing = Arc::new(OutgoingMessageSender::new(tx));
let outgoing = ThreadScopedOutgoingMessageSender::new(outgoing, vec![ConnectionId(1)]);
let thread_state = new_thread_state();
let turn_summary_store = new_turn_summary_store();
handle_turn_complete(
conversation_id,
event_turn_id.clone(),
&outgoing,
&thread_state,
&turn_summary_store,
)
.await;
let msg = recv_broadcast_message(&mut rx).await?;
let msg = rx
.recv()
.await
.ok_or_else(|| anyhow!("should send one notification"))?;
match msg {
OutgoingMessage::AppServerNotification(ServerNotification::TurnCompleted(n)) => {
assert_eq!(n.turn.id, event_turn_id);
@@ -2038,7 +2068,7 @@ mod tests {
async fn test_handle_turn_interrupted_emits_interrupted_with_error() -> Result<()> {
let conversation_id = ThreadId::new();
let event_turn_id = "interrupt1".to_string();
let thread_state = new_thread_state();
let turn_summary_store = new_turn_summary_store();
handle_error(
conversation_id,
TurnError {
@@ -2046,22 +2076,24 @@ mod tests {
codex_error_info: None,
additional_details: None,
},
&thread_state,
&turn_summary_store,
)
.await;
let (tx, mut rx) = mpsc::channel(CHANNEL_CAPACITY);
let outgoing = Arc::new(OutgoingMessageSender::new(tx));
let outgoing = ThreadScopedOutgoingMessageSender::new(outgoing, vec![ConnectionId(1)]);
handle_turn_interrupted(
conversation_id,
event_turn_id.clone(),
&outgoing,
&thread_state,
&turn_summary_store,
)
.await;
let msg = recv_broadcast_message(&mut rx).await?;
let msg = rx
.recv()
.await
.ok_or_else(|| anyhow!("should send one notification"))?;
match msg {
OutgoingMessage::AppServerNotification(ServerNotification::TurnCompleted(n)) => {
assert_eq!(n.turn.id, event_turn_id);
@@ -2078,7 +2110,7 @@ mod tests {
async fn test_handle_turn_complete_emits_failed_with_error() -> Result<()> {
let conversation_id = ThreadId::new();
let event_turn_id = "complete_err1".to_string();
let thread_state = new_thread_state();
let turn_summary_store = new_turn_summary_store();
handle_error(
conversation_id,
TurnError {
@@ -2086,22 +2118,24 @@ mod tests {
codex_error_info: Some(V2CodexErrorInfo::Other),
additional_details: None,
},
&thread_state,
&turn_summary_store,
)
.await;
let (tx, mut rx) = mpsc::channel(CHANNEL_CAPACITY);
let outgoing = Arc::new(OutgoingMessageSender::new(tx));
let outgoing = ThreadScopedOutgoingMessageSender::new(outgoing, vec![ConnectionId(1)]);
handle_turn_complete(
conversation_id,
event_turn_id.clone(),
&outgoing,
&thread_state,
&turn_summary_store,
)
.await;
let msg = recv_broadcast_message(&mut rx).await?;
let msg = rx
.recv()
.await
.ok_or_else(|| anyhow!("should send one notification"))?;
match msg {
OutgoingMessage::AppServerNotification(ServerNotification::TurnCompleted(n)) => {
assert_eq!(n.turn.id, event_turn_id);
@@ -2124,8 +2158,7 @@ mod tests {
#[tokio::test]
async fn test_handle_turn_plan_update_emits_notification_for_v2() -> Result<()> {
let (tx, mut rx) = mpsc::channel(CHANNEL_CAPACITY);
let outgoing = Arc::new(OutgoingMessageSender::new(tx));
let outgoing = ThreadScopedOutgoingMessageSender::new(outgoing, vec![ConnectionId(1)]);
let outgoing = OutgoingMessageSender::new(tx);
let update = UpdatePlanArgs {
explanation: Some("need plan".to_string()),
plan: vec![
@@ -2151,7 +2184,10 @@ mod tests {
)
.await;
let msg = recv_broadcast_message(&mut rx).await?;
let msg = rx
.recv()
.await
.ok_or_else(|| anyhow!("should send one notification"))?;
match msg {
OutgoingMessage::AppServerNotification(ServerNotification::TurnPlanUpdated(n)) => {
assert_eq!(n.thread_id, conversation_id.to_string());
@@ -2175,7 +2211,6 @@ mod tests {
let turn_id = "turn-123".to_string();
let (tx, mut rx) = mpsc::channel(CHANNEL_CAPACITY);
let outgoing = Arc::new(OutgoingMessageSender::new(tx));
let outgoing = ThreadScopedOutgoingMessageSender::new(outgoing, vec![ConnectionId(1)]);
let info = TokenUsageInfo {
total_token_usage: TokenUsage {
@@ -2195,8 +2230,6 @@ mod tests {
model_context_window: Some(4096),
};
let rate_limits = RateLimitSnapshot {
limit_id: Some("codex".to_string()),
limit_name: None,
primary: Some(RateLimitWindow {
used_percent: 42.5,
window_minutes: Some(15),
@@ -2222,7 +2255,10 @@ mod tests {
)
.await;
let first = recv_broadcast_message(&mut rx).await?;
let first = rx
.recv()
.await
.ok_or_else(|| anyhow!("expected usage notification"))?;
match first {
OutgoingMessage::AppServerNotification(
ServerNotification::ThreadTokenUsageUpdated(payload),
@@ -2238,13 +2274,14 @@ mod tests {
other => bail!("unexpected notification: {other:?}"),
}
let second = recv_broadcast_message(&mut rx).await?;
let second = rx
.recv()
.await
.ok_or_else(|| anyhow!("expected rate limit notification"))?;
match second {
OutgoingMessage::AppServerNotification(
ServerNotification::AccountRateLimitsUpdated(payload),
) => {
assert_eq!(payload.rate_limits.limit_id.as_deref(), Some("codex"));
assert_eq!(payload.rate_limits.limit_name, None);
assert!(payload.rate_limits.primary.is_some());
assert!(payload.rate_limits.credits.is_some());
}
@@ -2259,7 +2296,6 @@ mod tests {
let turn_id = "turn-456".to_string();
let (tx, mut rx) = mpsc::channel(CHANNEL_CAPACITY);
let outgoing = Arc::new(OutgoingMessageSender::new(tx));
let outgoing = ThreadScopedOutgoingMessageSender::new(outgoing, vec![ConnectionId(1)]);
handle_token_count_event(
conversation_id,
@@ -2322,11 +2358,10 @@ mod tests {
// Conversation A will have two turns; Conversation B will have one turn.
let conversation_a = ThreadId::new();
let conversation_b = ThreadId::new();
let thread_state = new_thread_state();
let turn_summary_store = new_turn_summary_store();
let (tx, mut rx) = mpsc::channel(CHANNEL_CAPACITY);
let outgoing = Arc::new(OutgoingMessageSender::new(tx));
let outgoing = ThreadScopedOutgoingMessageSender::new(outgoing, vec![ConnectionId(1)]);
// Turn 1 on conversation A
let a_turn1 = "a_turn1".to_string();
@@ -2337,10 +2372,16 @@ mod tests {
codex_error_info: Some(V2CodexErrorInfo::BadRequest),
additional_details: None,
},
&thread_state,
&turn_summary_store,
)
.await;
handle_turn_complete(
conversation_a,
a_turn1.clone(),
&outgoing,
&turn_summary_store,
)
.await;
handle_turn_complete(conversation_a, a_turn1.clone(), &outgoing, &thread_state).await;
// Turn 1 on conversation B
let b_turn1 = "b_turn1".to_string();
@@ -2351,17 +2392,32 @@ mod tests {
codex_error_info: None,
additional_details: None,
},
&thread_state,
&turn_summary_store,
)
.await;
handle_turn_complete(
conversation_b,
b_turn1.clone(),
&outgoing,
&turn_summary_store,
)
.await;
handle_turn_complete(conversation_b, b_turn1.clone(), &outgoing, &thread_state).await;
// Turn 2 on conversation A
let a_turn2 = "a_turn2".to_string();
handle_turn_complete(conversation_a, a_turn2.clone(), &outgoing, &thread_state).await;
handle_turn_complete(
conversation_a,
a_turn2.clone(),
&outgoing,
&turn_summary_store,
)
.await;
// Verify: A turn 1
let msg = recv_broadcast_message(&mut rx).await?;
let msg = rx
.recv()
.await
.ok_or_else(|| anyhow!("should send first notification"))?;
match msg {
OutgoingMessage::AppServerNotification(ServerNotification::TurnCompleted(n)) => {
assert_eq!(n.turn.id, a_turn1);
@@ -2379,7 +2435,10 @@ mod tests {
}
// Verify: B turn 1
let msg = recv_broadcast_message(&mut rx).await?;
let msg = rx
.recv()
.await
.ok_or_else(|| anyhow!("should send second notification"))?;
match msg {
OutgoingMessage::AppServerNotification(ServerNotification::TurnCompleted(n)) => {
assert_eq!(n.turn.id, b_turn1);
@@ -2397,7 +2456,10 @@ mod tests {
}
// Verify: A turn 2
let msg = recv_broadcast_message(&mut rx).await?;
let msg = rx
.recv()
.await
.ok_or_else(|| anyhow!("should send third notification"))?;
match msg {
OutgoingMessage::AppServerNotification(ServerNotification::TurnCompleted(n)) => {
assert_eq!(n.turn.id, a_turn2);
@@ -2548,8 +2610,7 @@ mod tests {
#[tokio::test]
async fn test_handle_turn_diff_emits_v2_notification() -> Result<()> {
let (tx, mut rx) = mpsc::channel(CHANNEL_CAPACITY);
let outgoing = Arc::new(OutgoingMessageSender::new(tx));
let outgoing = ThreadScopedOutgoingMessageSender::new(outgoing, vec![ConnectionId(1)]);
let outgoing = OutgoingMessageSender::new(tx);
let unified_diff = "--- a\n+++ b\n".to_string();
let conversation_id = ThreadId::new();
@@ -2564,7 +2625,10 @@ mod tests {
)
.await;
let msg = recv_broadcast_message(&mut rx).await?;
let msg = rx
.recv()
.await
.ok_or_else(|| anyhow!("should send one notification"))?;
match msg {
OutgoingMessage::AppServerNotification(ServerNotification::TurnDiffUpdated(
notification,
@@ -2582,8 +2646,7 @@ mod tests {
#[tokio::test]
async fn test_handle_turn_diff_is_noop_for_v1() -> Result<()> {
let (tx, mut rx) = mpsc::channel(CHANNEL_CAPACITY);
let outgoing = Arc::new(OutgoingMessageSender::new(tx));
let outgoing = ThreadScopedOutgoingMessageSender::new(outgoing, vec![ConnectionId(1)]);
let outgoing = OutgoingMessageSender::new(tx);
let conversation_id = ThreadId::new();
handle_turn_diff(

File diff suppressed because it is too large

View File

@@ -1,3 +1,2 @@
pub(crate) const INVALID_REQUEST_ERROR_CODE: i64 = -32600;
pub(crate) const INTERNAL_ERROR_CODE: i64 = -32603;
pub(crate) const OVERLOADED_ERROR_CODE: i64 = -32001;

View File

@@ -1,35 +1,21 @@
#![deny(clippy::print_stdout, clippy::print_stderr)]
use codex_cloud_requirements::cloud_requirements_loader;
use codex_common::CliConfigOverrides;
use codex_core::AuthManager;
use codex_core::config::Config;
use codex_core::config::ConfigBuilder;
use codex_core::config_loader::CloudRequirementsLoader;
use codex_core::config_loader::ConfigLayerStackOrdering;
use codex_core::config_loader::LoaderOverrides;
use codex_utils_cli::CliConfigOverrides;
use std::collections::HashMap;
use std::collections::HashSet;
use std::collections::VecDeque;
use std::io::ErrorKind;
use std::io::Result as IoResult;
use std::path::PathBuf;
use std::sync::Arc;
use std::sync::RwLock;
use std::sync::atomic::AtomicBool;
use crate::message_processor::MessageProcessor;
use crate::message_processor::MessageProcessorArgs;
use crate::outgoing_message::ConnectionId;
use crate::outgoing_message::OutgoingEnvelope;
use crate::outgoing_message::OutgoingMessage;
use crate::outgoing_message::OutgoingMessageSender;
use crate::transport::CHANNEL_CAPACITY;
use crate::transport::ConnectionState;
use crate::transport::OutboundConnectionState;
use crate::transport::TransportEvent;
use crate::transport::route_outgoing_envelope;
use crate::transport::start_stdio_connection;
use crate::transport::start_websocket_acceptor;
use codex_app_server_protocol::ConfigLayerSource;
use codex_app_server_protocol::ConfigWarningNotification;
use codex_app_server_protocol::JSONRPCMessage;
@@ -40,9 +26,13 @@ use codex_core::check_execpolicy_for_warnings;
use codex_core::config_loader::ConfigLoadError;
use codex_core::config_loader::TextRange as CoreTextRange;
use codex_feedback::CodexFeedback;
use tokio::io::AsyncBufReadExt;
use tokio::io::AsyncWriteExt;
use tokio::io::BufReader;
use tokio::io::{self};
use tokio::sync::mpsc;
use tokio::task::JoinHandle;
use toml::Value as TomlValue;
use tracing::debug;
use tracing::error;
use tracing::info;
use tracing::warn;
@@ -61,31 +51,11 @@ mod fuzzy_file_search;
mod message_processor;
mod models;
mod outgoing_message;
mod thread_state;
mod transport;
pub use crate::transport::AppServerTransport;
/// Control-plane messages from the processor/transport side to the outbound router task.
///
/// `run_main_with_transport` now uses two loops/tasks:
/// - processor loop: handles incoming JSON-RPC and request dispatch
/// - outbound loop: performs potentially slow writes to per-connection writers
///
/// `OutboundControlEvent` keeps those loops coordinated without sharing mutable
/// connection state directly. In particular, the outbound loop needs to know
/// when a connection opens/closes so it can route messages correctly.
enum OutboundControlEvent {
/// Register a new writer for an opened connection.
Opened {
connection_id: ConnectionId,
writer: mpsc::Sender<crate::outgoing_message::OutgoingMessage>,
initialized: Arc<AtomicBool>,
opted_out_notification_methods: Arc<RwLock<HashSet<String>>>,
},
/// Remove state for a closed/disconnected connection.
Closed { connection_id: ConnectionId },
}
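A minimal, self-contained sketch of the two-loop coordination described above - not the crate's actual implementation - using hypothetical `Control` and `ConnId` types. A control channel registers and unregisters per-connection writers while a separate data channel carries messages; the router drains control events first via a biased select so routing always sees an up-to-date writer map.
use std::collections::HashMap;
use tokio::sync::mpsc;
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
struct ConnId(u64);
enum Control {
    Opened { id: ConnId, writer: mpsc::Sender<String> },
    Closed { id: ConnId },
}
#[tokio::main]
async fn main() {
    let (control_tx, mut control_rx) = mpsc::channel::<Control>(16);
    let (data_tx, mut data_rx) = mpsc::channel::<(ConnId, String)>(16);
    // Outbound router: sole owner of the writer map, so the processor side
    // never shares mutable connection state with it.
    let router = tokio::spawn(async move {
        let mut writers: HashMap<ConnId, mpsc::Sender<String>> = HashMap::new();
        loop {
            tokio::select! {
                // Drain control events first so opens/closes are applied before routing.
                biased;
                event = control_rx.recv() => match event {
                    Some(Control::Opened { id, writer }) => { writers.insert(id, writer); }
                    Some(Control::Closed { id }) => { writers.remove(&id); }
                    None => break,
                },
                msg = data_rx.recv() => match msg {
                    Some((id, payload)) => {
                        if let Some(writer) = writers.get(&id) {
                            let _ = writer.send(payload).await;
                        }
                    }
                    None => break,
                },
            }
        }
    });
    // Processor side: announce a connection, then route one message to it.
    let (writer_tx, mut writer_rx) = mpsc::channel::<String>(16);
    control_tx
        .send(Control::Opened { id: ConnId(1), writer: writer_tx })
        .await
        .expect("router alive");
    data_tx
        .send((ConnId(1), "hello".to_string()))
        .await
        .expect("router alive");
    println!("routed: {}", writer_rx.recv().await.expect("message routed"));
    drop(control_tx);
    drop(data_tx);
    let _ = router.await;
}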
/// Size of the bounded channels used to communicate between tasks. The value
/// is a balance between throughput and memory usage - 128 messages should be
/// plenty for an interactive CLI.
const CHANNEL_CAPACITY: usize = 128;
fn config_warning_from_error(
summary: impl Into<String>,
@@ -203,41 +173,32 @@ pub async fn run_main(
loader_overrides: LoaderOverrides,
default_analytics_enabled: bool,
) -> IoResult<()> {
run_main_with_transport(
codex_linux_sandbox_exe,
cli_config_overrides,
loader_overrides,
default_analytics_enabled,
AppServerTransport::Stdio,
)
.await
}
// Set up channels.
let (incoming_tx, mut incoming_rx) = mpsc::channel::<JSONRPCMessage>(CHANNEL_CAPACITY);
let (outgoing_tx, mut outgoing_rx) = mpsc::channel::<OutgoingMessage>(CHANNEL_CAPACITY);
pub async fn run_main_with_transport(
codex_linux_sandbox_exe: Option<PathBuf>,
cli_config_overrides: CliConfigOverrides,
loader_overrides: LoaderOverrides,
default_analytics_enabled: bool,
transport: AppServerTransport,
) -> IoResult<()> {
let (transport_event_tx, mut transport_event_rx) =
mpsc::channel::<TransportEvent>(CHANNEL_CAPACITY);
let (outgoing_tx, mut outgoing_rx) = mpsc::channel::<OutgoingEnvelope>(CHANNEL_CAPACITY);
let (outbound_control_tx, mut outbound_control_rx) =
mpsc::channel::<OutboundControlEvent>(CHANNEL_CAPACITY);
// Task: read from stdin, push to `incoming_tx`.
let stdin_reader_handle = tokio::spawn({
async move {
let stdin = io::stdin();
let reader = BufReader::new(stdin);
let mut lines = reader.lines();
let mut stdio_handles = Vec::<JoinHandle<()>>::new();
let mut websocket_accept_handle = None;
match transport {
AppServerTransport::Stdio => {
start_stdio_connection(transport_event_tx.clone(), &mut stdio_handles).await?;
while let Some(line) = lines.next_line().await.unwrap_or_default() {
match serde_json::from_str::<JSONRPCMessage>(&line) {
Ok(msg) => {
if incoming_tx.send(msg).await.is_err() {
// Receiver gone - nothing left to do.
break;
}
}
Err(e) => error!("Failed to deserialize JSONRPCMessage: {e}"),
}
}
debug!("stdin reader finished (EOF)");
}
AppServerTransport::WebSocket { bind_address } => {
websocket_accept_handle =
Some(start_websocket_acceptor(bind_address, transport_event_tx.clone()).await?);
}
}
let shutdown_when_no_connections = matches!(transport, AppServerTransport::Stdio);
});
// Parse CLI overrides once and derive the base Config eagerly so later
// components do not need to work with raw TOML values.
@@ -276,11 +237,7 @@ pub async fn run_main_with_transport(
false,
config.cli_auth_credentials_store_mode,
);
cloud_requirements_loader(
auth_manager,
config.chatgpt_base_url,
config.codex_home.clone(),
)
cloud_requirements_loader(auth_manager, config.chatgpt_base_url)
}
Err(err) => {
warn!(error = %err, "Failed to preload config for cloud requirements");
@@ -368,76 +325,15 @@ pub async fn run_main_with_transport(
}
}
let transport_event_tx_for_outbound = transport_event_tx.clone();
let outbound_handle = tokio::spawn(async move {
let mut outbound_connections = HashMap::<ConnectionId, OutboundConnectionState>::new();
let mut pending_closed_connections = VecDeque::<ConnectionId>::new();
loop {
tokio::select! {
biased;
event = outbound_control_rx.recv() => {
let Some(event) = event else {
break;
};
match event {
OutboundControlEvent::Opened {
connection_id,
writer,
initialized,
opted_out_notification_methods,
} => {
outbound_connections.insert(
connection_id,
OutboundConnectionState::new(
writer,
initialized,
opted_out_notification_methods,
),
);
}
OutboundControlEvent::Closed { connection_id } => {
outbound_connections.remove(&connection_id);
}
}
}
envelope = outgoing_rx.recv() => {
let Some(envelope) = envelope else {
break;
};
let disconnected_connections =
route_outgoing_envelope(&mut outbound_connections, envelope).await;
pending_closed_connections.extend(disconnected_connections);
}
}
while let Some(connection_id) = pending_closed_connections.front().copied() {
match transport_event_tx_for_outbound
.try_send(TransportEvent::ConnectionClosed { connection_id })
{
Ok(()) => {
pending_closed_connections.pop_front();
}
Err(mpsc::error::TrySendError::Full(_)) => {
break;
}
Err(mpsc::error::TrySendError::Closed(_)) => {
return;
}
}
}
}
info!("outbound router task exited (channel closed)");
});
// Task: process incoming messages.
let processor_handle = tokio::spawn({
let outgoing_message_sender = Arc::new(OutgoingMessageSender::new(outgoing_tx));
let outbound_control_tx = outbound_control_tx;
let outgoing_message_sender = OutgoingMessageSender::new(outgoing_tx);
let cli_overrides: Vec<(String, TomlValue)> = cli_kv_overrides.clone();
let loader_overrides = loader_overrides_for_config_api;
let mut processor = MessageProcessor::new(MessageProcessorArgs {
outgoing: outgoing_message_sender,
codex_linux_sandbox_exe,
config: Arc::new(config),
config: std::sync::Arc::new(config),
cli_overrides,
loader_overrides,
cloud_requirements: cloud_requirements.clone(),
@@ -445,119 +341,25 @@ pub async fn run_main_with_transport(
config_warnings,
});
let mut thread_created_rx = processor.thread_created_receiver();
let mut connections = HashMap::<ConnectionId, ConnectionState>::new();
async move {
let mut listen_for_threads = true;
loop {
tokio::select! {
event = transport_event_rx.recv() => {
let Some(event) = event else {
msg = incoming_rx.recv() => {
let Some(msg) = msg else {
break;
};
match event {
TransportEvent::ConnectionOpened { connection_id, writer } => {
let outbound_initialized = Arc::new(AtomicBool::new(false));
let outbound_opted_out_notification_methods =
Arc::new(RwLock::new(HashSet::new()));
if outbound_control_tx
.send(OutboundControlEvent::Opened {
connection_id,
writer,
initialized: Arc::clone(&outbound_initialized),
opted_out_notification_methods: Arc::clone(
&outbound_opted_out_notification_methods,
),
})
.await
.is_err()
{
break;
}
connections.insert(
connection_id,
ConnectionState::new(
outbound_initialized,
outbound_opted_out_notification_methods,
),
);
}
TransportEvent::ConnectionClosed { connection_id } => {
if outbound_control_tx
.send(OutboundControlEvent::Closed { connection_id })
.await
.is_err()
{
break;
}
processor.connection_closed(connection_id).await;
connections.remove(&connection_id);
if shutdown_when_no_connections && connections.is_empty() {
break;
}
}
TransportEvent::IncomingMessage { connection_id, message } => {
match message {
JSONRPCMessage::Request(request) => {
let Some(connection_state) = connections.get_mut(&connection_id) else {
warn!("dropping request from unknown connection: {:?}", connection_id);
continue;
};
let was_initialized = connection_state.session.initialized;
processor
.process_request(
connection_id,
request,
&mut connection_state.session,
&connection_state.outbound_initialized,
)
.await;
if let Ok(mut opted_out_notification_methods) = connection_state
.outbound_opted_out_notification_methods
.write()
{
*opted_out_notification_methods = connection_state
.session
.opted_out_notification_methods
.clone();
} else {
warn!(
"failed to update outbound opted-out notifications"
);
}
if !was_initialized && connection_state.session.initialized {
processor.send_initialize_notifications().await;
}
}
JSONRPCMessage::Response(response) => {
processor.process_response(response).await;
}
JSONRPCMessage::Notification(notification) => {
processor.process_notification(notification).await;
}
JSONRPCMessage::Error(err) => {
processor.process_error(err).await;
}
}
}
match msg {
JSONRPCMessage::Request(r) => processor.process_request(r).await,
JSONRPCMessage::Response(r) => processor.process_response(r).await,
JSONRPCMessage::Notification(n) => processor.process_notification(n).await,
JSONRPCMessage::Error(e) => processor.process_error(e).await,
}
}
created = thread_created_rx.recv(), if listen_for_threads => {
match created {
Ok(thread_id) => {
let initialized_connection_ids: Vec<ConnectionId> = connections
.iter()
.filter_map(|(connection_id, connection_state)| {
connection_state.session.initialized.then_some(*connection_id)
})
.collect();
if !initialized_connection_ids.is_empty() {
processor
.try_attach_thread_listener(
thread_id,
initialized_connection_ids,
)
.await;
}
processor.try_attach_thread_listener(thread_id).await;
}
Err(tokio::sync::broadcast::error::RecvError::Lagged(_)) => {
// TODO(jif) handle lag.
@@ -578,18 +380,33 @@ pub async fn run_main_with_transport(
}
});
drop(transport_event_tx);
// Task: write outgoing messages to stdout.
let stdout_writer_handle = tokio::spawn(async move {
let mut stdout = io::stdout();
while let Some(outgoing_message) = outgoing_rx.recv().await {
let Ok(value) = serde_json::to_value(outgoing_message) else {
error!("Failed to convert OutgoingMessage to JSON value");
continue;
};
match serde_json::to_string(&value) {
Ok(mut json) => {
json.push('\n');
if let Err(e) = stdout.write_all(json.as_bytes()).await {
error!("Failed to write to stdout: {e}");
break;
}
}
Err(e) => error!("Failed to serialize JSONRPCMessage: {e}"),
}
}
let _ = processor_handle.await;
let _ = outbound_handle.await;
info!("stdout writer exited (channel closed)");
});
if let Some(handle) = websocket_accept_handle {
handle.abort();
}
for handle in stdio_handles {
let _ = handle.await;
}
// Wait for all tasks to finish. The typical exit path is the stdin reader
// hitting EOF which, once it drops `incoming_tx`, propagates shutdown to
// the processor and then to the stdout task.
let _ = tokio::join!(stdin_reader_handle, processor_handle, stdout_writer_handle);
Ok(())
}
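The shutdown comment above relies on standard tokio mpsc semantics: once every sender for a channel has been dropped, `recv()` returns `None` and the consuming loop exits, so shutdown propagates task by task. A tiny standalone illustration of that propagation (not the server's code):
use tokio::sync::mpsc;
#[tokio::main]
async fn main() {
    let (tx, mut rx) = mpsc::channel::<String>(8);
    // "Processor": runs until every sender is dropped and recv() yields None.
    let processor = tokio::spawn(async move {
        while let Some(line) = rx.recv().await {
            println!("processed: {line}");
        }
        println!("sender dropped; processor shutting down");
    });
    tx.send("one".to_string()).await.expect("receiver alive");
    drop(tx); // plays the role of the stdin reader hitting EOF and dropping its sender
    processor.await.expect("processor task should finish");
}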

View File

@@ -1,43 +1,26 @@
use clap::Parser;
use codex_app_server::AppServerTransport;
use codex_app_server::run_main_with_transport;
use codex_app_server::run_main;
use codex_arg0::arg0_dispatch_or_else;
use codex_common::CliConfigOverrides;
use codex_core::config_loader::LoaderOverrides;
use codex_utils_cli::CliConfigOverrides;
use std::path::PathBuf;
// Debug-only test hook: lets integration tests point the server at a temporary
// managed config file without writing to /etc.
const MANAGED_CONFIG_PATH_ENV_VAR: &str = "CODEX_APP_SERVER_MANAGED_CONFIG_PATH";
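The `managed_config_path_from_debug_env()` helper called in `main` below is not part of this hunk. A plausible sketch, assuming it does nothing more than read the variable above when it is set and non-empty (hypothetical - the real helper may differ):
use std::path::PathBuf;
// Hypothetical sketch: resolve the debug-only override from the environment.
fn managed_config_path_from_debug_env() -> Option<PathBuf> {
    std::env::var_os(MANAGED_CONFIG_PATH_ENV_VAR)
        .filter(|value| !value.is_empty())
        .map(PathBuf::from)
}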
#[derive(Debug, Parser)]
struct AppServerArgs {
/// Transport endpoint URL. Supported values: `stdio://` (default),
/// `ws://IP:PORT`.
#[arg(
long = "listen",
value_name = "URL",
default_value = AppServerTransport::DEFAULT_LISTEN_URL
)]
listen: AppServerTransport,
}
fn main() -> anyhow::Result<()> {
arg0_dispatch_or_else(|codex_linux_sandbox_exe| async move {
let args = AppServerArgs::parse();
let managed_config_path = managed_config_path_from_debug_env();
let loader_overrides = LoaderOverrides {
managed_config_path,
..Default::default()
};
let transport = args.listen;
run_main_with_transport(
run_main(
codex_linux_sandbox_exe,
CliConfigOverrides::default(),
loader_overrides,
false,
transport,
)
.await?;
Ok(())

View File

@@ -1,4 +1,3 @@
use std::collections::HashSet;
use std::path::PathBuf;
use std::sync::Arc;
use std::sync::RwLock;
@@ -9,8 +8,6 @@ use crate::codex_message_processor::CodexMessageProcessor;
use crate::codex_message_processor::CodexMessageProcessorArgs;
use crate::config_api::ConfigApi;
use crate::error_code::INVALID_REQUEST_ERROR_CODE;
use crate::outgoing_message::ConnectionId;
use crate::outgoing_message::ConnectionRequestId;
use crate::outgoing_message::OutgoingMessageSender;
use async_trait::async_trait;
use codex_app_server_protocol::ChatgptAuthTokensRefreshParams;
@@ -29,6 +26,7 @@ use codex_app_server_protocol::JSONRPCErrorError;
use codex_app_server_protocol::JSONRPCNotification;
use codex_app_server_protocol::JSONRPCRequest;
use codex_app_server_protocol::JSONRPCResponse;
use codex_app_server_protocol::RequestId;
use codex_app_server_protocol::ServerNotification;
use codex_app_server_protocol::ServerRequestPayload;
use codex_app_server_protocol::experimental_required_message;
@@ -114,18 +112,13 @@ pub(crate) struct MessageProcessor {
codex_message_processor: CodexMessageProcessor,
config_api: ConfigApi,
config: Arc<Config>,
config_warnings: Arc<Vec<ConfigWarningNotification>>,
}
#[derive(Clone, Debug, Default)]
pub(crate) struct ConnectionSessionState {
pub(crate) initialized: bool,
experimental_api_enabled: bool,
pub(crate) opted_out_notification_methods: HashSet<String>,
initialized: bool,
experimental_api_enabled: Arc<AtomicBool>,
config_warnings: Vec<ConfigWarningNotification>,
}
pub(crate) struct MessageProcessorArgs {
pub(crate) outgoing: Arc<OutgoingMessageSender>,
pub(crate) outgoing: OutgoingMessageSender,
pub(crate) codex_linux_sandbox_exe: Option<PathBuf>,
pub(crate) config: Arc<Config>,
pub(crate) cli_overrides: Vec<(String, TomlValue)>,
@@ -149,6 +142,8 @@ impl MessageProcessor {
feedback,
config_warnings,
} = args;
let outgoing = Arc::new(outgoing);
let experimental_api_enabled = Arc::new(AtomicBool::new(false));
let auth_manager = AuthManager::shared(
config.codex_home.clone(),
false,
@@ -186,21 +181,14 @@ impl MessageProcessor {
codex_message_processor,
config_api,
config,
config_warnings: Arc::new(config_warnings),
initialized: false,
experimental_api_enabled,
config_warnings,
}
}
pub(crate) async fn process_request(
&mut self,
connection_id: ConnectionId,
request: JSONRPCRequest,
session: &mut ConnectionSessionState,
outbound_initialized: &AtomicBool,
) {
let request_id = ConnectionRequestId {
connection_id,
request_id: request.id.clone(),
};
pub(crate) async fn process_request(&mut self, request: JSONRPCRequest) {
let request_id = request.id.clone();
let request_json = match serde_json::to_value(&request) {
Ok(request_json) => request_json,
Err(err) => {
@@ -231,11 +219,7 @@ impl MessageProcessor {
// Handle Initialize internally so CodexMessageProcessor does not have to concern
// itself with the `initialized` bool.
ClientRequest::Initialize { request_id, params } => {
let request_id = ConnectionRequestId {
connection_id,
request_id,
};
if session.initialized {
if self.initialized {
let error = JSONRPCErrorError {
code: INVALID_REQUEST_ERROR_CODE,
message: "Already initialized".to_string(),
@@ -244,12 +228,6 @@ impl MessageProcessor {
self.outgoing.send_error(request_id, error).await;
return;
} else {
// TODO(maxj): Revisit capability scoping for `experimental_api_enabled`.
// Current behavior is per-connection. Reviewer feedback notes this can
// create odd cross-client behavior (for example dynamic tool calls on a
// shared thread when another connected client did not opt into the
// experimental API). The proposed direction is instance-global, first-write-wins
// semantics with initialize-time mismatch rejection.
let (experimental_api_enabled, opt_out_notification_methods) =
match params.capabilities {
Some(capabilities) => (
@@ -260,9 +238,11 @@ impl MessageProcessor {
),
None => (false, Vec::new()),
};
session.experimental_api_enabled = experimental_api_enabled;
session.opted_out_notification_methods =
opt_out_notification_methods.into_iter().collect();
self.experimental_api_enabled
.store(experimental_api_enabled, Ordering::Relaxed);
self.outgoing
.set_opted_out_notification_methods(opt_out_notification_methods)
.await;
let ClientInfo {
name,
title: _title,
@@ -278,7 +258,7 @@ impl MessageProcessor {
),
data: None,
};
self.outgoing.send_error(request_id.clone(), error).await;
self.outgoing.send_error(request_id, error).await;
return;
}
SetOriginatorError::AlreadyInitialized => {
@@ -299,13 +279,22 @@ impl MessageProcessor {
let response = InitializeResponse { user_agent };
self.outgoing.send_response(request_id, response).await;
session.initialized = true;
outbound_initialized.store(true, Ordering::Release);
self.initialized = true;
if !self.config_warnings.is_empty() {
for notification in self.config_warnings.drain(..) {
self.outgoing
.send_server_notification(ServerNotification::ConfigWarning(
notification,
))
.await;
}
}
return;
}
}
_ => {
if !session.initialized {
if !self.initialized {
let error = JSONRPCErrorError {
code: INVALID_REQUEST_ERROR_CODE,
message: "Not initialized".to_string(),
@@ -318,7 +307,7 @@ impl MessageProcessor {
}
if let Some(reason) = codex_request.experimental_reason()
&& !session.experimental_api_enabled
&& !self.experimental_api_enabled.load(Ordering::Relaxed)
{
let error = JSONRPCErrorError {
code: INVALID_REQUEST_ERROR_CODE,
@@ -331,49 +320,22 @@ impl MessageProcessor {
match codex_request {
ClientRequest::ConfigRead { request_id, params } => {
self.handle_config_read(
ConnectionRequestId {
connection_id,
request_id,
},
params,
)
.await;
self.handle_config_read(request_id, params).await;
}
ClientRequest::ConfigValueWrite { request_id, params } => {
self.handle_config_value_write(
ConnectionRequestId {
connection_id,
request_id,
},
params,
)
.await;
self.handle_config_value_write(request_id, params).await;
}
ClientRequest::ConfigBatchWrite { request_id, params } => {
self.handle_config_batch_write(
ConnectionRequestId {
connection_id,
request_id,
},
params,
)
.await;
self.handle_config_batch_write(request_id, params).await;
}
ClientRequest::ConfigRequirementsRead {
request_id,
params: _,
} => {
self.handle_config_requirements_read(ConnectionRequestId {
connection_id,
request_id,
})
.await;
self.handle_config_requirements_read(request_id).await;
}
other => {
self.codex_message_processor
.process_request(connection_id, other)
.await;
self.codex_message_processor.process_request(other).await;
}
}
}
@@ -388,27 +350,12 @@ impl MessageProcessor {
self.codex_message_processor.thread_created_receiver()
}
pub(crate) async fn send_initialize_notifications(&self) {
for notification in self.config_warnings.iter().cloned() {
self.outgoing
.send_server_notification(ServerNotification::ConfigWarning(notification))
.await;
pub(crate) async fn try_attach_thread_listener(&mut self, thread_id: ThreadId) {
if !self.initialized {
return;
}
}
pub(crate) async fn try_attach_thread_listener(
&mut self,
thread_id: ThreadId,
connection_ids: Vec<ConnectionId>,
) {
self.codex_message_processor
.try_attach_thread_listener(thread_id, connection_ids)
.await;
}
pub(crate) async fn connection_closed(&mut self, connection_id: ConnectionId) {
self.codex_message_processor
.connection_closed(connection_id)
.try_attach_thread_listener(thread_id)
.await;
}
@@ -425,7 +372,7 @@ impl MessageProcessor {
self.outgoing.notify_client_error(err.id, err.error).await;
}
async fn handle_config_read(&self, request_id: ConnectionRequestId, params: ConfigReadParams) {
async fn handle_config_read(&self, request_id: RequestId, params: ConfigReadParams) {
match self.config_api.read(params).await {
Ok(response) => self.outgoing.send_response(request_id, response).await,
Err(error) => self.outgoing.send_error(request_id, error).await,
@@ -434,7 +381,7 @@ impl MessageProcessor {
async fn handle_config_value_write(
&self,
request_id: ConnectionRequestId,
request_id: RequestId,
params: ConfigValueWriteParams,
) {
match self.config_api.write_value(params).await {
@@ -445,7 +392,7 @@ impl MessageProcessor {
async fn handle_config_batch_write(
&self,
request_id: ConnectionRequestId,
request_id: RequestId,
params: ConfigBatchWriteParams,
) {
match self.config_api.batch_write(params).await {
@@ -454,7 +401,7 @@ impl MessageProcessor {
}
}
async fn handle_config_requirements_read(&self, request_id: ConnectionRequestId) {
async fn handle_config_requirements_read(&self, request_id: RequestId) {
match self.config_api.config_requirements_read().await {
Ok(response) => self.outgoing.send_response(request_id, response).await,
Err(error) => self.outgoing.send_error(request_id, error).await,

View File

@@ -1,5 +1,5 @@
use std::collections::HashMap;
use std::sync::Arc;
use std::collections::HashSet;
use std::sync::atomic::AtomicI64;
use std::sync::atomic::Ordering;
@@ -20,108 +20,40 @@ use crate::error_code::INTERNAL_ERROR_CODE;
#[cfg(test)]
use codex_protocol::account::PlanType;
/// Stable identifier for a transport connection.
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
pub(crate) struct ConnectionId(pub(crate) u64);
/// Stable identifier for a client request scoped to a transport connection.
#[derive(Clone, Debug, Eq, Hash, PartialEq)]
pub(crate) struct ConnectionRequestId {
pub(crate) connection_id: ConnectionId,
pub(crate) request_id: RequestId,
}
#[derive(Debug, Clone)]
pub(crate) enum OutgoingEnvelope {
ToConnection {
connection_id: ConnectionId,
message: OutgoingMessage,
},
Broadcast {
message: OutgoingMessage,
},
}
/// Sends messages to the client and manages request callbacks.
pub(crate) struct OutgoingMessageSender {
next_server_request_id: AtomicI64,
sender: mpsc::Sender<OutgoingEnvelope>,
next_request_id: AtomicI64,
sender: mpsc::Sender<OutgoingMessage>,
request_id_to_callback: Mutex<HashMap<RequestId, oneshot::Sender<Result>>>,
opted_out_notification_methods: Mutex<HashSet<String>>,
}
#[derive(Clone)]
pub(crate) struct ThreadScopedOutgoingMessageSender {
outgoing: Arc<OutgoingMessageSender>,
connection_ids: Arc<Vec<ConnectionId>>,
}
impl ThreadScopedOutgoingMessageSender {
pub(crate) fn new(
outgoing: Arc<OutgoingMessageSender>,
connection_ids: Vec<ConnectionId>,
) -> Self {
impl OutgoingMessageSender {
pub(crate) fn new(sender: mpsc::Sender<OutgoingMessage>) -> Self {
Self {
outgoing,
connection_ids: Arc::new(connection_ids),
next_request_id: AtomicI64::new(0),
sender,
request_id_to_callback: Mutex::new(HashMap::new()),
opted_out_notification_methods: Mutex::new(HashSet::new()),
}
}
pub(crate) async fn set_opted_out_notification_methods(&self, methods: Vec<String>) {
let mut opted_out = self.opted_out_notification_methods.lock().await;
opted_out.clear();
opted_out.extend(methods);
}
async fn should_skip_notification(&self, method: &str) -> bool {
let opted_out = self.opted_out_notification_methods.lock().await;
opted_out.contains(method)
}
pub(crate) async fn send_request(
&self,
payload: ServerRequestPayload,
) -> oneshot::Receiver<Result> {
if self.connection_ids.is_empty() {
let (_tx, rx) = oneshot::channel();
return rx;
}
self.outgoing
.send_request_to_connections(self.connection_ids.as_slice(), payload)
.await
}
pub(crate) async fn send_server_notification(&self, notification: ServerNotification) {
if self.connection_ids.is_empty() {
return;
}
self.outgoing
.send_server_notification_to_connections(self.connection_ids.as_slice(), notification)
.await;
}
pub(crate) async fn send_response<T: Serialize>(
&self,
request_id: ConnectionRequestId,
response: T,
) {
self.outgoing.send_response(request_id, response).await;
}
pub(crate) async fn send_error(
&self,
request_id: ConnectionRequestId,
error: JSONRPCErrorError,
) {
self.outgoing.send_error(request_id, error).await;
}
}
impl OutgoingMessageSender {
pub(crate) fn new(sender: mpsc::Sender<OutgoingEnvelope>) -> Self {
Self {
next_server_request_id: AtomicI64::new(0),
sender,
request_id_to_callback: Mutex::new(HashMap::new()),
}
}
pub(crate) async fn send_request_to_connections(
&self,
connection_ids: &[ConnectionId],
request: ServerRequestPayload,
) -> oneshot::Receiver<Result> {
let (_id, rx) = self
.send_request_with_id_to_connections(connection_ids, request)
.await;
let (_id, rx) = self.send_request_with_id(request).await;
rx
}
@@ -129,15 +61,7 @@ impl OutgoingMessageSender {
&self,
request: ServerRequestPayload,
) -> (RequestId, oneshot::Receiver<Result>) {
self.send_request_with_id_to_connections(&[], request).await
}
async fn send_request_with_id_to_connections(
&self,
connection_ids: &[ConnectionId],
request: ServerRequestPayload,
) -> (RequestId, oneshot::Receiver<Result>) {
let id = RequestId::Integer(self.next_server_request_id.fetch_add(1, Ordering::Relaxed));
let id = RequestId::Integer(self.next_request_id.fetch_add(1, Ordering::Relaxed));
let outgoing_message_id = id.clone();
let (tx_approve, rx_approve) = oneshot::channel();
{
@@ -147,34 +71,7 @@ impl OutgoingMessageSender {
let outgoing_message =
OutgoingMessage::Request(request.request_with_id(outgoing_message_id.clone()));
let send_result = if connection_ids.is_empty() {
self.sender
.send(OutgoingEnvelope::Broadcast {
message: outgoing_message,
})
.await
} else {
let mut send_error = None;
for connection_id in connection_ids {
if let Err(err) = self
.sender
.send(OutgoingEnvelope::ToConnection {
connection_id: *connection_id,
message: outgoing_message.clone(),
})
.await
{
send_error = Some(err);
break;
}
}
match send_error {
Some(err) => Err(err),
None => Ok(()),
}
};
if let Err(err) = send_result {
if let Err(err) = self.sender.send(outgoing_message).await {
warn!("failed to send request {outgoing_message_id:?} to client: {err:?}");
let mut request_id_to_callback = self.request_id_to_callback.lock().await;
request_id_to_callback.remove(&outgoing_message_id);
@@ -224,31 +121,17 @@ impl OutgoingMessageSender {
entry.is_some()
}
pub(crate) async fn send_response<T: Serialize>(
&self,
request_id: ConnectionRequestId,
response: T,
) {
pub(crate) async fn send_response<T: Serialize>(&self, id: RequestId, response: T) {
match serde_json::to_value(response) {
Ok(result) => {
let outgoing_message = OutgoingMessage::Response(OutgoingResponse {
id: request_id.request_id,
result,
});
if let Err(err) = self
.sender
.send(OutgoingEnvelope::ToConnection {
connection_id: request_id.connection_id,
message: outgoing_message,
})
.await
{
let outgoing_message = OutgoingMessage::Response(OutgoingResponse { id, result });
if let Err(err) = self.sender.send(outgoing_message).await {
warn!("failed to send response to client: {err:?}");
}
}
Err(err) => {
self.send_error(
request_id,
id,
JSONRPCErrorError {
code: INTERNAL_ERROR_CODE,
message: format!("failed to serialize response: {err}"),
@@ -261,91 +144,37 @@ impl OutgoingMessageSender {
}
pub(crate) async fn send_server_notification(&self, notification: ServerNotification) {
self.send_server_notification_to_connections(&[], notification)
.await;
}
pub(crate) async fn send_server_notification_to_connections(
&self,
connection_ids: &[ConnectionId],
notification: ServerNotification,
) {
let outgoing_message = OutgoingMessage::AppServerNotification(notification);
if connection_ids.is_empty() {
if let Err(err) = self
.sender
.send(OutgoingEnvelope::Broadcast {
message: outgoing_message,
})
.await
{
warn!("failed to send server notification to client: {err:?}");
}
let method = notification.to_string();
if self.should_skip_notification(&method).await {
return;
}
for connection_id in connection_ids {
if let Err(err) = self
.sender
.send(OutgoingEnvelope::ToConnection {
connection_id: *connection_id,
message: outgoing_message.clone(),
})
.await
{
warn!("failed to send server notification to client: {err:?}");
}
}
}
pub(crate) async fn send_notification_to_connections(
&self,
connection_ids: &[ConnectionId],
notification: OutgoingNotification,
) {
let outgoing_message = OutgoingMessage::Notification(notification);
if connection_ids.is_empty() {
if let Err(err) = self
.sender
.send(OutgoingEnvelope::Broadcast {
message: outgoing_message,
})
.await
{
warn!("failed to send notification to client: {err:?}");
}
return;
}
for connection_id in connection_ids {
if let Err(err) = self
.sender
.send(OutgoingEnvelope::ToConnection {
connection_id: *connection_id,
message: outgoing_message.clone(),
})
.await
{
warn!("failed to send notification to client: {err:?}");
}
}
}
pub(crate) async fn send_error(
&self,
request_id: ConnectionRequestId,
error: JSONRPCErrorError,
) {
let outgoing_message = OutgoingMessage::Error(OutgoingError {
id: request_id.request_id,
error,
});
if let Err(err) = self
.sender
.send(OutgoingEnvelope::ToConnection {
connection_id: request_id.connection_id,
message: outgoing_message,
})
.send(OutgoingMessage::AppServerNotification(notification))
.await
{
warn!("failed to send server notification to client: {err:?}");
}
}
/// All notifications should be migrated to [`ServerNotification`] and
/// [`OutgoingMessage::Notification`] should be removed.
pub(crate) async fn send_notification(&self, notification: OutgoingNotification) {
if self
.should_skip_notification(notification.method.as_str())
.await
{
return;
}
let outgoing_message = OutgoingMessage::Notification(notification);
if let Err(err) = self.sender.send(outgoing_message).await {
warn!("failed to send notification to client: {err:?}");
}
}
pub(crate) async fn send_error(&self, id: RequestId, error: JSONRPCErrorError) {
let outgoing_message = OutgoingMessage::Error(OutgoingError { id, error });
if let Err(err) = self.sender.send(outgoing_message).await {
warn!("failed to send error to client: {err:?}");
}
}
@@ -385,8 +214,6 @@ pub(crate) struct OutgoingError {
#[cfg(test)]
mod tests {
use std::time::Duration;
use codex_app_server_protocol::AccountLoginCompletedNotification;
use codex_app_server_protocol::AccountRateLimitsUpdatedNotification;
use codex_app_server_protocol::AccountUpdatedNotification;
@@ -397,7 +224,6 @@ mod tests {
use codex_app_server_protocol::RateLimitWindow;
use pretty_assertions::assert_eq;
use serde_json::json;
use tokio::time::timeout;
use uuid::Uuid;
use super::*;
@@ -457,8 +283,6 @@ mod tests {
let notification =
ServerNotification::AccountRateLimitsUpdated(AccountRateLimitsUpdatedNotification {
rate_limits: RateLimitSnapshot {
limit_id: Some("codex".to_string()),
limit_name: None,
primary: Some(RateLimitWindow {
used_percent: 25,
window_duration_mins: Some(15),
@@ -475,9 +299,7 @@ mod tests {
json!({
"method": "account/rateLimits/updated",
"params": {
"rateLimits": {
"limitId": "codex",
"limitName": null,
"rateLimits": {
"primary": {
"usedPercent": 25,
"windowDurationMins": 15,
@@ -538,75 +360,4 @@ mod tests {
"ensure the notification serializes correctly"
);
}
#[tokio::test]
async fn send_response_routes_to_target_connection() {
let (tx, mut rx) = mpsc::channel::<OutgoingEnvelope>(4);
let outgoing = OutgoingMessageSender::new(tx);
let request_id = ConnectionRequestId {
connection_id: ConnectionId(42),
request_id: RequestId::Integer(7),
};
outgoing
.send_response(request_id.clone(), json!({ "ok": true }))
.await;
let envelope = timeout(Duration::from_secs(1), rx.recv())
.await
.expect("should receive envelope before timeout")
.expect("channel should contain one message");
match envelope {
OutgoingEnvelope::ToConnection {
connection_id,
message,
} => {
assert_eq!(connection_id, ConnectionId(42));
let OutgoingMessage::Response(response) = message else {
panic!("expected response message");
};
assert_eq!(response.id, request_id.request_id);
assert_eq!(response.result, json!({ "ok": true }));
}
other => panic!("expected targeted response envelope, got: {other:?}"),
}
}
#[tokio::test]
async fn send_error_routes_to_target_connection() {
let (tx, mut rx) = mpsc::channel::<OutgoingEnvelope>(4);
let outgoing = OutgoingMessageSender::new(tx);
let request_id = ConnectionRequestId {
connection_id: ConnectionId(9),
request_id: RequestId::Integer(3),
};
let error = JSONRPCErrorError {
code: INTERNAL_ERROR_CODE,
message: "boom".to_string(),
data: None,
};
outgoing.send_error(request_id.clone(), error.clone()).await;
let envelope = timeout(Duration::from_secs(1), rx.recv())
.await
.expect("should receive envelope before timeout")
.expect("channel should contain one message");
match envelope {
OutgoingEnvelope::ToConnection {
connection_id,
message,
} => {
assert_eq!(connection_id, ConnectionId(9));
let OutgoingMessage::Error(outgoing_error) = message else {
panic!("expected error message");
};
assert_eq!(outgoing_error.id, RequestId::Integer(3));
assert_eq!(outgoing_error.error, error);
}
other => panic!("expected targeted error envelope, got: {other:?}"),
}
}
}

View File

@@ -1,221 +0,0 @@
use crate::outgoing_message::ConnectionId;
use crate::outgoing_message::ConnectionRequestId;
use codex_app_server_protocol::TurnError;
use codex_core::CodexThread;
use codex_protocol::ThreadId;
use std::collections::HashMap;
use std::collections::HashSet;
use std::sync::Arc;
use std::sync::Weak;
use tokio::sync::Mutex;
use tokio::sync::oneshot;
use uuid::Uuid;
type PendingInterruptQueue = Vec<(
ConnectionRequestId,
crate::codex_message_processor::ApiVersion,
)>;
/// Per-conversation accumulation of the latest state (e.g. the most recent error) while a turn runs.
#[derive(Default, Clone)]
pub(crate) struct TurnSummary {
pub(crate) file_change_started: HashSet<String>,
pub(crate) last_error: Option<TurnError>,
}
#[derive(Default)]
pub(crate) struct ThreadState {
pub(crate) pending_interrupts: PendingInterruptQueue,
pub(crate) pending_rollbacks: Option<ConnectionRequestId>,
pub(crate) turn_summary: TurnSummary,
pub(crate) cancel_tx: Option<oneshot::Sender<()>>,
pub(crate) experimental_raw_events: bool,
listener_thread: Option<Weak<CodexThread>>,
subscribed_connections: HashSet<ConnectionId>,
}
impl ThreadState {
pub(crate) fn listener_matches(&self, conversation: &Arc<CodexThread>) -> bool {
self.listener_thread
.as_ref()
.and_then(Weak::upgrade)
.is_some_and(|existing| Arc::ptr_eq(&existing, conversation))
}
pub(crate) fn set_listener(
&mut self,
cancel_tx: oneshot::Sender<()>,
conversation: &Arc<CodexThread>,
) {
if let Some(previous) = self.cancel_tx.replace(cancel_tx) {
let _ = previous.send(());
}
self.listener_thread = Some(Arc::downgrade(conversation));
}
pub(crate) fn clear_listener(&mut self) {
if let Some(cancel_tx) = self.cancel_tx.take() {
let _ = cancel_tx.send(());
}
self.listener_thread = None;
}
pub(crate) fn add_connection(&mut self, connection_id: ConnectionId) {
self.subscribed_connections.insert(connection_id);
}
pub(crate) fn remove_connection(&mut self, connection_id: ConnectionId) {
self.subscribed_connections.remove(&connection_id);
}
pub(crate) fn subscribed_connection_ids(&self) -> Vec<ConnectionId> {
self.subscribed_connections.iter().copied().collect()
}
pub(crate) fn set_experimental_raw_events(&mut self, enabled: bool) {
self.experimental_raw_events = enabled;
}
}
#[derive(Clone, Copy)]
struct SubscriptionState {
thread_id: ThreadId,
connection_id: ConnectionId,
}
#[derive(Default)]
pub(crate) struct ThreadStateManager {
thread_states: HashMap<ThreadId, Arc<Mutex<ThreadState>>>,
subscription_state_by_id: HashMap<Uuid, SubscriptionState>,
thread_ids_by_connection: HashMap<ConnectionId, HashSet<ThreadId>>,
}
impl ThreadStateManager {
pub(crate) fn new() -> Self {
Self::default()
}
pub(crate) fn thread_state(&mut self, thread_id: ThreadId) -> Arc<Mutex<ThreadState>> {
self.thread_states
.entry(thread_id)
.or_insert_with(|| Arc::new(Mutex::new(ThreadState::default())))
.clone()
}
pub(crate) async fn remove_listener(&mut self, subscription_id: Uuid) -> Option<ThreadId> {
let subscription_state = self.subscription_state_by_id.remove(&subscription_id)?;
let thread_id = subscription_state.thread_id;
let connection_still_subscribed_to_thread =
self.subscription_state_by_id.values().any(|state| {
state.thread_id == thread_id
&& state.connection_id == subscription_state.connection_id
});
if !connection_still_subscribed_to_thread {
let mut remove_connection_entry = false;
if let Some(thread_ids) = self
.thread_ids_by_connection
.get_mut(&subscription_state.connection_id)
{
thread_ids.remove(&thread_id);
remove_connection_entry = thread_ids.is_empty();
}
if remove_connection_entry {
self.thread_ids_by_connection
.remove(&subscription_state.connection_id);
}
if let Some(thread_state) = self.thread_states.get(&thread_id) {
thread_state
.lock()
.await
.remove_connection(subscription_state.connection_id);
}
}
if let Some(thread_state) = self.thread_states.get(&thread_id) {
let mut thread_state = thread_state.lock().await;
if thread_state.subscribed_connection_ids().is_empty() {
thread_state.clear_listener();
}
}
Some(thread_id)
}
pub(crate) async fn remove_thread_state(&mut self, thread_id: ThreadId) {
if let Some(thread_state) = self.thread_states.remove(&thread_id) {
thread_state.lock().await.clear_listener();
}
self.subscription_state_by_id
.retain(|_, state| state.thread_id != thread_id);
self.thread_ids_by_connection.retain(|_, thread_ids| {
thread_ids.remove(&thread_id);
!thread_ids.is_empty()
});
}
pub(crate) async fn set_listener(
&mut self,
subscription_id: Uuid,
thread_id: ThreadId,
connection_id: ConnectionId,
experimental_raw_events: bool,
) -> Arc<Mutex<ThreadState>> {
self.subscription_state_by_id.insert(
subscription_id,
SubscriptionState {
thread_id,
connection_id,
},
);
self.thread_ids_by_connection
.entry(connection_id)
.or_default()
.insert(thread_id);
let thread_state = self.thread_state(thread_id);
{
let mut thread_state_guard = thread_state.lock().await;
thread_state_guard.add_connection(connection_id);
thread_state_guard.set_experimental_raw_events(experimental_raw_events);
}
thread_state
}
pub(crate) async fn ensure_connection_subscribed(
&mut self,
thread_id: ThreadId,
connection_id: ConnectionId,
experimental_raw_events: bool,
) -> Arc<Mutex<ThreadState>> {
self.thread_ids_by_connection
.entry(connection_id)
.or_default()
.insert(thread_id);
let thread_state = self.thread_state(thread_id);
{
let mut thread_state_guard = thread_state.lock().await;
thread_state_guard.add_connection(connection_id);
if experimental_raw_events {
thread_state_guard.set_experimental_raw_events(true);
}
}
thread_state
}
pub(crate) async fn remove_connection(&mut self, connection_id: ConnectionId) {
let Some(thread_ids) = self.thread_ids_by_connection.remove(&connection_id) else {
return;
};
self.subscription_state_by_id
.retain(|_, state| state.connection_id != connection_id);
for thread_id in thread_ids {
if let Some(thread_state) = self.thread_states.get(&thread_id) {
let mut thread_state = thread_state.lock().await;
thread_state.remove_connection(connection_id);
if thread_state.subscribed_connection_ids().is_empty() {
thread_state.clear_listener();
}
}
}
}
}

View File

@@ -1,780 +0,0 @@
use crate::error_code::OVERLOADED_ERROR_CODE;
use crate::message_processor::ConnectionSessionState;
use crate::outgoing_message::ConnectionId;
use crate::outgoing_message::OutgoingEnvelope;
use crate::outgoing_message::OutgoingError;
use crate::outgoing_message::OutgoingMessage;
use codex_app_server_protocol::JSONRPCErrorError;
use codex_app_server_protocol::JSONRPCMessage;
use futures::SinkExt;
use futures::StreamExt;
use owo_colors::OwoColorize;
use owo_colors::Stream;
use owo_colors::Style;
use std::collections::HashMap;
use std::collections::HashSet;
use std::io::ErrorKind;
use std::io::Result as IoResult;
use std::net::SocketAddr;
use std::str::FromStr;
use std::sync::Arc;
use std::sync::RwLock;
use std::sync::atomic::AtomicBool;
use std::sync::atomic::AtomicU64;
use std::sync::atomic::Ordering;
use tokio::io::AsyncBufReadExt;
use tokio::io::AsyncWriteExt;
use tokio::io::BufReader;
use tokio::io::{self};
use tokio::net::TcpListener;
use tokio::net::TcpStream;
use tokio::sync::mpsc;
use tokio::task::JoinHandle;
use tokio_tungstenite::accept_async;
use tokio_tungstenite::tungstenite::Message as WebSocketMessage;
use tracing::debug;
use tracing::error;
use tracing::info;
use tracing::warn;
/// Size of the bounded channels used to communicate between tasks. The value
/// is a balance between throughput and memory usage - 128 messages should be
/// plenty for an interactive CLI.
pub(crate) const CHANNEL_CAPACITY: usize = 128;
fn colorize(text: &str, style: Style) -> String {
text.if_supports_color(Stream::Stderr, |value| value.style(style))
.to_string()
}
#[allow(clippy::print_stderr)]
fn print_websocket_startup_banner(addr: SocketAddr) {
let title = colorize("codex app-server (WebSockets)", Style::new().bold().cyan());
let listening_label = colorize("listening on:", Style::new().dimmed());
let listen_url = colorize(&format!("ws://{addr}"), Style::new().green());
let note_label = colorize("note:", Style::new().dimmed());
eprintln!("{title}");
eprintln!(" {listening_label} {listen_url}");
if addr.ip().is_loopback() {
eprintln!(
" {note_label} binds localhost only (use SSH port-forwarding for remote access)"
);
} else {
eprintln!(
" {note_label} this is a raw WS server; consider running behind TLS/auth for real remote use"
);
}
}
#[allow(clippy::print_stderr)]
fn print_websocket_connection(peer_addr: SocketAddr) {
let connected_label = colorize("websocket client connected from", Style::new().dimmed());
eprintln!("{connected_label} {peer_addr}");
}
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum AppServerTransport {
Stdio,
WebSocket { bind_address: SocketAddr },
}
#[derive(Debug, Clone, Eq, PartialEq)]
pub enum AppServerTransportParseError {
UnsupportedListenUrl(String),
InvalidWebSocketListenUrl(String),
}
impl std::fmt::Display for AppServerTransportParseError {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
AppServerTransportParseError::UnsupportedListenUrl(listen_url) => write!(
f,
"unsupported --listen URL `{listen_url}`; expected `stdio://` or `ws://IP:PORT`"
),
AppServerTransportParseError::InvalidWebSocketListenUrl(listen_url) => write!(
f,
"invalid websocket --listen URL `{listen_url}`; expected `ws://IP:PORT`"
),
}
}
}
impl std::error::Error for AppServerTransportParseError {}
impl AppServerTransport {
pub const DEFAULT_LISTEN_URL: &'static str = "stdio://";
pub fn from_listen_url(listen_url: &str) -> Result<Self, AppServerTransportParseError> {
if listen_url == Self::DEFAULT_LISTEN_URL {
return Ok(Self::Stdio);
}
if let Some(socket_addr) = listen_url.strip_prefix("ws://") {
let bind_address = socket_addr.parse::<SocketAddr>().map_err(|_| {
AppServerTransportParseError::InvalidWebSocketListenUrl(listen_url.to_string())
})?;
return Ok(Self::WebSocket { bind_address });
}
Err(AppServerTransportParseError::UnsupportedListenUrl(
listen_url.to_string(),
))
}
}
impl FromStr for AppServerTransport {
type Err = AppServerTransportParseError;
fn from_str(s: &str) -> Result<Self, Self::Err> {
Self::from_listen_url(s)
}
}
#[derive(Debug)]
pub(crate) enum TransportEvent {
ConnectionOpened {
connection_id: ConnectionId,
writer: mpsc::Sender<OutgoingMessage>,
},
ConnectionClosed {
connection_id: ConnectionId,
},
IncomingMessage {
connection_id: ConnectionId,
message: JSONRPCMessage,
},
}
pub(crate) struct ConnectionState {
pub(crate) outbound_initialized: Arc<AtomicBool>,
pub(crate) outbound_opted_out_notification_methods: Arc<RwLock<HashSet<String>>>,
pub(crate) session: ConnectionSessionState,
}
impl ConnectionState {
pub(crate) fn new(
outbound_initialized: Arc<AtomicBool>,
outbound_opted_out_notification_methods: Arc<RwLock<HashSet<String>>>,
) -> Self {
Self {
outbound_initialized,
outbound_opted_out_notification_methods,
session: ConnectionSessionState::default(),
}
}
}
pub(crate) struct OutboundConnectionState {
pub(crate) initialized: Arc<AtomicBool>,
pub(crate) opted_out_notification_methods: Arc<RwLock<HashSet<String>>>,
pub(crate) writer: mpsc::Sender<OutgoingMessage>,
}
impl OutboundConnectionState {
pub(crate) fn new(
writer: mpsc::Sender<OutgoingMessage>,
initialized: Arc<AtomicBool>,
opted_out_notification_methods: Arc<RwLock<HashSet<String>>>,
) -> Self {
Self {
initialized,
opted_out_notification_methods,
writer,
}
}
}
pub(crate) async fn start_stdio_connection(
transport_event_tx: mpsc::Sender<TransportEvent>,
stdio_handles: &mut Vec<JoinHandle<()>>,
) -> IoResult<()> {
let connection_id = ConnectionId(0);
let (writer_tx, mut writer_rx) = mpsc::channel::<OutgoingMessage>(CHANNEL_CAPACITY);
let writer_tx_for_reader = writer_tx.clone();
transport_event_tx
.send(TransportEvent::ConnectionOpened {
connection_id,
writer: writer_tx,
})
.await
.map_err(|_| std::io::Error::new(ErrorKind::BrokenPipe, "processor unavailable"))?;
let transport_event_tx_for_reader = transport_event_tx.clone();
stdio_handles.push(tokio::spawn(async move {
let stdin = io::stdin();
let reader = BufReader::new(stdin);
let mut lines = reader.lines();
loop {
match lines.next_line().await {
Ok(Some(line)) => {
if !forward_incoming_message(
&transport_event_tx_for_reader,
&writer_tx_for_reader,
connection_id,
&line,
)
.await
{
break;
}
}
Ok(None) => break,
Err(err) => {
error!("Failed reading stdin: {err}");
break;
}
}
}
let _ = transport_event_tx_for_reader
.send(TransportEvent::ConnectionClosed { connection_id })
.await;
debug!("stdin reader finished (EOF)");
}));
stdio_handles.push(tokio::spawn(async move {
let mut stdout = io::stdout();
while let Some(outgoing_message) = writer_rx.recv().await {
let Some(mut json) = serialize_outgoing_message(outgoing_message) else {
continue;
};
json.push('\n');
if let Err(err) = stdout.write_all(json.as_bytes()).await {
error!("Failed to write to stdout: {err}");
break;
}
}
info!("stdout writer exited (channel closed)");
}));
Ok(())
}
pub(crate) async fn start_websocket_acceptor(
bind_address: SocketAddr,
transport_event_tx: mpsc::Sender<TransportEvent>,
) -> IoResult<JoinHandle<()>> {
let listener = TcpListener::bind(bind_address).await?;
let local_addr = listener.local_addr()?;
print_websocket_startup_banner(local_addr);
info!("app-server websocket listening on ws://{local_addr}");
let connection_counter = Arc::new(AtomicU64::new(1));
Ok(tokio::spawn(async move {
loop {
match listener.accept().await {
Ok((stream, peer_addr)) => {
print_websocket_connection(peer_addr);
let connection_id =
ConnectionId(connection_counter.fetch_add(1, Ordering::Relaxed));
let transport_event_tx_for_connection = transport_event_tx.clone();
tokio::spawn(async move {
run_websocket_connection(
connection_id,
stream,
transport_event_tx_for_connection,
)
.await;
});
}
Err(err) => {
error!("failed to accept websocket connection: {err}");
}
}
}
}))
}
async fn run_websocket_connection(
connection_id: ConnectionId,
stream: TcpStream,
transport_event_tx: mpsc::Sender<TransportEvent>,
) {
let websocket_stream = match accept_async(stream).await {
Ok(stream) => stream,
Err(err) => {
warn!("failed to complete websocket handshake: {err}");
return;
}
};
let (writer_tx, mut writer_rx) = mpsc::channel::<OutgoingMessage>(CHANNEL_CAPACITY);
let writer_tx_for_reader = writer_tx.clone();
if transport_event_tx
.send(TransportEvent::ConnectionOpened {
connection_id,
writer: writer_tx,
})
.await
.is_err()
{
return;
}
let (mut websocket_writer, mut websocket_reader) = websocket_stream.split();
loop {
tokio::select! {
outgoing_message = writer_rx.recv() => {
let Some(outgoing_message) = outgoing_message else {
break;
};
let Some(json) = serialize_outgoing_message(outgoing_message) else {
continue;
};
if websocket_writer.send(WebSocketMessage::Text(json.into())).await.is_err() {
break;
}
}
incoming_message = websocket_reader.next() => {
match incoming_message {
Some(Ok(WebSocketMessage::Text(text))) => {
if !forward_incoming_message(
&transport_event_tx,
&writer_tx_for_reader,
connection_id,
&text,
)
.await
{
break;
}
}
Some(Ok(WebSocketMessage::Ping(payload))) => {
if websocket_writer.send(WebSocketMessage::Pong(payload)).await.is_err() {
break;
}
}
Some(Ok(WebSocketMessage::Pong(_))) => {}
Some(Ok(WebSocketMessage::Close(_))) | None => break,
Some(Ok(WebSocketMessage::Binary(_))) => {
warn!("dropping unsupported binary websocket message");
}
Some(Ok(WebSocketMessage::Frame(_))) => {}
Some(Err(err)) => {
warn!("websocket receive error: {err}");
break;
}
}
}
}
}
let _ = transport_event_tx
.send(TransportEvent::ConnectionClosed { connection_id })
.await;
}
async fn forward_incoming_message(
transport_event_tx: &mpsc::Sender<TransportEvent>,
writer: &mpsc::Sender<OutgoingMessage>,
connection_id: ConnectionId,
payload: &str,
) -> bool {
match serde_json::from_str::<JSONRPCMessage>(payload) {
Ok(message) => {
enqueue_incoming_message(transport_event_tx, writer, connection_id, message).await
}
Err(err) => {
error!("Failed to deserialize JSONRPCMessage: {err}");
true
}
}
}
async fn enqueue_incoming_message(
transport_event_tx: &mpsc::Sender<TransportEvent>,
writer: &mpsc::Sender<OutgoingMessage>,
connection_id: ConnectionId,
message: JSONRPCMessage,
) -> bool {
let event = TransportEvent::IncomingMessage {
connection_id,
message,
};
match transport_event_tx.try_send(event) {
Ok(()) => true,
Err(mpsc::error::TrySendError::Closed(_)) => false,
Err(mpsc::error::TrySendError::Full(TransportEvent::IncomingMessage {
connection_id,
message: JSONRPCMessage::Request(request),
})) => {
let overload_error = OutgoingMessage::Error(OutgoingError {
id: request.id,
error: JSONRPCErrorError {
code: OVERLOADED_ERROR_CODE,
message: "Server overloaded; retry later.".to_string(),
data: None,
},
});
match writer.try_send(overload_error) {
Ok(()) => true,
Err(mpsc::error::TrySendError::Closed(_)) => false,
Err(mpsc::error::TrySendError::Full(_overload_error)) => {
warn!(
"dropping overload response for connection {:?}: outbound queue is full",
connection_id
);
true
}
}
}
Err(mpsc::error::TrySendError::Full(event)) => transport_event_tx.send(event).await.is_ok(),
}
}
fn serialize_outgoing_message(outgoing_message: OutgoingMessage) -> Option<String> {
let value = match serde_json::to_value(outgoing_message) {
Ok(value) => value,
Err(err) => {
error!("Failed to convert OutgoingMessage to JSON value: {err}");
return None;
}
};
match serde_json::to_string(&value) {
Ok(json) => Some(json),
Err(err) => {
error!("Failed to serialize JSONRPCMessage: {err}");
None
}
}
}
fn should_skip_notification_for_connection(
connection_state: &OutboundConnectionState,
message: &OutgoingMessage,
) -> bool {
let Ok(opted_out_notification_methods) = connection_state.opted_out_notification_methods.read()
else {
warn!("failed to read outbound opted-out notifications");
return false;
};
match message {
OutgoingMessage::AppServerNotification(notification) => {
let method = notification.to_string();
opted_out_notification_methods.contains(method.as_str())
}
OutgoingMessage::Notification(notification) => {
opted_out_notification_methods.contains(notification.method.as_str())
}
_ => false,
}
}
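/// Delivers an outgoing envelope either to a single connection or, for
/// broadcasts, to every initialized connection, honoring per-connection
/// notification opt-outs. Connections whose writer channel has closed are
/// removed from `connections` and returned so the caller can clean them up.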
pub(crate) async fn route_outgoing_envelope(
connections: &mut HashMap<ConnectionId, OutboundConnectionState>,
envelope: OutgoingEnvelope,
) -> Vec<ConnectionId> {
let mut disconnected = Vec::new();
match envelope {
OutgoingEnvelope::ToConnection {
connection_id,
message,
} => {
let Some(connection_state) = connections.get(&connection_id) else {
warn!(
"dropping message for disconnected connection: {:?}",
connection_id
);
return disconnected;
};
if should_skip_notification_for_connection(connection_state, &message) {
return disconnected;
}
if connection_state.writer.send(message).await.is_err() {
connections.remove(&connection_id);
disconnected.push(connection_id);
}
}
OutgoingEnvelope::Broadcast { message } => {
let target_connections: Vec<ConnectionId> = connections
.iter()
.filter_map(|(connection_id, connection_state)| {
if connection_state.initialized.load(Ordering::Acquire)
&& !should_skip_notification_for_connection(connection_state, &message)
{
Some(*connection_id)
} else {
None
}
})
.collect();
for connection_id in target_connections {
let Some(connection_state) = connections.get(&connection_id) else {
continue;
};
if connection_state.writer.send(message.clone()).await.is_err() {
connections.remove(&connection_id);
disconnected.push(connection_id);
}
}
}
}
disconnected
}
#[cfg(test)]
mod tests {
use super::*;
use crate::error_code::OVERLOADED_ERROR_CODE;
use pretty_assertions::assert_eq;
use serde_json::json;
#[test]
fn app_server_transport_parses_stdio_listen_url() {
let transport = AppServerTransport::from_listen_url(AppServerTransport::DEFAULT_LISTEN_URL)
.expect("stdio listen URL should parse");
assert_eq!(transport, AppServerTransport::Stdio);
}
#[test]
fn app_server_transport_parses_websocket_listen_url() {
let transport = AppServerTransport::from_listen_url("ws://127.0.0.1:1234")
.expect("websocket listen URL should parse");
assert_eq!(
transport,
AppServerTransport::WebSocket {
bind_address: "127.0.0.1:1234".parse().expect("valid socket address"),
}
);
}
#[test]
fn app_server_transport_rejects_invalid_websocket_listen_url() {
let err = AppServerTransport::from_listen_url("ws://localhost:1234")
.expect_err("hostname bind address should be rejected");
assert_eq!(
err.to_string(),
"invalid websocket --listen URL `ws://localhost:1234`; expected `ws://IP:PORT`"
);
}
#[test]
fn app_server_transport_rejects_unsupported_listen_url() {
let err = AppServerTransport::from_listen_url("http://127.0.0.1:1234")
.expect_err("unsupported scheme should fail");
assert_eq!(
err.to_string(),
"unsupported --listen URL `http://127.0.0.1:1234`; expected `stdio://` or `ws://IP:PORT`"
);
}
#[tokio::test]
async fn enqueue_incoming_request_returns_overload_error_when_queue_is_full() {
let connection_id = ConnectionId(42);
let (transport_event_tx, mut transport_event_rx) = mpsc::channel(1);
let (writer_tx, mut writer_rx) = mpsc::channel(1);
let first_message =
JSONRPCMessage::Notification(codex_app_server_protocol::JSONRPCNotification {
method: "initialized".to_string(),
params: None,
});
transport_event_tx
.send(TransportEvent::IncomingMessage {
connection_id,
message: first_message.clone(),
})
.await
.expect("queue should accept first message");
let request = JSONRPCMessage::Request(codex_app_server_protocol::JSONRPCRequest {
id: codex_app_server_protocol::RequestId::Integer(7),
method: "config/read".to_string(),
params: Some(json!({ "includeLayers": false })),
});
assert!(
enqueue_incoming_message(&transport_event_tx, &writer_tx, connection_id, request).await
);
let queued_event = transport_event_rx
.recv()
.await
.expect("first event should stay queued");
match queued_event {
TransportEvent::IncomingMessage {
connection_id: queued_connection_id,
message,
} => {
assert_eq!(queued_connection_id, connection_id);
assert_eq!(message, first_message);
}
_ => panic!("expected queued incoming message"),
}
let overload = writer_rx
.recv()
.await
.expect("request should receive overload error");
let overload_json = serde_json::to_value(overload).expect("serialize overload error");
assert_eq!(
overload_json,
json!({
"id": 7,
"error": {
"code": OVERLOADED_ERROR_CODE,
"message": "Server overloaded; retry later."
}
})
);
}
#[tokio::test]
async fn enqueue_incoming_response_waits_instead_of_dropping_when_queue_is_full() {
let connection_id = ConnectionId(42);
let (transport_event_tx, mut transport_event_rx) = mpsc::channel(1);
let (writer_tx, _writer_rx) = mpsc::channel(1);
let first_message =
JSONRPCMessage::Notification(codex_app_server_protocol::JSONRPCNotification {
method: "initialized".to_string(),
params: None,
});
transport_event_tx
.send(TransportEvent::IncomingMessage {
connection_id,
message: first_message.clone(),
})
.await
.expect("queue should accept first message");
let response = JSONRPCMessage::Response(codex_app_server_protocol::JSONRPCResponse {
id: codex_app_server_protocol::RequestId::Integer(7),
result: json!({"ok": true}),
});
let transport_event_tx_for_enqueue = transport_event_tx.clone();
let writer_tx_for_enqueue = writer_tx.clone();
let enqueue_handle = tokio::spawn(async move {
enqueue_incoming_message(
&transport_event_tx_for_enqueue,
&writer_tx_for_enqueue,
connection_id,
response,
)
.await
});
let queued_event = transport_event_rx
.recv()
.await
.expect("first event should be dequeued");
match queued_event {
TransportEvent::IncomingMessage {
connection_id: queued_connection_id,
message,
} => {
assert_eq!(queued_connection_id, connection_id);
assert_eq!(message, first_message);
}
_ => panic!("expected queued incoming message"),
}
let enqueue_result = enqueue_handle.await.expect("enqueue task should not panic");
assert!(enqueue_result);
let forwarded_event = transport_event_rx
.recv()
.await
.expect("response should be forwarded instead of dropped");
match forwarded_event {
TransportEvent::IncomingMessage {
connection_id: queued_connection_id,
message:
JSONRPCMessage::Response(codex_app_server_protocol::JSONRPCResponse { id, result }),
} => {
assert_eq!(queued_connection_id, connection_id);
assert_eq!(id, codex_app_server_protocol::RequestId::Integer(7));
assert_eq!(result, json!({"ok": true}));
}
_ => panic!("expected forwarded response message"),
}
}
#[tokio::test]
async fn enqueue_incoming_request_does_not_block_when_writer_queue_is_full() {
let connection_id = ConnectionId(42);
let (transport_event_tx, _transport_event_rx) = mpsc::channel(1);
let (writer_tx, mut writer_rx) = mpsc::channel(1);
transport_event_tx
.send(TransportEvent::IncomingMessage {
connection_id,
message: JSONRPCMessage::Notification(
codex_app_server_protocol::JSONRPCNotification {
method: "initialized".to_string(),
params: None,
},
),
})
.await
.expect("transport queue should accept first message");
writer_tx
.send(OutgoingMessage::Notification(
crate::outgoing_message::OutgoingNotification {
method: "queued".to_string(),
params: None,
},
))
.await
.expect("writer queue should accept first message");
let request = JSONRPCMessage::Request(codex_app_server_protocol::JSONRPCRequest {
id: codex_app_server_protocol::RequestId::Integer(7),
method: "config/read".to_string(),
params: Some(json!({ "includeLayers": false })),
});
let enqueue_result = tokio::time::timeout(
std::time::Duration::from_millis(100),
enqueue_incoming_message(&transport_event_tx, &writer_tx, connection_id, request),
)
.await
.expect("enqueue should not block while writer queue is full");
assert!(enqueue_result);
let queued_outgoing = writer_rx
.recv()
.await
.expect("writer queue should still contain original message");
let queued_json = serde_json::to_value(queued_outgoing).expect("serialize queued message");
assert_eq!(queued_json, json!({ "method": "queued" }));
}
#[tokio::test]
async fn to_connection_notification_respects_opt_out_filters() {
let connection_id = ConnectionId(7);
let (writer_tx, mut writer_rx) = mpsc::channel(1);
let initialized = Arc::new(AtomicBool::new(true));
let opted_out_notification_methods = Arc::new(RwLock::new(HashSet::from([
"codex/event/task_started".to_string(),
])));
let mut connections = HashMap::new();
connections.insert(
connection_id,
OutboundConnectionState::new(writer_tx, initialized, opted_out_notification_methods),
);
let disconnected = route_outgoing_envelope(
&mut connections,
OutgoingEnvelope::ToConnection {
connection_id,
message: OutgoingMessage::Notification(
crate::outgoing_message::OutgoingNotification {
method: "codex/event/task_started".to_string(),
params: None,
},
),
},
)
.await;
assert_eq!(disconnected, Vec::<ConnectionId>::new());
assert!(
writer_rx.try_recv().is_err(),
"opted-out notification should be dropped"
);
}
}
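For orientation, here is a minimal sketch (an editor's addition, not part of this change) of how a caller might resolve a listen URL into a transport. It assumes only the `from_listen_url` API, `DEFAULT_LISTEN_URL`, and the `Stdio`/`WebSocket` variants exercised by the tests above; the `choose_transport` helper is hypothetical.
fn choose_transport(listen: Option<&str>) -> Option<AppServerTransport> {
    // Fall back to the stdio default when no --listen URL was given.
    let url = listen.unwrap_or(AppServerTransport::DEFAULT_LISTEN_URL);
    match AppServerTransport::from_listen_url(url) {
        Ok(transport) => Some(transport),
        Err(err) => {
            // `from_listen_url` rejects anything other than `stdio://` or `ws://IP:PORT`.
            eprintln!("invalid --listen URL: {err}");
            None
        }
    }
}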

View File

@@ -12,7 +12,7 @@ anyhow = { workspace = true }
base64 = { workspace = true }
chrono = { workspace = true }
codex-app-server-protocol = { workspace = true }
codex-core = { workspace = true }
codex-core = { workspace = true, features = ["test-support"] }
codex-protocol = { workspace = true }
codex-utils-cargo-bin = { workspace = true }
serde = { workspace = true }

View File

@@ -174,7 +174,7 @@ impl McpProcess {
client_info,
Some(InitializeCapabilities {
experimental_api: true,
..Default::default()
opt_out_notification_methods: None,
}),
)
.await

View File

@@ -1,6 +1,6 @@
use chrono::DateTime;
use chrono::Utc;
use codex_core::test_support::all_model_presets;
use codex_core::models_manager::model_presets::all_model_presets;
use codex_protocol::openai_models::ConfigShellToolType;
use codex_protocol::openai_models::ModelInfo;
use codex_protocol::openai_models::ModelPreset;
@@ -40,7 +40,6 @@ fn preset_to_info(preset: &ModelPreset, priority: i32) -> ModelInfo {
effective_context_window_percent: 95,
experimental_supported_tools: Vec::new(),
input_modalities: default_input_modalities(),
prefer_websockets: false,
}
}

View File

@@ -560,7 +560,6 @@ fn append_rollout_turn_context(path: &Path, timestamp: &str, model: &str) -> std
let line = RolloutLine {
timestamp: timestamp.to_string(),
item: RolloutItem::TurnContext(TurnContextItem {
turn_id: None,
cwd: PathBuf::from("/"),
approval_policy: AskForApproval::Never,
sandbox_policy: SandboxPolicy::DangerFullAccess,

View File

@@ -36,9 +36,8 @@ async fn app_server_default_analytics_disabled_without_flag() -> Result<()> {
.map_err(|err| anyhow::anyhow!(err.to_string()))?;
// With analytics unset in the config and the default flag false, metrics are disabled.
// A provider may still exist for non-metrics telemetry, so check metrics specifically.
let has_metrics = provider.as_ref().and_then(|otel| otel.metrics()).is_some();
assert_eq!(has_metrics, false);
// No provider is built.
assert_eq!(provider.is_none(), true);
Ok(())
}

View File

@@ -15,7 +15,7 @@ use codex_app_server_protocol::CollaborationModeListParams;
use codex_app_server_protocol::CollaborationModeListResponse;
use codex_app_server_protocol::JSONRPCResponse;
use codex_app_server_protocol::RequestId;
use codex_core::test_support::builtin_collaboration_mode_presets;
use codex_core::models_manager::test_builtin_collaboration_mode_presets;
use codex_protocol::config_types::CollaborationModeMask;
use codex_protocol::config_types::ModeKind;
use pretty_assertions::assert_eq;
@@ -55,7 +55,7 @@ async fn list_collaboration_modes_returns_presets() -> Result<()> {
/// If the defaults change in the app server, this helper should be updated alongside the
/// contract, or the test will fail in ways that imply a regression in the API.
fn plan_preset() -> CollaborationModeMask {
let presets = builtin_collaboration_mode_presets();
let presets = test_builtin_collaboration_mode_presets();
presets
.into_iter()
.find(|p| p.mode == Some(ModeKind::Plan))
@@ -64,7 +64,7 @@ fn plan_preset() -> CollaborationModeMask {
/// Builds the default preset that the list response is expected to return.
fn default_preset() -> CollaborationModeMask {
let presets = builtin_collaboration_mode_presets();
let presets = test_builtin_collaboration_mode_presets();
presets
.into_iter()
.find(|p| p.mode == Some(ModeKind::Default))

View File

@@ -560,22 +560,9 @@ fn assert_layers_user_then_optional_system(
layers: &[codex_app_server_protocol::ConfigLayer],
user_file: AbsolutePathBuf,
) -> Result<()> {
let mut first_index = 0;
if matches!(
layers.first().map(|layer| &layer.name),
Some(ConfigLayerSource::LegacyManagedConfigTomlFromMdm)
) {
first_index = 1;
}
assert_eq!(layers.len(), first_index + 2);
assert_eq!(
layers[first_index].name,
ConfigLayerSource::User { file: user_file }
);
assert!(matches!(
layers[first_index + 1].name,
ConfigLayerSource::System { .. }
));
assert_eq!(layers.len(), 2);
assert_eq!(layers[0].name, ConfigLayerSource::User { file: user_file });
assert!(matches!(layers[1].name, ConfigLayerSource::System { .. }));
Ok(())
}
@@ -584,25 +571,12 @@ fn assert_layers_managed_user_then_optional_system(
managed_file: AbsolutePathBuf,
user_file: AbsolutePathBuf,
) -> Result<()> {
let mut first_index = 0;
if matches!(
layers.first().map(|layer| &layer.name),
Some(ConfigLayerSource::LegacyManagedConfigTomlFromMdm)
) {
first_index = 1;
}
assert_eq!(layers.len(), first_index + 3);
assert_eq!(layers.len(), 3);
assert_eq!(
layers[first_index].name,
layers[0].name,
ConfigLayerSource::LegacyManagedConfigTomlFromFile { file: managed_file }
);
assert_eq!(
layers[first_index + 1].name,
ConfigLayerSource::User { file: user_file }
);
assert!(matches!(
layers[first_index + 2].name,
ConfigLayerSource::System { .. }
));
assert_eq!(layers[1].name, ConfigLayerSource::User { file: user_file });
assert!(matches!(layers[2].name, ConfigLayerSource::System { .. }));
Ok(())
}

View File

@@ -1,263 +0,0 @@
use anyhow::Context;
use anyhow::Result;
use anyhow::bail;
use app_test_support::create_mock_responses_server_sequence_unchecked;
use codex_app_server_protocol::ClientInfo;
use codex_app_server_protocol::InitializeParams;
use codex_app_server_protocol::JSONRPCError;
use codex_app_server_protocol::JSONRPCMessage;
use codex_app_server_protocol::JSONRPCRequest;
use codex_app_server_protocol::JSONRPCResponse;
use codex_app_server_protocol::RequestId;
use futures::SinkExt;
use futures::StreamExt;
use serde_json::json;
use std::net::SocketAddr;
use std::path::Path;
use std::process::Stdio;
use tempfile::TempDir;
use tokio::io::AsyncBufReadExt;
use tokio::process::Child;
use tokio::process::Command;
use tokio::time::Duration;
use tokio::time::Instant;
use tokio::time::sleep;
use tokio::time::timeout;
use tokio_tungstenite::MaybeTlsStream;
use tokio_tungstenite::WebSocketStream;
use tokio_tungstenite::connect_async;
use tokio_tungstenite::tungstenite::Message as WebSocketMessage;
const DEFAULT_READ_TIMEOUT: Duration = Duration::from_secs(5);
type WsClient = WebSocketStream<MaybeTlsStream<tokio::net::TcpStream>>;
#[tokio::test]
async fn websocket_transport_routes_per_connection_handshake_and_responses() -> Result<()> {
let server = create_mock_responses_server_sequence_unchecked(Vec::new()).await;
let codex_home = TempDir::new()?;
create_config_toml(codex_home.path(), &server.uri(), "never")?;
let bind_addr = reserve_local_addr()?;
let mut process = spawn_websocket_server(codex_home.path(), bind_addr).await?;
let mut ws1 = connect_websocket(bind_addr).await?;
let mut ws2 = connect_websocket(bind_addr).await?;
send_initialize_request(&mut ws1, 1, "ws_client_one").await?;
let first_init = read_response_for_id(&mut ws1, 1).await?;
assert_eq!(first_init.id, RequestId::Integer(1));
// Initialize responses are request-scoped and must not leak to other
// connections.
assert_no_message(&mut ws2, Duration::from_millis(250)).await?;
send_config_read_request(&mut ws2, 2).await?;
let not_initialized = read_error_for_id(&mut ws2, 2).await?;
assert_eq!(not_initialized.error.message, "Not initialized");
send_initialize_request(&mut ws2, 3, "ws_client_two").await?;
let second_init = read_response_for_id(&mut ws2, 3).await?;
assert_eq!(second_init.id, RequestId::Integer(3));
// Same request-id on different connections must route independently.
send_config_read_request(&mut ws1, 77).await?;
send_config_read_request(&mut ws2, 77).await?;
let ws1_config = read_response_for_id(&mut ws1, 77).await?;
let ws2_config = read_response_for_id(&mut ws2, 77).await?;
assert_eq!(ws1_config.id, RequestId::Integer(77));
assert_eq!(ws2_config.id, RequestId::Integer(77));
assert!(ws1_config.result.get("config").is_some());
assert!(ws2_config.result.get("config").is_some());
process
.kill()
.await
.context("failed to stop websocket app-server process")?;
Ok(())
}
async fn spawn_websocket_server(codex_home: &Path, bind_addr: SocketAddr) -> Result<Child> {
let program = codex_utils_cargo_bin::cargo_bin("codex-app-server")
.context("should find app-server binary")?;
let mut cmd = Command::new(program);
cmd.arg("--listen")
.arg(format!("ws://{bind_addr}"))
.stdin(Stdio::null())
.stdout(Stdio::null())
.stderr(Stdio::piped())
.env("CODEX_HOME", codex_home)
.env("RUST_LOG", "debug");
let mut process = cmd
.kill_on_drop(true)
.spawn()
.context("failed to spawn websocket app-server process")?;
if let Some(stderr) = process.stderr.take() {
let mut stderr_reader = tokio::io::BufReader::new(stderr).lines();
tokio::spawn(async move {
while let Ok(Some(line)) = stderr_reader.next_line().await {
eprintln!("[websocket app-server stderr] {line}");
}
});
}
Ok(process)
}
fn reserve_local_addr() -> Result<SocketAddr> {
let listener = std::net::TcpListener::bind("127.0.0.1:0")?;
let addr = listener.local_addr()?;
drop(listener);
Ok(addr)
}
async fn connect_websocket(bind_addr: SocketAddr) -> Result<WsClient> {
let url = format!("ws://{bind_addr}");
let deadline = Instant::now() + Duration::from_secs(10);
loop {
match connect_async(&url).await {
Ok((stream, _response)) => return Ok(stream),
Err(err) => {
if Instant::now() >= deadline {
bail!("failed to connect websocket to {url}: {err}");
}
sleep(Duration::from_millis(50)).await;
}
}
}
}
async fn send_initialize_request(stream: &mut WsClient, id: i64, client_name: &str) -> Result<()> {
let params = InitializeParams {
client_info: ClientInfo {
name: client_name.to_string(),
title: Some("WebSocket Test Client".to_string()),
version: "0.1.0".to_string(),
},
capabilities: None,
};
send_request(
stream,
"initialize",
id,
Some(serde_json::to_value(params)?),
)
.await
}
async fn send_config_read_request(stream: &mut WsClient, id: i64) -> Result<()> {
send_request(
stream,
"config/read",
id,
Some(json!({ "includeLayers": false })),
)
.await
}
async fn send_request(
stream: &mut WsClient,
method: &str,
id: i64,
params: Option<serde_json::Value>,
) -> Result<()> {
let message = JSONRPCMessage::Request(JSONRPCRequest {
id: RequestId::Integer(id),
method: method.to_string(),
params,
});
send_jsonrpc(stream, message).await
}
async fn send_jsonrpc(stream: &mut WsClient, message: JSONRPCMessage) -> Result<()> {
let payload = serde_json::to_string(&message)?;
stream
.send(WebSocketMessage::Text(payload.into()))
.await
.context("failed to send websocket frame")
}
async fn read_response_for_id(stream: &mut WsClient, id: i64) -> Result<JSONRPCResponse> {
let target_id = RequestId::Integer(id);
loop {
let message = read_jsonrpc_message(stream).await?;
if let JSONRPCMessage::Response(response) = message
&& response.id == target_id
{
return Ok(response);
}
}
}
async fn read_error_for_id(stream: &mut WsClient, id: i64) -> Result<JSONRPCError> {
let target_id = RequestId::Integer(id);
loop {
let message = read_jsonrpc_message(stream).await?;
if let JSONRPCMessage::Error(err) = message
&& err.id == target_id
{
return Ok(err);
}
}
}
async fn read_jsonrpc_message(stream: &mut WsClient) -> Result<JSONRPCMessage> {
loop {
let frame = timeout(DEFAULT_READ_TIMEOUT, stream.next())
.await
.context("timed out waiting for websocket frame")?
.context("websocket stream ended unexpectedly")?
.context("failed to read websocket frame")?;
match frame {
WebSocketMessage::Text(text) => return Ok(serde_json::from_str(text.as_ref())?),
WebSocketMessage::Ping(payload) => {
stream.send(WebSocketMessage::Pong(payload)).await?;
}
WebSocketMessage::Pong(_) => {}
WebSocketMessage::Close(frame) => {
bail!("websocket closed unexpectedly: {frame:?}")
}
WebSocketMessage::Binary(_) => bail!("unexpected binary websocket frame"),
WebSocketMessage::Frame(_) => {}
}
}
}
async fn assert_no_message(stream: &mut WsClient, wait_for: Duration) -> Result<()> {
match timeout(wait_for, stream.next()).await {
Ok(Some(Ok(frame))) => bail!("unexpected frame while waiting for silence: {frame:?}"),
Ok(Some(Err(err))) => bail!("unexpected websocket read error: {err}"),
Ok(None) => bail!("websocket closed unexpectedly while waiting for silence"),
Err(_) => Ok(()),
}
}
fn create_config_toml(
codex_home: &Path,
server_uri: &str,
approval_policy: &str,
) -> std::io::Result<()> {
let config_toml = codex_home.join("config.toml");
std::fs::write(
config_toml,
format!(
r#"
model = "mock-model"
approval_policy = "{approval_policy}"
sandbox_mode = "read-only"
model_provider = "mock_provider"
[model_providers.mock_provider]
name = "Mock provider for test"
base_url = "{server_uri}/v1"
wire_api = "responses"
request_max_retries = 0
stream_max_retries = 0
"#
),
)
}

View File

@@ -4,7 +4,6 @@ mod app_list;
mod collaboration_mode_list;
mod compaction;
mod config_rpc;
mod connection_handling_websocket;
mod dynamic_tools;
mod experimental_api;
mod experimental_feature_list;

View File

@@ -117,23 +117,7 @@ async fn get_account_rate_limits_returns_snapshot() -> Result<()> {
"reset_after_seconds": 43200,
"reset_at": secondary_reset_timestamp,
}
},
"additional_rate_limits": [
{
"limit_name": "codex_other",
"metered_feature": "codex_other",
"rate_limit": {
"allowed": true,
"limit_reached": false,
"primary_window": {
"used_percent": 88,
"limit_window_seconds": 1800,
"reset_after_seconds": 600,
"reset_at": 1735693200
}
}
}
]
}
});
Mock::given(method("GET"))
@@ -159,8 +143,6 @@ async fn get_account_rate_limits_returns_snapshot() -> Result<()> {
let expected = GetAccountRateLimitsResponse {
rate_limits: RateLimitSnapshot {
limit_id: Some("codex".to_string()),
limit_name: None,
primary: Some(RateLimitWindow {
used_percent: 42,
window_duration_mins: Some(60),
@@ -174,46 +156,6 @@ async fn get_account_rate_limits_returns_snapshot() -> Result<()> {
credits: None,
plan_type: Some(AccountPlanType::Pro),
},
rate_limits_by_limit_id: Some(
[
(
"codex".to_string(),
RateLimitSnapshot {
limit_id: Some("codex".to_string()),
limit_name: None,
primary: Some(RateLimitWindow {
used_percent: 42,
window_duration_mins: Some(60),
resets_at: Some(primary_reset_timestamp),
}),
secondary: Some(RateLimitWindow {
used_percent: 5,
window_duration_mins: Some(1440),
resets_at: Some(secondary_reset_timestamp),
}),
credits: None,
plan_type: Some(AccountPlanType::Pro),
},
),
(
"codex_other".to_string(),
RateLimitSnapshot {
limit_id: Some("codex_other".to_string()),
limit_name: Some("codex_other".to_string()),
primary: Some(RateLimitWindow {
used_percent: 88,
window_duration_mins: Some(30),
resets_at: Some(1735693200),
}),
secondary: None,
credits: None,
plan_type: Some(AccountPlanType::Pro),
},
),
]
.into_iter()
.collect(),
),
};
assert_eq!(received, expected);

View File

@@ -5,6 +5,8 @@ use app_test_support::create_mock_responses_server_repeating_assistant;
use app_test_support::create_mock_responses_server_sequence;
use app_test_support::create_shell_command_sse_response;
use app_test_support::to_response;
use codex_app_server_protocol::CommandExecutionApprovalDecision;
use codex_app_server_protocol::CommandExecutionRequestApprovalResponse;
use codex_app_server_protocol::ItemCompletedNotification;
use codex_app_server_protocol::ItemStartedNotification;
use codex_app_server_protocol::JSONRPCError;
@@ -136,7 +138,6 @@ async fn review_start_runs_review_turn_and_emits_code_review_item() -> Result<()
}
#[tokio::test]
#[ignore = "TODO(owenlin0): flaky"]
async fn review_start_exec_approval_item_id_matches_command_execution_item() -> Result<()> {
let responses = vec![
create_shell_command_sse_response(
@@ -209,7 +210,9 @@ async fn review_start_exec_approval_item_id_matches_command_execution_item() ->
mcp.send_response(
request_id,
serde_json::json!({ "decision": codex_core::protocol::ReviewDecision::Approved }),
serde_json::to_value(CommandExecutionRequestApprovalResponse {
decision: CommandExecutionApprovalDecision::Accept,
})?,
)
.await?;
timeout(

View File

@@ -1,7 +1,9 @@
use crate::types::CodeTaskDetailsResponse;
use crate::types::ConfigFileResponse;
use crate::types::CreditStatusDetails;
use crate::types::PaginatedListTaskListItem;
use crate::types::RateLimitStatusPayload;
use crate::types::RateLimitWindowSnapshot;
use crate::types::TurnAttemptsSiblingTurnsResponse;
use anyhow::Result;
use codex_core::auth::CodexAuth;
@@ -158,15 +160,6 @@ impl Client {
}
pub async fn get_rate_limits(&self) -> Result<RateLimitSnapshot> {
let snapshots = self.get_rate_limits_many().await?;
let preferred = snapshots
.iter()
.find(|snapshot| snapshot.limit_id.as_deref() == Some("codex"))
.cloned();
Ok(preferred.unwrap_or_else(|| snapshots[0].clone()))
}
pub async fn get_rate_limits_many(&self) -> Result<Vec<RateLimitSnapshot>> {
let url = match self.path_style {
PathStyle::CodexApi => format!("{}/api/codex/usage", self.base_url),
PathStyle::ChatGptApi => format!("{}/wham/usage", self.base_url),
@@ -174,7 +167,7 @@ impl Client {
let req = self.http.get(&url).headers(self.headers());
let (body, ct) = self.exec_request(req, "GET", &url).await?;
let payload: RateLimitStatusPayload = self.decode_json(&url, &ct, &body)?;
Ok(Self::rate_limit_snapshots_from_payload(payload))
Ok(Self::rate_limit_snapshot_from_payload(payload))
}
pub async fn list_tasks(
@@ -302,59 +295,35 @@ impl Client {
}
// rate limit helpers
fn rate_limit_snapshots_from_payload(
payload: RateLimitStatusPayload,
) -> Vec<RateLimitSnapshot> {
let plan_type = Some(Self::map_plan_type(payload.plan_type));
let mut snapshots = vec![Self::make_rate_limit_snapshot(
Some("codex".to_string()),
None,
payload.rate_limit.flatten().map(|details| *details),
payload.credits.flatten().map(|details| *details),
plan_type,
)];
if let Some(additional) = payload.additional_rate_limits.flatten() {
snapshots.extend(additional.into_iter().map(|details| {
Self::make_rate_limit_snapshot(
Some(details.metered_feature),
Some(details.limit_name),
details.rate_limit.flatten().map(|rate_limit| *rate_limit),
None,
plan_type,
)
}));
}
snapshots
}
fn rate_limit_snapshot_from_payload(payload: RateLimitStatusPayload) -> RateLimitSnapshot {
let rate_limit_details = payload
.rate_limit
.and_then(|inner| inner.map(|boxed| *boxed));
fn make_rate_limit_snapshot(
limit_id: Option<String>,
limit_name: Option<String>,
rate_limit: Option<crate::types::RateLimitStatusDetails>,
credits: Option<crate::types::CreditStatusDetails>,
plan_type: Option<AccountPlanType>,
) -> RateLimitSnapshot {
let (primary, secondary) = match rate_limit {
Some(details) => (
let (primary, secondary) = if let Some(details) = rate_limit_details {
(
Self::map_rate_limit_window(details.primary_window),
Self::map_rate_limit_window(details.secondary_window),
),
None => (None, None),
)
} else {
(None, None)
};
RateLimitSnapshot {
limit_id,
limit_name,
primary,
secondary,
credits: Self::map_credits(credits),
plan_type,
credits: Self::map_credits(payload.credits),
plan_type: Some(Self::map_plan_type(payload.plan_type)),
}
}
fn map_rate_limit_window(
window: Option<Option<Box<crate::types::RateLimitWindowSnapshot>>>,
window: Option<Option<Box<RateLimitWindowSnapshot>>>,
) -> Option<RateLimitWindow> {
let snapshot = window.flatten().map(|details| *details)?;
let snapshot = match window {
Some(Some(snapshot)) => *snapshot,
_ => return None,
};
let used_percent = f64::from(snapshot.used_percent);
let window_minutes = Self::window_minutes_from_seconds(snapshot.limit_window_seconds);
@@ -366,13 +335,16 @@ impl Client {
})
}
fn map_credits(credits: Option<crate::types::CreditStatusDetails>) -> Option<CreditsSnapshot> {
let details = credits?;
fn map_credits(credits: Option<Option<Box<CreditStatusDetails>>>) -> Option<CreditsSnapshot> {
let details = match credits {
Some(Some(details)) => *details,
_ => return None,
};
Some(CreditsSnapshot {
has_credits: details.has_credits,
unlimited: details.unlimited,
balance: details.balance.flatten(),
balance: details.balance.and_then(|inner| inner),
})
}
@@ -402,142 +374,3 @@ impl Client {
Some((seconds_i64 + 59) / 60)
}
}
#[cfg(test)]
mod tests {
use super::*;
use pretty_assertions::assert_eq;
#[test]
fn usage_payload_maps_primary_and_additional_rate_limits() {
let payload = RateLimitStatusPayload {
plan_type: crate::types::PlanType::Pro,
rate_limit: Some(Some(Box::new(crate::types::RateLimitStatusDetails {
primary_window: Some(Some(Box::new(crate::types::RateLimitWindowSnapshot {
used_percent: 42,
limit_window_seconds: 300,
reset_after_seconds: 0,
reset_at: 123,
}))),
secondary_window: Some(Some(Box::new(crate::types::RateLimitWindowSnapshot {
used_percent: 84,
limit_window_seconds: 3600,
reset_after_seconds: 0,
reset_at: 456,
}))),
..Default::default()
}))),
additional_rate_limits: Some(Some(vec![crate::types::AdditionalRateLimitDetails {
limit_name: "codex_other".to_string(),
metered_feature: "codex_other".to_string(),
rate_limit: Some(Some(Box::new(crate::types::RateLimitStatusDetails {
primary_window: Some(Some(Box::new(crate::types::RateLimitWindowSnapshot {
used_percent: 70,
limit_window_seconds: 900,
reset_after_seconds: 0,
reset_at: 789,
}))),
secondary_window: None,
..Default::default()
}))),
}])),
credits: Some(Some(Box::new(crate::types::CreditStatusDetails {
has_credits: true,
unlimited: false,
balance: Some(Some("9.99".to_string())),
..Default::default()
}))),
};
let snapshots = Client::rate_limit_snapshots_from_payload(payload);
assert_eq!(snapshots.len(), 2);
assert_eq!(snapshots[0].limit_id.as_deref(), Some("codex"));
assert_eq!(snapshots[0].limit_name, None);
assert_eq!(
snapshots[0].primary.as_ref().map(|w| w.used_percent),
Some(42.0)
);
assert_eq!(
snapshots[0].secondary.as_ref().map(|w| w.used_percent),
Some(84.0)
);
assert_eq!(
snapshots[0].credits,
Some(CreditsSnapshot {
has_credits: true,
unlimited: false,
balance: Some("9.99".to_string()),
})
);
assert_eq!(snapshots[0].plan_type, Some(AccountPlanType::Pro));
assert_eq!(snapshots[1].limit_id.as_deref(), Some("codex_other"));
assert_eq!(snapshots[1].limit_name.as_deref(), Some("codex_other"));
assert_eq!(
snapshots[1].primary.as_ref().map(|w| w.used_percent),
Some(70.0)
);
assert_eq!(snapshots[1].credits, None);
assert_eq!(snapshots[1].plan_type, Some(AccountPlanType::Pro));
}
#[test]
fn usage_payload_maps_zero_rate_limit_when_primary_absent() {
let payload = RateLimitStatusPayload {
plan_type: crate::types::PlanType::Plus,
rate_limit: None,
additional_rate_limits: Some(Some(vec![crate::types::AdditionalRateLimitDetails {
limit_name: "codex_other".to_string(),
metered_feature: "codex_other".to_string(),
rate_limit: None,
}])),
credits: None,
};
let snapshots = Client::rate_limit_snapshots_from_payload(payload);
assert_eq!(snapshots.len(), 2);
assert_eq!(snapshots[0].limit_id.as_deref(), Some("codex"));
assert_eq!(snapshots[0].limit_name, None);
assert_eq!(snapshots[0].primary, None);
assert_eq!(snapshots[1].limit_id.as_deref(), Some("codex_other"));
assert_eq!(snapshots[1].limit_name.as_deref(), Some("codex_other"));
}
#[test]
fn preferred_snapshot_selection_matches_get_rate_limits_behavior() {
let snapshots = [
RateLimitSnapshot {
limit_id: Some("codex_other".to_string()),
limit_name: Some("codex_other".to_string()),
primary: Some(RateLimitWindow {
used_percent: 90.0,
window_minutes: Some(60),
resets_at: Some(1),
}),
secondary: None,
credits: None,
plan_type: Some(AccountPlanType::Pro),
},
RateLimitSnapshot {
limit_id: Some("codex".to_string()),
limit_name: Some("codex".to_string()),
primary: Some(RateLimitWindow {
used_percent: 10.0,
window_minutes: Some(60),
resets_at: Some(2),
}),
secondary: None,
credits: None,
plan_type: Some(AccountPlanType::Pro),
},
];
let preferred = snapshots
.iter()
.find(|snapshot| snapshot.limit_id.as_deref() == Some("codex"))
.cloned()
.unwrap_or_else(|| snapshots[0].clone());
assert_eq!(preferred.limit_id.as_deref(), Some("codex"));
}
}

View File

@@ -1,4 +1,3 @@
pub use codex_backend_openapi_models::models::AdditionalRateLimitDetails;
pub use codex_backend_openapi_models::models::ConfigFileResponse;
pub use codex_backend_openapi_models::models::CreditStatusDetails;
pub use codex_backend_openapi_models::models::PaginatedListTaskListItem;

View File

@@ -10,8 +10,8 @@ workspace = true
[dependencies]
anyhow = { workspace = true }
clap = { workspace = true, features = ["derive"] }
codex-common = { workspace = true, features = ["cli"] }
codex-core = { workspace = true }
codex-utils-cli = { workspace = true }
codex-utils-cargo-bin = { workspace = true }
serde = { workspace = true, features = ["derive"] }
serde_json = { workspace = true }

View File

@@ -1,8 +1,8 @@
use std::path::PathBuf;
use clap::Parser;
use codex_common::CliConfigOverrides;
use codex_core::config::Config;
use codex_utils_cli::CliConfigOverrides;
use crate::chatgpt_token::init_chatgpt_token_from_auth;
use crate::get_task::GetTaskResponse;

View File

@@ -25,7 +25,7 @@ codex-app-server-test-client = { workspace = true }
codex-arg0 = { workspace = true }
codex-chatgpt = { workspace = true }
codex-cloud-tasks = { path = "../cloud-tasks" }
codex-utils-cli = { workspace = true }
codex-common = { workspace = true, features = ["cli"] }
codex-core = { workspace = true }
codex-exec = { workspace = true }
codex-execpolicy = { workspace = true }

View File

@@ -5,6 +5,7 @@ mod seatbelt;
use std::path::PathBuf;
use codex_common::CliConfigOverrides;
use codex_core::config::Config;
use codex_core::config::ConfigOverrides;
use codex_core::exec_env::create_env;
@@ -13,7 +14,6 @@ use codex_core::landlock::spawn_command_under_linux_sandbox;
use codex_core::seatbelt::spawn_command_under_seatbelt;
use codex_core::spawn::StdioPolicy;
use codex_protocol::config_types::SandboxMode;
use codex_utils_cli::CliConfigOverrides;
use crate::LandlockCommand;
use crate::SeatbeltCommand;

View File

@@ -3,7 +3,7 @@ mod exit_status;
pub mod login;
use clap::Parser;
use codex_utils_cli::CliConfigOverrides;
use codex_common::CliConfigOverrides;
#[derive(Debug, Parser)]
pub struct SeatbeltCommand {

View File

@@ -1,3 +1,4 @@
use codex_common::CliConfigOverrides;
use codex_core::CodexAuth;
use codex_core::auth::AuthCredentialsStoreMode;
use codex_core::auth::AuthMode;
@@ -9,7 +10,6 @@ use codex_login::ServerOptions;
use codex_login::run_device_code_login;
use codex_login::run_login_server;
use codex_protocol::config_types::ForcedLoginMethod;
use codex_utils_cli::CliConfigOverrides;
use std::io::IsTerminal;
use std::io::Read;
use std::path::PathBuf;

View File

@@ -16,6 +16,7 @@ use codex_cli::login::run_login_with_chatgpt;
use codex_cli::login::run_login_with_device_code;
use codex_cli::login::run_logout;
use codex_cloud_tasks::Cli as CloudTasksCli;
use codex_common::CliConfigOverrides;
use codex_exec::Cli as ExecCli;
use codex_exec::Command as ExecCommand;
use codex_exec::ReviewArgs;
@@ -25,7 +26,6 @@ use codex_tui::AppExitInfo;
use codex_tui::Cli as TuiCli;
use codex_tui::ExitReason;
use codex_tui::update_action::UpdateAction;
use codex_utils_cli::CliConfigOverrides;
use owo_colors::OwoColorize;
use std::io::IsTerminal;
use std::path::PathBuf;
@@ -306,15 +306,6 @@ struct AppServerCommand {
#[command(subcommand)]
subcommand: Option<AppServerSubcommand>,
/// Transport endpoint URL. Supported values: `stdio://` (default),
/// `ws://IP:PORT`.
#[arg(
long = "listen",
value_name = "URL",
default_value = codex_app_server::AppServerTransport::DEFAULT_LISTEN_URL
)]
listen: codex_app_server::AppServerTransport,
/// Controls whether analytics are enabled by default.
///
/// Analytics are disabled by default for app-server. Users have to explicitly opt in
@@ -596,13 +587,11 @@ async fn cli_main(codex_linux_sandbox_exe: Option<PathBuf>) -> anyhow::Result<()
}
Some(Subcommand::AppServer(app_server_cli)) => match app_server_cli.subcommand {
None => {
let transport = app_server_cli.listen;
codex_app_server::run_main_with_transport(
codex_app_server::run_main(
codex_linux_sandbox_exe,
root_config_overrides,
codex_core::config_loader::LoaderOverrides::default(),
app_server_cli.analytics_default_enabled,
transport,
)
.await?;
}
@@ -1259,11 +1248,11 @@ mod tests {
assert_eq!(interactive.config_profile.as_deref(), Some("my-profile"));
assert_matches!(
interactive.sandbox_mode,
Some(codex_utils_cli::SandboxModeCliArg::WorkspaceWrite)
Some(codex_common::SandboxModeCliArg::WorkspaceWrite)
);
assert_matches!(
interactive.approval_policy,
Some(codex_utils_cli::ApprovalModeCliArg::OnRequest)
Some(codex_common::ApprovalModeCliArg::OnRequest)
);
assert!(interactive.full_auto);
assert_eq!(
@@ -1339,10 +1328,6 @@ mod tests {
fn app_server_analytics_default_disabled_without_flag() {
let app_server = app_server_from_args(["codex", "app-server"].as_ref());
assert!(!app_server.analytics_default_enabled);
assert_eq!(
app_server.listen,
codex_app_server::AppServerTransport::Stdio
);
}
#[test]
@@ -1352,36 +1337,6 @@ mod tests {
assert!(app_server.analytics_default_enabled);
}
#[test]
fn app_server_listen_websocket_url_parses() {
let app_server = app_server_from_args(
["codex", "app-server", "--listen", "ws://127.0.0.1:4500"].as_ref(),
);
assert_eq!(
app_server.listen,
codex_app_server::AppServerTransport::WebSocket {
bind_address: "127.0.0.1:4500".parse().expect("valid socket address"),
}
);
}
#[test]
fn app_server_listen_stdio_url_parses() {
let app_server =
app_server_from_args(["codex", "app-server", "--listen", "stdio://"].as_ref());
assert_eq!(
app_server.listen,
codex_app_server::AppServerTransport::Stdio
);
}
#[test]
fn app_server_listen_invalid_url_fails_to_parse() {
let parse_result =
MultitoolCli::try_parse_from(["codex", "app-server", "--listen", "http://foo"]);
assert!(parse_result.is_err());
}
#[test]
fn features_enable_parses_feature_name() {
let cli = MultitoolCli::try_parse_from(["codex", "features", "enable", "unified_exec"])

View File

@@ -5,6 +5,8 @@ use anyhow::Result;
use anyhow::anyhow;
use anyhow::bail;
use clap::ArgGroup;
use codex_common::CliConfigOverrides;
use codex_common::format_env_display::format_env_display;
use codex_core::config::Config;
use codex_core::config::edit::ConfigEditsBuilder;
use codex_core::config::find_codex_home;
@@ -17,8 +19,6 @@ use codex_core::mcp::auth::oauth_login_support;
use codex_core::protocol::McpAuthStatus;
use codex_rmcp_client::delete_oauth_tokens;
use codex_rmcp_client::perform_oauth_login;
use codex_utils_cli::CliConfigOverrides;
use codex_utils_cli::format_env_display::format_env_display;
/// Subcommands:
/// - `list` — list configured servers (with `--json`)

View File

@@ -9,22 +9,17 @@ workspace = true
[dependencies]
async-trait = { workspace = true }
base64 = { workspace = true }
chrono = { workspace = true, features = ["serde"] }
codex-backend-client = { workspace = true }
codex-core = { workspace = true }
codex-otel = { workspace = true }
codex-protocol = { workspace = true }
hmac = "0.12.1"
serde = { workspace = true, features = ["derive"] }
serde_json = { workspace = true }
sha2 = { workspace = true }
thiserror = { workspace = true }
tokio = { workspace = true, features = ["fs", "sync", "time"] }
tokio = { workspace = true, features = ["sync", "time"] }
toml = { workspace = true }
tracing = { workspace = true }
[dev-dependencies]
base64 = { workspace = true }
pretty_assertions = { workspace = true }
serde_json = { workspace = true }
tempfile = { workspace = true }
tokio = { workspace = true, features = ["macros", "rt", "test-util", "time"] }

View File

@@ -9,11 +9,6 @@
//! requirements before Codex will run.
use async_trait::async_trait;
use base64::Engine;
use base64::engine::general_purpose::STANDARD as BASE64_STANDARD;
use chrono::DateTime;
use chrono::Duration as ChronoDuration;
use chrono::Utc;
use codex_backend_client::Client as BackendClient;
use codex_core::AuthManager;
use codex_core::auth::CodexAuth;
@@ -21,118 +16,20 @@ use codex_core::config_loader::CloudRequirementsLoader;
use codex_core::config_loader::ConfigRequirementsToml;
use codex_core::util::backoff;
use codex_protocol::account::PlanType;
use hmac::Hmac;
use hmac::Mac;
use serde::Deserialize;
use serde::Serialize;
use sha2::Sha256;
use std::path::PathBuf;
use std::sync::Arc;
use std::time::Duration;
use std::time::Instant;
use thiserror::Error;
use tokio::fs;
use tokio::time::sleep;
use tokio::time::timeout;
const CLOUD_REQUIREMENTS_TIMEOUT: Duration = Duration::from_secs(15);
const CLOUD_REQUIREMENTS_MAX_ATTEMPTS: usize = 5;
const CLOUD_REQUIREMENTS_CACHE_FILENAME: &str = "cloud-requirements-cache.json";
const CLOUD_REQUIREMENTS_CACHE_TTL: Duration = Duration::from_secs(60 * 60);
const CLOUD_REQUIREMENTS_CACHE_WRITE_HMAC_KEY: &[u8] =
b"codex-cloud-requirements-cache-v3-064f8542-75b4-494c-a294-97d3ce597271";
const CLOUD_REQUIREMENTS_CACHE_READ_HMAC_KEYS: &[&[u8]] =
&[CLOUD_REQUIREMENTS_CACHE_WRITE_HMAC_KEY];
type HmacSha256 = Hmac<Sha256>;
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
enum FetchCloudRequirementsStatus {
BackendClientInit,
Request,
}
#[derive(Clone, Debug, Eq, Error, PartialEq)]
enum CacheLoadStatus {
#[error("Skipping cloud requirements cache read because auth identity is incomplete.")]
AuthIdentityIncomplete,
#[error("Cloud requirements cache file not found.")]
CacheFileNotFound,
#[error("Failed to read cloud requirements cache: {0}.")]
CacheReadFailed(String),
#[error("Failed to parse cloud requirements cache: {0}.")]
CacheParseFailed(String),
#[error("Cloud requirements cache failed signature verification.")]
CacheSignatureInvalid,
#[error("Ignoring cloud requirements cache because cached identity is incomplete.")]
CacheIdentityIncomplete,
#[error("Ignoring cloud requirements cache for different auth identity.")]
CacheIdentityMismatch,
#[error("Cloud requirements cache expired.")]
CacheExpired,
}
#[derive(Debug, Error)]
enum CloudRequirementsError {
#[error("failed to write cloud requirements cache")]
CacheWrite,
}
#[derive(Clone, Debug, Deserialize, Serialize)]
struct CloudRequirementsCacheFile {
signed_payload: CloudRequirementsCacheSignedPayload,
signature: String,
}
#[derive(Clone, Debug, Deserialize, Serialize)]
struct CloudRequirementsCacheSignedPayload {
cached_at: DateTime<Utc>,
expires_at: DateTime<Utc>,
chatgpt_user_id: Option<String>,
account_id: Option<String>,
contents: Option<String>,
}
impl CloudRequirementsCacheSignedPayload {
fn requirements(&self) -> Option<ConfigRequirementsToml> {
self.contents
.as_deref()
.and_then(|contents| parse_cloud_requirements(contents).ok().flatten())
}
}
fn sign_cache_payload(payload_bytes: &[u8]) -> Option<String> {
let mut mac = HmacSha256::new_from_slice(CLOUD_REQUIREMENTS_CACHE_WRITE_HMAC_KEY).ok()?;
mac.update(payload_bytes);
let signature = mac.finalize().into_bytes();
Some(BASE64_STANDARD.encode(signature))
}
fn verify_cache_signature_with_key(
payload_bytes: &[u8],
signature_bytes: &[u8],
key: &[u8],
) -> bool {
let mut mac = match HmacSha256::new_from_slice(key) {
Ok(mac) => mac,
Err(_) => return false,
};
mac.update(payload_bytes);
mac.verify_slice(signature_bytes).is_ok()
}
fn verify_cache_signature(payload_bytes: &[u8], signature: &str) -> bool {
let signature_bytes = match BASE64_STANDARD.decode(signature) {
Ok(signature_bytes) => signature_bytes,
Err(_) => return false,
};
CLOUD_REQUIREMENTS_CACHE_READ_HMAC_KEYS
.iter()
.any(|key| verify_cache_signature_with_key(payload_bytes, &signature_bytes, key))
}
fn cache_payload_bytes(payload: &CloudRequirementsCacheSignedPayload) -> Option<Vec<u8>> {
serde_json::to_vec(&payload).ok()
Parse,
}
#[async_trait]
@@ -191,7 +88,6 @@ impl RequirementsFetcher for BackendRequirementsFetcher {
struct CloudRequirementsService {
auth_manager: Arc<AuthManager>,
fetcher: Arc<dyn RequirementsFetcher>,
cache_path: PathBuf,
timeout: Duration,
}
@@ -199,13 +95,11 @@ impl CloudRequirementsService {
fn new(
auth_manager: Arc<AuthManager>,
fetcher: Arc<dyn RequirementsFetcher>,
codex_home: PathBuf,
timeout: Duration,
) -> Self {
Self {
auth_manager,
fetcher,
cache_path: codex_home.join(CLOUD_REQUIREMENTS_CACHE_FILENAME),
timeout,
}
}
@@ -250,39 +144,27 @@ impl CloudRequirementsService {
{
return None;
}
let token_data = auth.get_token_data().ok();
let chatgpt_user_id = token_data
.as_ref()
.and_then(|token_data| token_data.id_token.chatgpt_user_id.as_deref());
let account_id = auth.get_account_id();
let account_id = account_id.as_deref();
match self.load_cache(chatgpt_user_id, account_id).await {
Ok(signed_payload) => {
tracing::info!(
path = %self.cache_path.display(),
"Using cached cloud requirements"
);
return signed_payload.requirements();
}
Err(cache_load_status) => {
self.log_cache_load_status(&cache_load_status);
}
}
self.fetch_with_retries(&auth, chatgpt_user_id, account_id)
.await?
self.fetch_with_retries(&auth).await
}
async fn fetch_with_retries(
&self,
auth: &CodexAuth,
chatgpt_user_id: Option<&str>,
account_id: Option<&str>,
) -> Option<Option<ConfigRequirementsToml>> {
async fn fetch_with_retries(&self, auth: &CodexAuth) -> Option<ConfigRequirementsToml> {
for attempt in 1..=CLOUD_REQUIREMENTS_MAX_ATTEMPTS {
let contents = match self.fetcher.fetch_requirements(auth).await {
Ok(contents) => contents,
let fetch_result = self
.fetcher
.fetch_requirements(auth)
.await
.and_then(|contents| {
contents.map_or(Ok(None), |contents| {
parse_cloud_requirements(&contents).map_err(|err| {
tracing::warn!(error = %err, "Failed to parse cloud requirements");
FetchCloudRequirementsStatus::Parse
})
})
});
match fetch_result {
Ok(requirements) => return requirements,
Err(status) => {
if attempt < CLOUD_REQUIREMENTS_MAX_ATTEMPTS {
tracing::warn!(
@@ -293,163 +175,21 @@ impl CloudRequirementsService {
);
sleep(backoff(attempt as u64)).await;
}
continue;
}
};
let requirements = match contents.as_deref() {
Some(contents) => match parse_cloud_requirements(contents) {
Ok(requirements) => requirements,
Err(err) => {
tracing::warn!(error = %err, "Failed to parse cloud requirements");
return None;
}
},
None => None,
};
if let Err(err) = self
.save_cache(
chatgpt_user_id.map(str::to_owned),
account_id.map(str::to_owned),
contents,
)
.await
{
tracing::warn!(error = %err, "Failed to write cloud requirements cache");
}
return Some(requirements);
}
None
}
async fn load_cache(
&self,
chatgpt_user_id: Option<&str>,
account_id: Option<&str>,
) -> Result<CloudRequirementsCacheSignedPayload, CacheLoadStatus> {
let (Some(chatgpt_user_id), Some(account_id)) = (chatgpt_user_id, account_id) else {
return Err(CacheLoadStatus::AuthIdentityIncomplete);
};
let bytes = match fs::read(&self.cache_path).await {
Ok(bytes) => bytes,
Err(err) => {
if err.kind() != std::io::ErrorKind::NotFound {
return Err(CacheLoadStatus::CacheReadFailed(err.to_string()));
}
return Err(CacheLoadStatus::CacheFileNotFound);
}
};
let cache_file: CloudRequirementsCacheFile = match serde_json::from_slice(&bytes) {
Ok(cache_file) => cache_file,
Err(err) => {
return Err(CacheLoadStatus::CacheParseFailed(err.to_string()));
}
};
let payload_bytes = match cache_payload_bytes(&cache_file.signed_payload) {
Some(payload_bytes) => payload_bytes,
None => {
return Err(CacheLoadStatus::CacheParseFailed(
"failed to serialize cache payload".to_string(),
));
}
};
if !verify_cache_signature(&payload_bytes, &cache_file.signature) {
return Err(CacheLoadStatus::CacheSignatureInvalid);
}
let (Some(cached_chatgpt_user_id), Some(cached_account_id)) = (
cache_file.signed_payload.chatgpt_user_id.as_deref(),
cache_file.signed_payload.account_id.as_deref(),
) else {
return Err(CacheLoadStatus::CacheIdentityIncomplete);
};
if cached_chatgpt_user_id != chatgpt_user_id || cached_account_id != account_id {
return Err(CacheLoadStatus::CacheIdentityMismatch);
}
if cache_file.signed_payload.expires_at <= Utc::now() {
return Err(CacheLoadStatus::CacheExpired);
}
Ok(cache_file.signed_payload)
}
fn log_cache_load_status(&self, status: &CacheLoadStatus) {
if matches!(status, CacheLoadStatus::CacheFileNotFound) {
return;
}
let warn = matches!(
status,
CacheLoadStatus::CacheReadFailed(_)
| CacheLoadStatus::CacheParseFailed(_)
| CacheLoadStatus::CacheSignatureInvalid
);
if warn {
tracing::warn!(path = %self.cache_path.display(), "{status}");
} else {
tracing::info!(path = %self.cache_path.display(), "{status}");
}
}
async fn save_cache(
&self,
chatgpt_user_id: Option<String>,
account_id: Option<String>,
contents: Option<String>,
) -> Result<(), CloudRequirementsError> {
let now = Utc::now();
let expires_at = now
.checked_add_signed(
ChronoDuration::from_std(CLOUD_REQUIREMENTS_CACHE_TTL)
.map_err(|_| CloudRequirementsError::CacheWrite)?,
)
.ok_or(CloudRequirementsError::CacheWrite)?;
let signed_payload = CloudRequirementsCacheSignedPayload {
cached_at: now,
expires_at,
chatgpt_user_id,
account_id,
contents,
};
let payload_bytes =
cache_payload_bytes(&signed_payload).ok_or(CloudRequirementsError::CacheWrite)?;
let serialized = serde_json::to_vec_pretty(&CloudRequirementsCacheFile {
signature: sign_cache_payload(&payload_bytes)
.ok_or(CloudRequirementsError::CacheWrite)?,
signed_payload,
})
.map_err(|_| CloudRequirementsError::CacheWrite)?;
if let Some(parent) = self.cache_path.parent() {
fs::create_dir_all(parent)
.await
.map_err(|_| CloudRequirementsError::CacheWrite)?;
}
fs::write(&self.cache_path, serialized)
.await
.map_err(|_| CloudRequirementsError::CacheWrite)?;
Ok(())
}
}
pub fn cloud_requirements_loader(
auth_manager: Arc<AuthManager>,
chatgpt_base_url: String,
codex_home: PathBuf,
) -> CloudRequirementsLoader {
let service = CloudRequirementsService::new(
auth_manager,
Arc::new(BackendRequirementsFetcher::new(chatgpt_base_url)),
codex_home,
CLOUD_REQUIREMENTS_TIMEOUT,
);
let task = tokio::spawn(async move { service.fetch_with_timeout().await });
@@ -512,17 +252,13 @@ mod tests {
))
}
fn auth_manager_with_plan_and_identity(
plan_type: &str,
chatgpt_user_id: Option<&str>,
account_id: Option<&str>,
) -> Arc<AuthManager> {
fn auth_manager_with_plan(plan_type: &str) -> Arc<AuthManager> {
let tmp = tempdir().expect("tempdir");
let header = json!({ "alg": "none", "typ": "JWT" });
let auth_payload = json!({
"chatgpt_plan_type": plan_type,
"chatgpt_user_id": chatgpt_user_id,
"user_id": chatgpt_user_id,
"chatgpt_user_id": "user-12345",
"user_id": "user-12345",
});
let payload = json!({
"email": "user@example.com",
@@ -539,9 +275,8 @@ mod tests {
"id_token": fake_jwt,
"access_token": "test-access-token",
"refresh_token": "test-refresh-token",
"account_id": account_id,
},
"last_refresh": "2025-01-01T00:00:00Z",
"last_refresh": null,
});
write_auth_json(tmp.path(), auth_json).expect("write auth");
Arc::new(AuthManager::new(
@@ -551,10 +286,6 @@ mod tests {
))
}
fn auth_manager_with_plan(plan_type: &str) -> Arc<AuthManager> {
auth_manager_with_plan_and_identity(plan_type, Some("user-12345"), Some("account-12345"))
}
fn parse_for_fetch(contents: Option<&str>) -> Option<ConfigRequirementsToml> {
contents.and_then(|contents| parse_cloud_requirements(contents).ok().flatten())
}
@@ -616,11 +347,9 @@ mod tests {
#[tokio::test]
async fn fetch_cloud_requirements_skips_non_chatgpt_auth() {
let auth_manager = auth_manager_with_api_key();
let codex_home = tempdir().expect("tempdir");
let service = CloudRequirementsService::new(
auth_manager,
Arc::new(StaticFetcher { contents: None }),
codex_home.path().to_path_buf(),
CLOUD_REQUIREMENTS_TIMEOUT,
);
let result = service.fetch().await;
@@ -629,11 +358,9 @@ mod tests {
#[tokio::test]
async fn fetch_cloud_requirements_skips_non_business_or_enterprise_plan() {
let codex_home = tempdir().expect("tempdir");
let service = CloudRequirementsService::new(
auth_manager_with_plan("pro"),
Arc::new(StaticFetcher { contents: None }),
codex_home.path().to_path_buf(),
CLOUD_REQUIREMENTS_TIMEOUT,
);
let result = service.fetch().await;
@@ -642,13 +369,11 @@ mod tests {
#[tokio::test]
async fn fetch_cloud_requirements_allows_business_plan() {
let codex_home = tempdir().expect("tempdir");
let service = CloudRequirementsService::new(
auth_manager_with_plan("business"),
Arc::new(StaticFetcher {
contents: Some("allowed_approval_policies = [\"never\"]".to_string()),
}),
codex_home.path().to_path_buf(),
CLOUD_REQUIREMENTS_TIMEOUT,
);
assert_eq!(
@@ -710,11 +435,9 @@ mod tests {
#[tokio::test(start_paused = true)]
async fn fetch_cloud_requirements_times_out() {
let auth_manager = auth_manager_with_plan("enterprise");
let codex_home = tempdir().expect("tempdir");
let service = CloudRequirementsService::new(
auth_manager,
Arc::new(PendingFetcher),
codex_home.path().to_path_buf(),
CLOUD_REQUIREMENTS_TIMEOUT,
);
let handle = tokio::spawn(async move { service.fetch_with_timeout().await });
@@ -730,11 +453,9 @@ mod tests {
Err(FetchCloudRequirementsStatus::Request),
Ok(Some("allowed_approval_policies = [\"never\"]".to_string())),
]));
let codex_home = tempdir().expect("tempdir");
let service = CloudRequirementsService::new(
auth_manager_with_plan("business"),
fetcher.clone(),
codex_home.path().to_path_buf(),
CLOUD_REQUIREMENTS_TIMEOUT,
);
@@ -757,345 +478,15 @@ mod tests {
assert_eq!(fetcher.request_count.load(Ordering::SeqCst), 2);
}
#[tokio::test]
async fn fetch_cloud_requirements_parse_error_does_not_retry() {
let fetcher = Arc::new(SequenceFetcher::new(vec![
Ok(Some("not = [".to_string())),
Ok(Some("allowed_approval_policies = [\"never\"]".to_string())),
]));
let codex_home = tempdir().expect("tempdir");
let service = CloudRequirementsService::new(
auth_manager_with_plan("business"),
fetcher.clone(),
codex_home.path().to_path_buf(),
CLOUD_REQUIREMENTS_TIMEOUT,
);
assert!(service.fetch().await.is_none());
assert_eq!(fetcher.request_count.load(Ordering::SeqCst), 1);
}
#[tokio::test]
async fn fetch_cloud_requirements_uses_cache_when_valid() {
let codex_home = tempdir().expect("tempdir");
let prime_service = CloudRequirementsService::new(
auth_manager_with_plan("business"),
Arc::new(StaticFetcher {
contents: Some("allowed_approval_policies = [\"never\"]".to_string()),
}),
codex_home.path().to_path_buf(),
CLOUD_REQUIREMENTS_TIMEOUT,
);
let _ = prime_service.fetch().await;
let fetcher = Arc::new(SequenceFetcher::new(vec![Err(
FetchCloudRequirementsStatus::Request,
)]));
let service = CloudRequirementsService::new(
auth_manager_with_plan("business"),
fetcher.clone(),
codex_home.path().to_path_buf(),
CLOUD_REQUIREMENTS_TIMEOUT,
);
assert_eq!(
service.fetch().await,
Some(ConfigRequirementsToml {
allowed_approval_policies: Some(vec![AskForApproval::Never]),
allowed_sandbox_modes: None,
allowed_web_search_modes: None,
mcp_servers: None,
rules: None,
enforce_residency: None,
network: None,
})
);
assert_eq!(fetcher.request_count.load(Ordering::SeqCst), 0);
}
#[tokio::test]
async fn fetch_cloud_requirements_writes_cache_when_identity_is_incomplete() {
let codex_home = tempdir().expect("tempdir");
let service = CloudRequirementsService::new(
auth_manager_with_plan_and_identity("business", None, Some("account-12345")),
Arc::new(StaticFetcher {
contents: Some("allowed_approval_policies = [\"never\"]".to_string()),
}),
codex_home.path().to_path_buf(),
CLOUD_REQUIREMENTS_TIMEOUT,
);
assert_eq!(
service.fetch().await,
Some(ConfigRequirementsToml {
allowed_approval_policies: Some(vec![AskForApproval::Never]),
allowed_sandbox_modes: None,
allowed_web_search_modes: None,
mcp_servers: None,
rules: None,
enforce_residency: None,
network: None,
})
);
let path = codex_home.path().join(CLOUD_REQUIREMENTS_CACHE_FILENAME);
let cache_file: CloudRequirementsCacheFile =
serde_json::from_str(&std::fs::read_to_string(path).expect("read cache"))
.expect("parse cache");
assert_eq!(cache_file.signed_payload.chatgpt_user_id, None);
assert_eq!(
cache_file.signed_payload.account_id,
Some("account-12345".to_string())
);
}
#[tokio::test]
async fn fetch_cloud_requirements_does_not_use_cache_when_auth_identity_is_incomplete() {
let codex_home = tempdir().expect("tempdir");
let prime_service = CloudRequirementsService::new(
auth_manager_with_plan("business"),
Arc::new(StaticFetcher {
contents: Some("allowed_approval_policies = [\"never\"]".to_string()),
}),
codex_home.path().to_path_buf(),
CLOUD_REQUIREMENTS_TIMEOUT,
);
let _ = prime_service.fetch().await;
let fetcher = Arc::new(SequenceFetcher::new(vec![Ok(Some(
"allowed_approval_policies = [\"on-request\"]".to_string(),
))]));
let service = CloudRequirementsService::new(
auth_manager_with_plan_and_identity("business", None, Some("account-12345")),
fetcher.clone(),
codex_home.path().to_path_buf(),
CLOUD_REQUIREMENTS_TIMEOUT,
);
assert_eq!(
service.fetch().await,
Some(ConfigRequirementsToml {
allowed_approval_policies: Some(vec![AskForApproval::OnRequest]),
allowed_sandbox_modes: None,
allowed_web_search_modes: None,
mcp_servers: None,
rules: None,
enforce_residency: None,
network: None,
})
);
assert_eq!(fetcher.request_count.load(Ordering::SeqCst), 1);
}
#[tokio::test]
async fn fetch_cloud_requirements_ignores_cache_for_different_auth_identity() {
let codex_home = tempdir().expect("tempdir");
let prime_service = CloudRequirementsService::new(
auth_manager_with_plan_and_identity(
"business",
Some("user-12345"),
Some("account-12345"),
),
Arc::new(StaticFetcher {
contents: Some("allowed_approval_policies = [\"never\"]".to_string()),
}),
codex_home.path().to_path_buf(),
CLOUD_REQUIREMENTS_TIMEOUT,
);
let _ = prime_service.fetch().await;
let fetcher = Arc::new(SequenceFetcher::new(vec![Ok(Some(
"allowed_approval_policies = [\"on-request\"]".to_string(),
))]));
let service = CloudRequirementsService::new(
auth_manager_with_plan_and_identity(
"business",
Some("user-99999"),
Some("account-12345"),
),
fetcher.clone(),
codex_home.path().to_path_buf(),
CLOUD_REQUIREMENTS_TIMEOUT,
);
assert_eq!(
service.fetch().await,
Some(ConfigRequirementsToml {
allowed_approval_policies: Some(vec![AskForApproval::OnRequest]),
allowed_sandbox_modes: None,
allowed_web_search_modes: None,
mcp_servers: None,
rules: None,
enforce_residency: None,
network: None,
})
);
assert_eq!(fetcher.request_count.load(Ordering::SeqCst), 1);
}
#[tokio::test]
async fn fetch_cloud_requirements_ignores_tampered_cache() {
let codex_home = tempdir().expect("tempdir");
let prime_service = CloudRequirementsService::new(
auth_manager_with_plan("business"),
Arc::new(StaticFetcher {
contents: Some("allowed_approval_policies = [\"never\"]".to_string()),
}),
codex_home.path().to_path_buf(),
CLOUD_REQUIREMENTS_TIMEOUT,
);
let _ = prime_service.fetch().await;
let path = codex_home.path().join(CLOUD_REQUIREMENTS_CACHE_FILENAME);
let mut cache_file: CloudRequirementsCacheFile =
serde_json::from_str(&std::fs::read_to_string(&path).expect("read cache"))
.expect("parse cache");
cache_file.signed_payload.contents =
Some("allowed_approval_policies = [\"on-request\"]".to_string());
std::fs::write(
&path,
serde_json::to_vec_pretty(&cache_file).expect("serialize cache"),
)
.expect("write cache");
let fetcher = Arc::new(SequenceFetcher::new(vec![Ok(Some(
"allowed_approval_policies = [\"never\"]".to_string(),
))]));
let service = CloudRequirementsService::new(
auth_manager_with_plan("enterprise"),
fetcher.clone(),
codex_home.path().to_path_buf(),
CLOUD_REQUIREMENTS_TIMEOUT,
);
assert_eq!(
service.fetch().await,
Some(ConfigRequirementsToml {
allowed_approval_policies: Some(vec![AskForApproval::Never]),
allowed_sandbox_modes: None,
allowed_web_search_modes: None,
mcp_servers: None,
rules: None,
enforce_residency: None,
network: None,
})
);
assert_eq!(fetcher.request_count.load(Ordering::SeqCst), 1);
}
#[tokio::test]
async fn fetch_cloud_requirements_ignores_expired_cache() {
let codex_home = tempdir().expect("tempdir");
let path = codex_home.path().join(CLOUD_REQUIREMENTS_CACHE_FILENAME);
let cache_file = CloudRequirementsCacheFile {
signed_payload: CloudRequirementsCacheSignedPayload {
cached_at: Utc::now(),
expires_at: Utc::now() - ChronoDuration::seconds(1),
chatgpt_user_id: Some("user-12345".to_string()),
account_id: Some("account-12345".to_string()),
contents: Some("allowed_approval_policies = [\"on-request\"]".to_string()),
},
signature: String::new(),
};
let payload_bytes = cache_payload_bytes(&cache_file.signed_payload).expect("payload");
let signature = sign_cache_payload(&payload_bytes).expect("sign payload");
let cache_file = CloudRequirementsCacheFile {
signature,
..cache_file
};
std::fs::write(
&path,
serde_json::to_vec_pretty(&cache_file).expect("serialize cache"),
)
.expect("write cache");
let fetcher = Arc::new(SequenceFetcher::new(vec![Ok(Some(
"allowed_approval_policies = [\"never\"]".to_string(),
))]));
let service = CloudRequirementsService::new(
auth_manager_with_plan("enterprise"),
fetcher.clone(),
codex_home.path().to_path_buf(),
CLOUD_REQUIREMENTS_TIMEOUT,
);
assert_eq!(
service.fetch().await,
Some(ConfigRequirementsToml {
allowed_approval_policies: Some(vec![AskForApproval::Never]),
allowed_sandbox_modes: None,
allowed_web_search_modes: None,
mcp_servers: None,
rules: None,
enforce_residency: None,
network: None,
})
);
assert_eq!(fetcher.request_count.load(Ordering::SeqCst), 1);
}
#[tokio::test]
async fn fetch_cloud_requirements_writes_signed_cache() {
let codex_home = tempdir().expect("tempdir");
let service = CloudRequirementsService::new(
auth_manager_with_plan("business"),
Arc::new(StaticFetcher {
contents: Some("allowed_approval_policies = [\"never\"]".to_string()),
}),
codex_home.path().to_path_buf(),
CLOUD_REQUIREMENTS_TIMEOUT,
);
let _ = service.fetch().await;
let path = codex_home.path().join(CLOUD_REQUIREMENTS_CACHE_FILENAME);
let cache_file: CloudRequirementsCacheFile =
serde_json::from_str(&std::fs::read_to_string(path).expect("read cache"))
.expect("parse cache");
assert!(cache_file.signed_payload.expires_at > Utc::now());
assert!(cache_file.signed_payload.cached_at <= Utc::now());
assert_eq!(
cache_file.signed_payload.chatgpt_user_id,
Some("user-12345".to_string())
);
assert_eq!(
cache_file.signed_payload.account_id,
Some("account-12345".to_string())
);
assert_eq!(
cache_file
.signed_payload
.contents
.as_deref()
.and_then(|contents| parse_cloud_requirements(contents).ok().flatten()),
Some(ConfigRequirementsToml {
allowed_approval_policies: Some(vec![AskForApproval::Never]),
allowed_sandbox_modes: None,
allowed_web_search_modes: None,
mcp_servers: None,
rules: None,
enforce_residency: None,
network: None,
})
);
let payload_bytes = cache_payload_bytes(&cache_file.signed_payload).expect("payload bytes");
assert!(verify_cache_signature(
&payload_bytes,
&cache_file.signature
));
}
#[tokio::test]
async fn fetch_cloud_requirements_none_is_success_without_retry() {
let fetcher = Arc::new(SequenceFetcher::new(vec![
Ok(None),
Err(FetchCloudRequirementsStatus::Request),
]));
let codex_home = tempdir().expect("tempdir");
let service = CloudRequirementsService::new(
auth_manager_with_plan("enterprise"),
fetcher.clone(),
codex_home.path().to_path_buf(),
CLOUD_REQUIREMENTS_TIMEOUT,
);
@@ -1111,11 +502,9 @@ mod tests {
);
CLOUD_REQUIREMENTS_MAX_ATTEMPTS
]));
let codex_home = tempdir().expect("tempdir");
let service = CloudRequirementsService::new(
auth_manager_with_plan("enterprise"),
fetcher.clone(),
codex_home.path().to_path_buf(),
CLOUD_REQUIREMENTS_TIMEOUT,
);

View File

@@ -20,10 +20,10 @@ codex-cloud-tasks-client = { path = "../cloud-tasks-client", features = [
"mock",
"online",
] }
codex-common = { path = "../common", features = ["cli"] }
codex-core = { path = "../core" }
codex-login = { path = "../login" }
codex-tui = { path = "../tui" }
codex-utils-cli = { workspace = true }
crossterm = { workspace = true, features = ["event-stream"] }
ratatui = { workspace = true }
reqwest = { workspace = true, features = ["json"] }

View File

@@ -1,6 +1,6 @@
use clap::Args;
use clap::Parser;
use codex_utils_cli::CliConfigOverrides;
use codex_common::CliConfigOverrides;
#[derive(Parser, Debug, Default)]
#[command(version)]

View File

@@ -63,8 +63,6 @@ pub enum ResponseEvent {
Completed {
response_id: String,
token_usage: Option<TokenUsage>,
/// Whether the client can append more items to a long-running websocket response.
can_append: bool,
},
OutputTextDelta(String),
ReasoningSummaryDelta {

View File

@@ -66,7 +66,6 @@ impl Stream for AggregatedStream {
Poll::Ready(Some(Ok(ResponseEvent::Completed {
response_id,
token_usage,
can_append: _can_append,
}))) => {
let mut emitted_any = false;
@@ -103,7 +102,6 @@ impl Stream for AggregatedStream {
this.pending.push_back(ResponseEvent::Completed {
response_id: response_id.clone(),
token_usage: token_usage.clone(),
can_append: false,
});
if let Some(ev) = this.pending.pop_front() {
return Poll::Ready(Some(Ok(ev)));
@@ -113,7 +111,6 @@ impl Stream for AggregatedStream {
return Poll::Ready(Some(Ok(ResponseEvent::Completed {
response_id,
token_usage,
can_append: false,
})));
}
Poll::Ready(Some(Ok(ResponseEvent::Created))) => continue,

View File

@@ -67,22 +67,11 @@ struct SummarizeResponse {
#[cfg(test)]
mod tests {
use super::*;
use crate::common::RawMemory;
use crate::common::RawMemoryMetadata;
use crate::provider::RetryConfig;
use async_trait::async_trait;
use codex_client::Request;
use codex_client::Response;
use codex_client::StreamResponse;
use codex_client::TransportError;
use http::HeaderMap;
use http::Method;
use http::StatusCode;
use pretty_assertions::assert_eq;
use serde_json::json;
use std::sync::Arc;
use std::sync::Mutex;
use std::time::Duration;
#[derive(Clone, Default)]
struct DummyTransport;
@@ -107,54 +96,6 @@ mod tests {
}
}
#[derive(Clone)]
struct CapturingTransport {
last_request: Arc<Mutex<Option<Request>>>,
response_body: Arc<Vec<u8>>,
}
impl CapturingTransport {
fn new(response_body: Vec<u8>) -> Self {
Self {
last_request: Arc::new(Mutex::new(None)),
response_body: Arc::new(response_body),
}
}
}
#[async_trait]
impl HttpTransport for CapturingTransport {
async fn execute(&self, req: Request) -> Result<Response, TransportError> {
*self.last_request.lock().expect("lock request store") = Some(req);
Ok(Response {
status: StatusCode::OK,
headers: HeaderMap::new(),
body: self.response_body.as_ref().clone().into(),
})
}
async fn stream(&self, _req: Request) -> Result<StreamResponse, TransportError> {
Err(TransportError::Build("stream should not run".to_string()))
}
}
fn provider(base_url: &str) -> Provider {
Provider {
name: "test".to_string(),
base_url: base_url.to_string(),
query_params: None,
headers: HeaderMap::new(),
retry: RetryConfig {
max_attempts: 1,
base_delay: Duration::from_millis(1),
retry_429: false,
retry_5xx: true,
retry_transport: true,
},
stream_idle_timeout: Duration::from_secs(1),
}
}
#[test]
fn path_is_memories_trace_summarize_for_wire_compatibility() {
assert_eq!(
@@ -162,63 +103,4 @@ mod tests {
"memories/trace_summarize"
);
}
#[tokio::test]
async fn summarize_input_posts_expected_payload_and_parses_output() {
let transport = CapturingTransport::new(
serde_json::to_vec(&json!({
"output": [
{
"trace_summary": "raw summary",
"memory_summary": "memory summary"
}
]
}))
.expect("serialize response"),
);
let client = MemoriesClient::new(
transport.clone(),
provider("https://example.com/api/codex"),
DummyAuth,
);
let input = MemorySummarizeInput {
model: "gpt-test".to_string(),
raw_memories: vec![RawMemory {
id: "trace-1".to_string(),
metadata: RawMemoryMetadata {
source_path: "/tmp/trace.json".to_string(),
},
items: vec![json!({"type": "message", "role": "user", "content": []})],
}],
reasoning: None,
};
let output = client
.summarize_input(&input, HeaderMap::new())
.await
.expect("summarize input request should succeed");
assert_eq!(output.len(), 1);
assert_eq!(output[0].raw_memory, "raw summary");
assert_eq!(output[0].memory_summary, "memory summary");
let request = transport
.last_request
.lock()
.expect("lock request store")
.clone()
.expect("request should be captured");
assert_eq!(request.method, Method::POST);
assert_eq!(
request.url,
"https://example.com/api/codex/memories/trace_summarize"
);
let body = request.body.expect("request body should be present");
assert_eq!(body["model"], "gpt-test");
assert_eq!(body["traces"][0]["id"], "trace-1");
assert_eq!(
body["traces"][0]["metadata"]["source_path"],
"/tmp/trace.json"
);
}
}

View File

@@ -26,7 +26,6 @@ use std::time::Duration;
use tokio::net::TcpStream;
use tokio::sync::Mutex;
use tokio::sync::mpsc;
use tokio::sync::oneshot;
use tokio::time::Instant;
use tokio_tungstenite::MaybeTlsStream;
use tokio_tungstenite::WebSocketStream;
@@ -42,124 +41,7 @@ use tungstenite::extensions::compression::deflate::DeflateConfig;
use tungstenite::protocol::WebSocketConfig;
use url::Url;
struct WsStream {
tx_command: mpsc::Sender<WsCommand>,
rx_message: mpsc::UnboundedReceiver<Result<Message, WsError>>,
pump_task: tokio::task::JoinHandle<()>,
}
enum WsCommand {
Send {
message: Message,
tx_result: oneshot::Sender<Result<(), WsError>>,
},
Close {
tx_result: oneshot::Sender<Result<(), WsError>>,
},
}
impl WsStream {
fn new(inner: WebSocketStream<MaybeTlsStream<TcpStream>>) -> Self {
let (tx_command, mut rx_command) = mpsc::channel::<WsCommand>(32);
let (tx_message, rx_message) = mpsc::unbounded_channel::<Result<Message, WsError>>();
let pump_task = tokio::spawn(async move {
let mut inner = inner;
loop {
tokio::select! {
command = rx_command.recv() => {
let Some(command) = command else {
break;
};
match command {
WsCommand::Send { message, tx_result } => {
let result = inner.send(message).await;
let should_break = result.is_err();
let _ = tx_result.send(result);
if should_break {
break;
}
}
WsCommand::Close { tx_result } => {
let result = inner.close(None).await;
let _ = tx_result.send(result);
break;
}
}
}
message = inner.next() => {
let Some(message) = message else {
break;
};
match message {
Ok(Message::Ping(payload)) => {
if let Err(err) = inner.send(Message::Pong(payload)).await {
let _ = tx_message.send(Err(err));
break;
}
}
Ok(Message::Pong(_)) => {}
Ok(message @ (Message::Text(_)
| Message::Binary(_)
| Message::Close(_)
| Message::Frame(_))) => {
let is_close = matches!(message, Message::Close(_));
if tx_message.send(Ok(message)).is_err() {
break;
}
if is_close {
break;
}
}
Err(err) => {
let _ = tx_message.send(Err(err));
break;
}
}
}
}
}
});
Self {
tx_command,
rx_message,
pump_task,
}
}
async fn request(
&self,
make_command: impl FnOnce(oneshot::Sender<Result<(), WsError>>) -> WsCommand,
) -> Result<(), WsError> {
let (tx_result, rx_result) = oneshot::channel();
if self.tx_command.send(make_command(tx_result)).await.is_err() {
return Err(WsError::ConnectionClosed);
}
rx_result.await.unwrap_or(Err(WsError::ConnectionClosed))
}
async fn send(&self, message: Message) -> Result<(), WsError> {
self.request(|tx_result| WsCommand::Send { message, tx_result })
.await
}
async fn close(&self) -> Result<(), WsError> {
self.request(|tx_result| WsCommand::Close { tx_result })
.await
}
async fn next(&mut self) -> Option<Result<Message, WsError>> {
self.rx_message.recv().await
}
}
impl Drop for WsStream {
fn drop(&mut self) {
self.pump_task.abort();
}
}
type WsStream = WebSocketStream<MaybeTlsStream<TcpStream>>;
const X_CODEX_TURN_STATE_HEADER: &str = "x-codex-turn-state";
const X_MODELS_ETAG_HEADER: &str = "x-models-etag";
const X_REASONING_INCLUDED_HEADER: &str = "x-reasoning-included";
@@ -237,7 +119,7 @@ impl ResponsesWebsocketConnection {
)
.await
{
let _ = ws_stream.close().await;
let _ = ws_stream.close(None).await;
*guard = None;
let _ = tx_event.send(Err(err)).await;
}
@@ -260,7 +142,6 @@ impl<A: AuthProvider> ResponsesWebsocketClient<A> {
pub async fn connect(
&self,
extra_headers: HeaderMap,
default_headers: HeaderMap,
turn_state: Option<Arc<OnceLock<String>>>,
telemetry: Option<Arc<dyn WebsocketTelemetry>>,
) -> Result<ResponsesWebsocketConnection, ApiError> {
@@ -269,8 +150,8 @@ impl<A: AuthProvider> ResponsesWebsocketClient<A> {
.websocket_url_for_path("responses")
.map_err(|err| ApiError::Stream(format!("failed to build websocket URL: {err}")))?;
let mut headers =
merge_request_headers(&self.provider.headers, extra_headers, default_headers);
let mut headers = self.provider.headers.clone();
headers.extend(extra_headers);
add_auth_headers_to_header_map(&self.auth, &mut headers);
let (stream, server_reasoning_included, models_etag) =
@@ -285,21 +166,6 @@ impl<A: AuthProvider> ResponsesWebsocketClient<A> {
}
}
fn merge_request_headers(
provider_headers: &HeaderMap,
extra_headers: HeaderMap,
default_headers: HeaderMap,
) -> HeaderMap {
let mut headers = provider_headers.clone();
headers.extend(extra_headers);
for (name, value) in &default_headers {
if let http::header::Entry::Vacant(entry) = headers.entry(name) {
entry.insert(value.clone());
}
}
headers
}
async fn connect_websocket(
url: Url,
headers: HeaderMap,
@@ -349,7 +215,7 @@ async fn connect_websocket(
{
let _ = turn_state.set(header_value.to_string());
}
Ok((WsStream::new(stream), reasoning_included, models_etag))
Ok((stream, reasoning_included, models_etag))
}
fn websocket_config() -> WebSocketConfig {
@@ -477,7 +343,6 @@ async fn run_websocket_response_stream(
)));
}
};
trace!("websocket request: {request_text}");
let request_start = Instant::now();
let result = ws_stream
@@ -553,13 +418,18 @@ async fn run_websocket_response_stream(
Message::Binary(_) => {
return Err(ApiError::Stream("unexpected binary websocket event".into()));
}
Message::Ping(payload) => {
if ws_stream.send(Message::Pong(payload)).await.is_err() {
return Err(ApiError::Stream("websocket ping failed".into()));
}
}
Message::Pong(_) => {}
Message::Close(_) => {
return Err(ApiError::Stream(
"websocket closed by server before response.completed".into(),
));
}
Message::Frame(_) => {}
Message::Ping(_) | Message::Pong(_) => {}
_ => {}
}
}
@@ -689,37 +559,4 @@ mod tests {
let api_error = map_wrapped_websocket_error_event(wrapped_error);
assert!(api_error.is_none());
}
#[test]
fn merge_request_headers_matches_http_precedence() {
let mut provider_headers = HeaderMap::new();
provider_headers.insert(
"originator",
HeaderValue::from_static("provider-originator"),
);
provider_headers.insert("x-priority", HeaderValue::from_static("provider"));
let mut extra_headers = HeaderMap::new();
extra_headers.insert("x-priority", HeaderValue::from_static("extra"));
let mut default_headers = HeaderMap::new();
default_headers.insert("originator", HeaderValue::from_static("default-originator"));
default_headers.insert("x-priority", HeaderValue::from_static("default"));
default_headers.insert("x-default-only", HeaderValue::from_static("default-only"));
let merged = merge_request_headers(&provider_headers, extra_headers, default_headers);
assert_eq!(
merged.get("originator"),
Some(&HeaderValue::from_static("provider-originator"))
);
assert_eq!(
merged.get("x-priority"),
Some(&HeaderValue::from_static("extra"))
);
assert_eq!(
merged.get("x-default-only"),
Some(&HeaderValue::from_static("default-only"))
);
}
}

View File

@@ -27,8 +27,6 @@ pub enum ApiError {
RateLimit(String),
#[error("invalid request: {message}")]
InvalidRequest { message: String },
#[error("server overloaded")]
ServerOverloaded,
}
impl From<RateLimitError> for ApiError {

View File

@@ -4,7 +4,6 @@ use codex_protocol::protocol::RateLimitSnapshot;
use codex_protocol::protocol::RateLimitWindow;
use http::HeaderMap;
use serde::Deserialize;
use std::collections::BTreeSet;
use std::fmt::Display;
#[derive(Debug)]
@@ -18,77 +17,25 @@ impl Display for RateLimitError {
}
}
/// Parses the default Codex rate-limit header family into a `RateLimitSnapshot`.
pub fn parse_default_rate_limit(headers: &HeaderMap) -> Option<RateLimitSnapshot> {
parse_rate_limit_for_limit(headers, None)
}
/// Parses all known rate-limit header families into update records keyed by limit id.
pub fn parse_all_rate_limits(headers: &HeaderMap) -> Vec<RateLimitSnapshot> {
let mut snapshots = Vec::new();
if let Some(snapshot) = parse_default_rate_limit(headers) {
snapshots.push(snapshot);
}
let mut limit_ids: BTreeSet<String> = BTreeSet::new();
for name in headers.keys() {
let header_name = name.as_str().to_ascii_lowercase();
if let Some(limit_id) = header_name_to_limit_id(&header_name)
&& limit_id != "codex"
{
limit_ids.insert(limit_id);
}
}
snapshots.extend(limit_ids.into_iter().filter_map(|limit_id| {
let snapshot = parse_rate_limit_for_limit(headers, Some(limit_id.as_str()))?;
has_rate_limit_data(&snapshot).then_some(snapshot)
}));
snapshots
}
/// Parses rate-limit headers for the provided limit id.
///
/// `limit_id` should match the server-provided metered limit id (e.g. `codex`,
/// `codex_other`). When omitted, this defaults to the legacy `codex` header family.
pub fn parse_rate_limit_for_limit(
headers: &HeaderMap,
limit_id: Option<&str>,
) -> Option<RateLimitSnapshot> {
let normalized_limit = limit_id
.map(str::trim)
.filter(|name| !name.is_empty())
.unwrap_or("codex")
.to_ascii_lowercase()
.replace('_', "-");
let prefix = format!("x-{normalized_limit}");
/// Parses the bespoke Codex rate-limit headers into a `RateLimitSnapshot`.
pub fn parse_rate_limit(headers: &HeaderMap) -> Option<RateLimitSnapshot> {
let primary = parse_rate_limit_window(
headers,
&format!("{prefix}-primary-used-percent"),
&format!("{prefix}-primary-window-minutes"),
&format!("{prefix}-primary-reset-at"),
"x-codex-primary-used-percent",
"x-codex-primary-window-minutes",
"x-codex-primary-reset-at",
);
let secondary = parse_rate_limit_window(
headers,
&format!("{prefix}-secondary-used-percent"),
&format!("{prefix}-secondary-window-minutes"),
&format!("{prefix}-secondary-reset-at"),
"x-codex-secondary-used-percent",
"x-codex-secondary-window-minutes",
"x-codex-secondary-reset-at",
);
let normalized_limit_id = normalize_limit_id(normalized_limit);
let credits = parse_credits_snapshot(headers);
let limit_name_header = format!("{prefix}-limit-name");
let parsed_limit_name = parse_header_str(headers, &limit_name_header)
.map(str::trim)
.filter(|name| !name.is_empty())
.map(std::string::ToString::to_string);
Some(RateLimitSnapshot {
limit_id: Some(normalized_limit_id),
limit_name: parsed_limit_name,
primary,
secondary,
credits,
@@ -123,8 +70,6 @@ struct RateLimitEvent {
plan_type: Option<PlanType>,
rate_limits: Option<RateLimitEventDetails>,
credits: Option<RateLimitEventCredits>,
metered_limit_name: Option<String>,
limit_name: Option<String>,
}
pub fn parse_rate_limit_event(payload: &str) -> Option<RateLimitSnapshot> {
@@ -145,13 +90,7 @@ pub fn parse_rate_limit_event(payload: &str) -> Option<RateLimitSnapshot> {
unlimited: credits.unlimited,
balance: credits.balance,
});
let limit_id = event
.metered_limit_name
.or(event.limit_name)
.map(normalize_limit_id);
Some(RateLimitSnapshot {
limit_id: Some(limit_id.unwrap_or_else(|| "codex".to_string())),
limit_name: None,
primary,
secondary,
credits,
@@ -239,128 +178,3 @@ fn parse_header_bool(headers: &HeaderMap, name: &str) -> Option<bool> {
fn parse_header_str<'a>(headers: &'a HeaderMap, name: &str) -> Option<&'a str> {
headers.get(name)?.to_str().ok()
}
fn has_rate_limit_data(snapshot: &RateLimitSnapshot) -> bool {
snapshot.primary.is_some() || snapshot.secondary.is_some() || snapshot.credits.is_some()
}
fn header_name_to_limit_id(header_name: &str) -> Option<String> {
let suffix = "-primary-used-percent";
let prefix = header_name.strip_suffix(suffix)?;
let limit = prefix.strip_prefix("x-")?;
Some(normalize_limit_id(limit.to_string()))
}
fn normalize_limit_id(name: impl Into<String>) -> String {
name.into().trim().to_ascii_lowercase().replace('-', "_")
}
#[cfg(test)]
mod tests {
use super::*;
use http::HeaderValue;
use pretty_assertions::assert_eq;
#[test]
fn parse_rate_limit_for_limit_defaults_to_codex_headers() {
let mut headers = HeaderMap::new();
headers.insert(
"x-codex-primary-used-percent",
HeaderValue::from_static("12.5"),
);
headers.insert(
"x-codex-primary-window-minutes",
HeaderValue::from_static("60"),
);
headers.insert(
"x-codex-primary-reset-at",
HeaderValue::from_static("1704069000"),
);
let snapshot = parse_rate_limit_for_limit(&headers, None).expect("snapshot");
assert_eq!(snapshot.limit_id.as_deref(), Some("codex"));
assert_eq!(snapshot.limit_name, None);
let primary = snapshot.primary.expect("primary");
assert_eq!(primary.used_percent, 12.5);
assert_eq!(primary.window_minutes, Some(60));
assert_eq!(primary.resets_at, Some(1704069000));
}
#[test]
fn parse_rate_limit_for_limit_reads_secondary_headers() {
let mut headers = HeaderMap::new();
headers.insert(
"x-codex-secondary-primary-used-percent",
HeaderValue::from_static("80"),
);
headers.insert(
"x-codex-secondary-primary-window-minutes",
HeaderValue::from_static("1440"),
);
headers.insert(
"x-codex-secondary-primary-reset-at",
HeaderValue::from_static("1704074400"),
);
let snapshot =
parse_rate_limit_for_limit(&headers, Some("codex_secondary")).expect("snapshot");
assert_eq!(snapshot.limit_id.as_deref(), Some("codex_secondary"));
assert_eq!(snapshot.limit_name, None);
let primary = snapshot.primary.expect("primary");
assert_eq!(primary.used_percent, 80.0);
assert_eq!(primary.window_minutes, Some(1440));
assert_eq!(primary.resets_at, Some(1704074400));
assert_eq!(snapshot.secondary, None);
}
#[test]
fn parse_rate_limit_for_limit_prefers_limit_name_header() {
let mut headers = HeaderMap::new();
headers.insert(
"x-codex-bengalfox-primary-used-percent",
HeaderValue::from_static("80"),
);
headers.insert(
"x-codex-bengalfox-limit-name",
HeaderValue::from_static("gpt-5.2-codex-sonic"),
);
let snapshot =
parse_rate_limit_for_limit(&headers, Some("codex_bengalfox")).expect("snapshot");
assert_eq!(snapshot.limit_id.as_deref(), Some("codex_bengalfox"));
assert_eq!(snapshot.limit_name.as_deref(), Some("gpt-5.2-codex-sonic"));
}
#[test]
fn parse_all_rate_limits_reads_all_limit_families() {
let mut headers = HeaderMap::new();
headers.insert(
"x-codex-primary-used-percent",
HeaderValue::from_static("12.5"),
);
headers.insert(
"x-codex-secondary-primary-used-percent",
HeaderValue::from_static("80"),
);
let updates = parse_all_rate_limits(&headers);
assert_eq!(updates.len(), 2);
assert_eq!(updates[0].limit_id.as_deref(), Some("codex"));
assert_eq!(updates[1].limit_id.as_deref(), Some("codex_secondary"));
assert_eq!(updates[0].limit_name, None);
assert_eq!(updates[1].limit_name, None);
}
#[test]
fn parse_all_rate_limits_includes_default_codex_snapshot() {
let headers = HeaderMap::new();
let updates = parse_all_rate_limits(&headers);
assert_eq!(updates.len(), 1);
assert_eq!(updates[0].limit_id.as_deref(), Some("codex"));
assert_eq!(updates[0].limit_name, None);
assert_eq!(updates[0].primary, None);
assert_eq!(updates[0].secondary, None);
assert_eq!(updates[0].credits, None);
}
}

View File

@@ -1,7 +1,7 @@
use crate::common::ResponseEvent;
use crate::common::ResponseStream;
use crate::error::ApiError;
use crate::rate_limits::parse_all_rate_limits;
use crate::rate_limits::parse_rate_limit;
use crate::telemetry::SseTelemetry;
use codex_client::ByteStream;
use codex_client::StreamResponse;
@@ -54,7 +54,7 @@ pub fn spawn_response_stream(
telemetry: Option<Arc<dyn SseTelemetry>>,
turn_state: Option<Arc<OnceLock<String>>>,
) -> ResponseStream {
let rate_limit_snapshots = parse_all_rate_limits(&stream_response.headers);
let rate_limits = parse_rate_limit(&stream_response.headers);
let models_etag = stream_response
.headers
.get("X-Models-Etag")
@@ -74,7 +74,7 @@ pub fn spawn_response_stream(
}
let (tx_event, rx_event) = mpsc::channel::<Result<ResponseEvent, ApiError>>(1600);
tokio::spawn(async move {
for snapshot in rate_limit_snapshots {
if let Some(snapshot) = rate_limits {
let _ = tx_event.send(Ok(ResponseEvent::RateLimits(snapshot))).await;
}
if let Some(etag) = models_etag {
@@ -239,8 +239,6 @@ pub fn process_responses_event(
.message
.unwrap_or_else(|| "Invalid request.".to_string());
response_error = ApiError::InvalidRequest { message };
} else if is_server_overloaded_error(&error) {
response_error = ApiError::ServerOverloaded;
} else {
let delay = try_parse_retry_after(&error);
let message = error.message.unwrap_or_default();
@@ -261,7 +259,6 @@ pub fn process_responses_event(
return Ok(Some(ResponseEvent::Completed {
response_id: resp.id,
token_usage: resp.usage.map(Into::into),
can_append: false,
}));
}
Err(err) => {
@@ -279,7 +276,6 @@ pub fn process_responses_event(
return Ok(Some(ResponseEvent::Completed {
response_id: resp.id.unwrap_or_default(),
token_usage: resp.usage.map(Into::into),
can_append: true,
}));
}
Err(err) => {
@@ -294,7 +290,6 @@ pub fn process_responses_event(
return Ok(Some(ResponseEvent::Completed {
response_id: String::new(),
token_usage: None,
can_append: true,
}));
}
"response.output_item.added" => {
@@ -427,11 +422,6 @@ fn is_invalid_prompt_error(error: &Error) -> bool {
error.code.as_deref() == Some("invalid_prompt")
}
fn is_server_overloaded_error(error: &Error) -> bool {
error.code.as_deref() == Some("server_is_overloaded")
|| error.code.as_deref() == Some("slow_down")
}
fn rate_limit_regex() -> &'static regex_lite::Regex {
static RE: std::sync::OnceLock<regex_lite::Regex> = std::sync::OnceLock::new();
#[expect(clippy::unwrap_used)]
@@ -558,11 +548,9 @@ mod tests {
Ok(ResponseEvent::Completed {
response_id,
token_usage,
can_append,
}) => {
assert_eq!(response_id, "resp1");
assert!(token_usage.is_none());
assert!(!can_append);
}
other => panic!("unexpected third event: {other:?}"),
}
@@ -597,7 +585,7 @@ mod tests {
}
#[tokio::test]
async fn response_done_emits_incremental_completed() {
async fn response_done_emits_completed() {
let done = json!({
"type": "response.done",
"response": {
@@ -622,11 +610,9 @@ mod tests {
Ok(ResponseEvent::Completed {
response_id,
token_usage,
can_append,
}) => {
assert_eq!(response_id, "");
assert!(token_usage.is_some());
assert!(*can_append);
}
other => panic!("unexpected event: {other:?}"),
}
@@ -649,11 +635,9 @@ mod tests {
Ok(ResponseEvent::Completed {
response_id,
token_usage,
can_append,
}) => {
assert_eq!(response_id, "");
assert!(token_usage.is_none());
assert!(*can_append);
}
other => panic!("unexpected event: {other:?}"),
}
@@ -689,11 +673,9 @@ mod tests {
Ok(ResponseEvent::Completed {
response_id,
token_usage,
can_append,
}) => {
assert_eq!(response_id, "resp1");
assert!(token_usage.is_none());
assert!(!can_append);
}
other => panic!("unexpected event: {other:?}"),
}

View File

@@ -88,7 +88,6 @@ async fn models_client_hits_models_endpoint() {
effective_context_window_percent: 95,
experimental_supported_tools: Vec::new(),
input_modalities: default_input_modalities(),
prefer_websockets: false,
}],
};

View File

@@ -161,11 +161,9 @@ async fn responses_stream_parses_items_and_completed_end_to_end() -> Result<()>
ResponseEvent::Completed {
response_id,
token_usage,
can_append,
} => {
assert_eq!(response_id, "resp1");
assert!(token_usage.is_none());
assert!(!can_append);
}
other => panic!("unexpected third event: {other:?}"),
}

View File

@@ -1,38 +0,0 @@
/*
* codex-backend
*
* codex-backend
*
* The version of the OpenAPI document: 0.0.1
*
* Generated by: https://openapi-generator.tech
*/
use crate::models;
use serde::Deserialize;
use serde::Serialize;
#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)]
pub struct AdditionalRateLimitDetails {
#[serde(rename = "limit_name")]
pub limit_name: String,
#[serde(rename = "metered_feature")]
pub metered_feature: String,
#[serde(
rename = "rate_limit",
default,
with = "::serde_with::rust::double_option",
skip_serializing_if = "Option::is_none"
)]
pub rate_limit: Option<Option<Box<models::RateLimitStatusDetails>>>,
}
impl AdditionalRateLimitDetails {
pub fn new(limit_name: String, metered_feature: String) -> AdditionalRateLimitDetails {
AdditionalRateLimitDetails {
limit_name,
metered_feature,
rate_limit: None,
}
}
}

View File

@@ -27,9 +27,6 @@ pub mod paginated_list_task_list_item_;
pub use self::paginated_list_task_list_item_::PaginatedListTaskListItem;
// Rate Limits
pub mod additional_rate_limit_details;
pub use self::additional_rate_limit_details::AdditionalRateLimitDetails;
pub mod rate_limit_status_payload;
pub use self::rate_limit_status_payload::PlanType;
pub use self::rate_limit_status_payload::RateLimitStatusPayload;

View File

@@ -30,13 +30,6 @@ pub struct RateLimitStatusPayload {
skip_serializing_if = "Option::is_none"
)]
pub credits: Option<Option<Box<models::CreditStatusDetails>>>,
#[serde(
rename = "additional_rate_limits",
default,
with = "::serde_with::rust::double_option",
skip_serializing_if = "Option::is_none"
)]
pub additional_rate_limits: Option<Option<Vec<models::AdditionalRateLimitDetails>>>,
}
impl RateLimitStatusPayload {
@@ -45,7 +38,6 @@ impl RateLimitStatusPayload {
plan_type,
rate_limit: None,
credits: None,
additional_rate_limits: None,
}
}
}

View File

@@ -0,0 +1,11 @@
load("//:defs.bzl", "codex_rust_crate")
codex_rust_crate(
name = "common",
crate_name = "codex_common",
crate_features = [
"cli",
"elapsed",
"sandbox_summary",
],
)

View File

@@ -0,0 +1,30 @@
[package]
name = "codex-common"
version.workspace = true
edition.workspace = true
license.workspace = true
[lints]
workspace = true
[dependencies]
clap = { workspace = true, features = ["derive", "wrap_help"], optional = true }
codex-core = { workspace = true }
codex-lmstudio = { workspace = true }
codex-ollama = { workspace = true }
codex-protocol = { workspace = true }
serde = { workspace = true, optional = true }
toml = { workspace = true, optional = true }
[features]
# Separate feature so that `clap` is not a mandatory dependency.
cli = ["clap", "serde", "toml"]
elapsed = []
sandbox_summary = []
[dev-dependencies]
clap = { workspace = true, features = ["derive", "wrap_help"] }
codex-utils-absolute-path = { workspace = true }
pretty_assertions = { workspace = true }
serde = { workspace = true }
toml = { workspace = true }

View File

@@ -0,0 +1,5 @@
# codex-common
This crate is designed for utilities that need to be shared across other crates in the workspace, but should not go in `core`.
For narrow utility features, the pattern is to introduce a new feature under `[features]` in `Cargo.toml` and then gate it with `#[cfg]` in `lib.rs`, as appropriate.
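As a rough sketch (illustrative only, not this crate's actual module layout), a feature such as `elapsed` is declared in `Cargo.toml` and then gated in `lib.rs` along these lines:
```rust
// Illustrative sketch: the module bodies are hypothetical.
// Cargo.toml declares the feature, e.g. under [features]:
//   elapsed = []
// lib.rs then compiles the matching module only when the feature is enabled:
#[cfg(feature = "elapsed")]
pub mod elapsed;

#[cfg(feature = "sandbox_summary")]
pub mod sandbox_summary;
```
Dependent crates opt in explicitly, e.g. `codex-common = { path = "../common", features = ["elapsed"] }`, so crates that do not need a given utility never pull in its optional dependencies.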

View File

@@ -1,4 +1,5 @@
//! Standard type to use with the `--approval-mode` CLI option.
//! Available when the `cli` feature is enabled for the crate.
use clap::ValueEnum;

View File

@@ -149,7 +149,7 @@ fn parse_toml_value(raw: &str) -> Result<Value, toml::de::Error> {
.ok_or_else(|| SerdeError::custom("missing sentinel key"))
}
#[cfg(test)]
#[cfg(all(test, feature = "cli"))]
mod tests {
use super::*;

Some files were not shown because too many files have changed in this diff.