Compare commits


1 Commit

Author      SHA1        Message                             Date
kevin zhao  8832459f7f  draft impl of execpolicy in posix   2025-12-01 13:04:56 -05:00
1216 changed files with 10185 additions and 98859 deletions

View File

@@ -1,2 +1 @@
iTerm
psuedo

View File

@@ -1,44 +0,0 @@
name: linux-code-sign
description: Sign Linux artifacts with cosign.
inputs:
target:
description: Target triple for the artifacts to sign.
required: true
artifacts-dir:
description: Absolute path to the directory containing built binaries to sign.
required: true
runs:
using: composite
steps:
- name: Install cosign
uses: sigstore/cosign-installer@v3.7.0
- name: Cosign Linux artifacts
shell: bash
env:
COSIGN_EXPERIMENTAL: "1"
COSIGN_YES: "true"
COSIGN_OIDC_CLIENT_ID: "sigstore"
COSIGN_OIDC_ISSUER: "https://oauth2.sigstore.dev/auth"
run: |
set -euo pipefail
dest="${{ inputs.artifacts-dir }}"
if [[ ! -d "$dest" ]]; then
echo "Destination $dest does not exist"
exit 1
fi
for binary in codex codex-responses-api-proxy; do
artifact="${dest}/${binary}"
if [[ ! -f "$artifact" ]]; then
echo "Binary $artifact not found"
exit 1
fi
cosign sign-blob \
--yes \
--bundle "${artifact}.sigstore" \
"$artifact"
done
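For context, a bundle produced by sign-blob above can be checked with cosign's verify-blob. This is a hedged sketch: the certificate identity regexp and OIDC issuer below are assumptions about how the workflow's keyless certificate is issued, not values taken from this diff; substitute whatever your workflow actually signs with.

# Verify a downloaded binary against its .sigstore bundle (cosign 2.x).
cosign verify-blob \
  --bundle codex.sigstore \
  --certificate-identity-regexp 'https://github.com/openai/codex/.*' \
  --certificate-oidc-issuer https://token.actions.githubusercontent.com \
  codex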

View File

@@ -1,212 +0,0 @@
name: macos-code-sign
description: Configure, sign, notarize, and clean up macOS code signing artifacts.
inputs:
target:
description: Rust compilation target triple (e.g. aarch64-apple-darwin).
required: true
apple-certificate:
description: Base64-encoded Apple signing certificate (P12).
required: true
apple-certificate-password:
description: Password for the signing certificate.
required: true
apple-notarization-key-p8:
description: Base64-encoded Apple notarization key (P8).
required: true
apple-notarization-key-id:
description: Apple notarization key ID.
required: true
apple-notarization-issuer-id:
description: Apple notarization issuer ID.
required: true
runs:
using: composite
steps:
- name: Configure Apple code signing
shell: bash
env:
KEYCHAIN_PASSWORD: actions
APPLE_CERTIFICATE: ${{ inputs.apple-certificate }}
APPLE_CERTIFICATE_PASSWORD: ${{ inputs.apple-certificate-password }}
run: |
set -euo pipefail
if [[ -z "${APPLE_CERTIFICATE:-}" ]]; then
echo "APPLE_CERTIFICATE is required for macOS signing"
exit 1
fi
if [[ -z "${APPLE_CERTIFICATE_PASSWORD:-}" ]]; then
echo "APPLE_CERTIFICATE_PASSWORD is required for macOS signing"
exit 1
fi
cert_path="${RUNNER_TEMP}/apple_signing_certificate.p12"
echo "$APPLE_CERTIFICATE" | base64 -d > "$cert_path"
keychain_path="${RUNNER_TEMP}/codex-signing.keychain-db"
security create-keychain -p "$KEYCHAIN_PASSWORD" "$keychain_path"
security set-keychain-settings -lut 21600 "$keychain_path"
security unlock-keychain -p "$KEYCHAIN_PASSWORD" "$keychain_path"
keychain_args=()
cleanup_keychain() {
if ((${#keychain_args[@]} > 0)); then
security list-keychains -s "${keychain_args[@]}" || true
security default-keychain -s "${keychain_args[0]}" || true
else
security list-keychains -s || true
fi
if [[ -f "$keychain_path" ]]; then
security delete-keychain "$keychain_path" || true
fi
}
while IFS= read -r keychain; do
[[ -n "$keychain" ]] && keychain_args+=("$keychain")
done < <(security list-keychains | sed 's/^[[:space:]]*//;s/[[:space:]]*$//;s/"//g')
if ((${#keychain_args[@]} > 0)); then
security list-keychains -s "$keychain_path" "${keychain_args[@]}"
else
security list-keychains -s "$keychain_path"
fi
security default-keychain -s "$keychain_path"
security import "$cert_path" -k "$keychain_path" -P "$APPLE_CERTIFICATE_PASSWORD" -T /usr/bin/codesign -T /usr/bin/security
security set-key-partition-list -S apple-tool:,apple: -s -k "$KEYCHAIN_PASSWORD" "$keychain_path" > /dev/null
codesign_hashes=()
while IFS= read -r hash; do
[[ -n "$hash" ]] && codesign_hashes+=("$hash")
done < <(security find-identity -v -p codesigning "$keychain_path" \
| sed -n 's/.*\([0-9A-F]\{40\}\).*/\1/p' \
| sort -u)
if ((${#codesign_hashes[@]} == 0)); then
echo "No signing identities found in $keychain_path"
cleanup_keychain
rm -f "$cert_path"
exit 1
fi
if ((${#codesign_hashes[@]} > 1)); then
echo "Multiple signing identities found in $keychain_path:"
printf ' %s\n' "${codesign_hashes[@]}"
cleanup_keychain
rm -f "$cert_path"
exit 1
fi
APPLE_CODESIGN_IDENTITY="${codesign_hashes[0]}"
rm -f "$cert_path"
echo "APPLE_CODESIGN_IDENTITY=$APPLE_CODESIGN_IDENTITY" >> "$GITHUB_ENV"
echo "APPLE_CODESIGN_KEYCHAIN=$keychain_path" >> "$GITHUB_ENV"
echo "::add-mask::$APPLE_CODESIGN_IDENTITY"
- name: Sign macOS binaries
shell: bash
run: |
set -euo pipefail
if [[ -z "${APPLE_CODESIGN_IDENTITY:-}" ]]; then
echo "APPLE_CODESIGN_IDENTITY is required for macOS signing"
exit 1
fi
keychain_args=()
if [[ -n "${APPLE_CODESIGN_KEYCHAIN:-}" && -f "${APPLE_CODESIGN_KEYCHAIN}" ]]; then
keychain_args+=(--keychain "${APPLE_CODESIGN_KEYCHAIN}")
fi
for binary in codex codex-responses-api-proxy; do
path="codex-rs/target/${{ inputs.target }}/release/${binary}"
codesign --force --options runtime --timestamp --sign "$APPLE_CODESIGN_IDENTITY" "${keychain_args[@]}" "$path"
done
- name: Notarize macOS binaries
shell: bash
env:
APPLE_NOTARIZATION_KEY_P8: ${{ inputs.apple-notarization-key-p8 }}
APPLE_NOTARIZATION_KEY_ID: ${{ inputs.apple-notarization-key-id }}
APPLE_NOTARIZATION_ISSUER_ID: ${{ inputs.apple-notarization-issuer-id }}
run: |
set -euo pipefail
for var in APPLE_NOTARIZATION_KEY_P8 APPLE_NOTARIZATION_KEY_ID APPLE_NOTARIZATION_ISSUER_ID; do
if [[ -z "${!var:-}" ]]; then
echo "$var is required for notarization"
exit 1
fi
done
notary_key_path="${RUNNER_TEMP}/notarytool.key.p8"
echo "$APPLE_NOTARIZATION_KEY_P8" | base64 -d > "$notary_key_path"
cleanup_notary() {
rm -f "$notary_key_path"
}
trap cleanup_notary EXIT
notarize_binary() {
local binary="$1"
local source_path="codex-rs/target/${{ inputs.target }}/release/${binary}"
local archive_path="${RUNNER_TEMP}/${binary}.zip"
if [[ ! -f "$source_path" ]]; then
echo "Binary $source_path not found"
exit 1
fi
rm -f "$archive_path"
ditto -c -k --keepParent "$source_path" "$archive_path"
submission_json=$(xcrun notarytool submit "$archive_path" \
--key "$notary_key_path" \
--key-id "$APPLE_NOTARIZATION_KEY_ID" \
--issuer "$APPLE_NOTARIZATION_ISSUER_ID" \
--output-format json \
--wait)
status=$(printf '%s\n' "$submission_json" | jq -r '.status // "Unknown"')
submission_id=$(printf '%s\n' "$submission_json" | jq -r '.id // ""')
if [[ -z "$submission_id" ]]; then
echo "Failed to retrieve submission ID for $binary"
exit 1
fi
echo "::notice title=Notarization::$binary submission ${submission_id} completed with status ${status}"
if [[ "$status" != "Accepted" ]]; then
echo "Notarization failed for ${binary} (submission ${submission_id}, status ${status})"
exit 1
fi
}
notarize_binary "codex"
notarize_binary "codex-responses-api-proxy"
- name: Remove signing keychain
if: ${{ always() }}
shell: bash
env:
APPLE_CODESIGN_KEYCHAIN: ${{ env.APPLE_CODESIGN_KEYCHAIN }}
run: |
set -euo pipefail
if [[ -n "${APPLE_CODESIGN_KEYCHAIN:-}" ]]; then
keychain_args=()
while IFS= read -r keychain; do
[[ "$keychain" == "$APPLE_CODESIGN_KEYCHAIN" ]] && continue
[[ -n "$keychain" ]] && keychain_args+=("$keychain")
done < <(security list-keychains | sed 's/^[[:space:]]*//;s/[[:space:]]*$//;s/"//g')
if ((${#keychain_args[@]} > 0)); then
security list-keychains -s "${keychain_args[@]}"
security default-keychain -s "${keychain_args[0]}"
fi
if [[ -f "$APPLE_CODESIGN_KEYCHAIN" ]]; then
security delete-keychain "$APPLE_CODESIGN_KEYCHAIN"
fi
fi
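As a hedged companion to this action, a local spot-check of its output might look like the following sketch. The submission ID is an illustrative placeholder; the codesign and notarytool invocations are standard Xcode tooling, reusing the same credentials the action consumes.

# Verify the signature and display its flags (hardened runtime should be set).
codesign --verify --strict --verbose=2 codex
codesign --display --verbose=2 codex
# If a submission was rejected, fetch its notarization log for details.
xcrun notarytool log "<submission-id>" \
  --key "$RUNNER_TEMP/notarytool.key.p8" \
  --key-id "$APPLE_NOTARIZATION_KEY_ID" \
  --issuer "$APPLE_NOTARIZATION_ISSUER_ID"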

View File

@@ -1,57 +0,0 @@
name: windows-code-sign
description: Sign Windows binaries with Azure Trusted Signing.
inputs:
target:
description: Target triple for the artifacts to sign.
required: true
client-id:
description: Azure Trusted Signing client ID.
required: true
tenant-id:
description: Azure tenant ID for Trusted Signing.
required: true
subscription-id:
description: Azure subscription ID for Trusted Signing.
required: true
endpoint:
description: Azure Trusted Signing endpoint.
required: true
account-name:
description: Azure Trusted Signing account name.
required: true
certificate-profile-name:
description: Certificate profile name for signing.
required: true
runs:
using: composite
steps:
- name: Azure login for Trusted Signing (OIDC)
uses: azure/login@v2
with:
client-id: ${{ inputs.client-id }}
tenant-id: ${{ inputs.tenant-id }}
subscription-id: ${{ inputs.subscription-id }}
- name: Sign Windows binaries with Azure Trusted Signing
uses: azure/trusted-signing-action@v0
with:
endpoint: ${{ inputs.endpoint }}
trusted-signing-account-name: ${{ inputs.account-name }}
certificate-profile-name: ${{ inputs.certificate-profile-name }}
exclude-environment-credential: true
exclude-workload-identity-credential: true
exclude-managed-identity-credential: true
exclude-shared-token-cache-credential: true
exclude-visual-studio-credential: true
exclude-visual-studio-code-credential: true
exclude-azure-cli-credential: false
exclude-azure-powershell-credential: true
exclude-azure-developer-cli-credential: true
exclude-interactive-browser-credential: true
cache-dependencies: false
files: |
${{ github.workspace }}/codex-rs/target/${{ inputs.target }}/release/codex.exe
${{ github.workspace }}/codex-rs/target/${{ inputs.target }}/release/codex-responses-api-proxy.exe
${{ github.workspace }}/codex-rs/target/${{ inputs.target }}/release/codex-windows-sandbox-setup.exe
${{ github.workspace }}/codex-rs/target/${{ inputs.target }}/release/codex-command-runner.exe
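A hedged sketch of verifying the result locally with the Windows SDK's signtool (assumes signtool is on PATH, which varies by SDK install; the target triple is a placeholder):

# Check the Authenticode signature on a signed binary.
signtool verify /pa /v codex-rs/target/<target>/release/codex.exe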

View File

@@ -20,7 +20,7 @@ jobs:
run_install: false
- name: Setup Node.js
-uses: actions/setup-node@v6
+uses: actions/setup-node@v5
with:
node-version: 22
@@ -46,7 +46,7 @@ jobs:
echo "pack_output=$PACK_OUTPUT" >> "$GITHUB_OUTPUT"
- name: Upload staged npm package artifact
-uses: actions/upload-artifact@v6
+uses: actions/upload-artifact@v5
with:
name: codex-npm-staging
path: ${{ steps.stage_npm_package.outputs.pack_output }}
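Since several workflows pin different major versions after this change, a quick audit one-liner (hypothetical, not part of the diff) can list every pinned action in one pass:

grep -rh 'uses: ' .github/workflows .github/actions \
  | sed 's/^[[:space:]-]*//' \
  | sort | uniq -c | sort -rn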

View File

@@ -166,7 +166,7 @@ jobs:
# avoid caching the large target dir on the gnu-dev job.
- name: Restore cargo home cache
id: cache_cargo_home_restore
-uses: actions/cache/restore@v5
+uses: actions/cache/restore@v4
with:
path: |
~/.cargo/bin/
@@ -207,7 +207,7 @@ jobs:
- name: Restore sccache cache (fallback)
if: ${{ env.USE_SCCACHE == 'true' && env.SCCACHE_GHA_ENABLED != 'true' }}
id: cache_sccache_restore
-uses: actions/cache/restore@v5
+uses: actions/cache/restore@v4
with:
path: ${{ github.workspace }}/.sccache/
key: sccache-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-${{ steps.lockhash.outputs.hash }}-${{ github.run_id }}
@@ -226,7 +226,7 @@ jobs:
- if: ${{ matrix.target == 'x86_64-unknown-linux-musl' || matrix.target == 'aarch64-unknown-linux-musl'}}
name: Restore APT cache (musl)
id: cache_apt_restore
-uses: actions/cache/restore@v5
+uses: actions/cache/restore@v4
with:
path: |
/var/cache/apt
@@ -280,7 +280,7 @@ jobs:
- name: Save cargo home cache
if: always() && !cancelled() && steps.cache_cargo_home_restore.outputs.cache-hit != 'true'
continue-on-error: true
-uses: actions/cache/save@v5
+uses: actions/cache/save@v4
with:
path: |
~/.cargo/bin/
@@ -292,7 +292,7 @@ jobs:
- name: Save sccache cache (fallback)
if: always() && !cancelled() && env.USE_SCCACHE == 'true' && env.SCCACHE_GHA_ENABLED != 'true'
continue-on-error: true
-uses: actions/cache/save@v5
+uses: actions/cache/save@v4
with:
path: ${{ github.workspace }}/.sccache/
key: sccache-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-${{ steps.lockhash.outputs.hash }}-${{ github.run_id }}
@@ -317,7 +317,7 @@ jobs:
- name: Save APT cache (musl)
if: always() && !cancelled() && (matrix.target == 'x86_64-unknown-linux-musl' || matrix.target == 'aarch64-unknown-linux-musl') && steps.cache_apt_restore.outputs.cache-hit != 'true'
continue-on-error: true
-uses: actions/cache/save@v5
+uses: actions/cache/save@v4
with:
path: |
/var/cache/apt
@@ -369,27 +369,6 @@ jobs:
steps:
- uses: actions/checkout@v6
# We have been running out of space when running this job on Linux for
# x86_64-unknown-linux-gnu, so remove some unnecessary dependencies.
- name: Remove unnecessary dependencies to save space
if: ${{ startsWith(matrix.runner, 'ubuntu') }}
shell: bash
run: |
set -euo pipefail
sudo rm -rf \
/usr/local/lib/android \
/usr/share/dotnet \
/usr/local/share/boost \
/usr/local/lib/node_modules \
/opt/ghc
sudo apt-get remove -y docker.io docker-compose podman buildah
# Some integration tests rely on DotSlash being installed.
# See https://github.com/openai/codex/pull/7617.
- name: Install DotSlash
uses: facebook/install-dotslash@v2
- uses: dtolnay/rust-toolchain@1.90
with:
targets: ${{ matrix.target }}
@@ -405,7 +384,7 @@ jobs:
- name: Restore cargo home cache
id: cache_cargo_home_restore
-uses: actions/cache/restore@v5
+uses: actions/cache/restore@v4
with:
path: |
~/.cargo/bin/
@@ -445,7 +424,7 @@ jobs:
- name: Restore sccache cache (fallback)
if: ${{ env.USE_SCCACHE == 'true' && env.SCCACHE_GHA_ENABLED != 'true' }}
id: cache_sccache_restore
-uses: actions/cache/restore@v5
+uses: actions/cache/restore@v4
with:
path: ${{ github.workspace }}/.sccache/
key: sccache-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-${{ steps.lockhash.outputs.hash }}-${{ github.run_id }}
@@ -468,7 +447,7 @@ jobs:
- name: Save cargo home cache
if: always() && !cancelled() && steps.cache_cargo_home_restore.outputs.cache-hit != 'true'
continue-on-error: true
-uses: actions/cache/save@v5
+uses: actions/cache/save@v4
with:
path: |
~/.cargo/bin/
@@ -480,7 +459,7 @@ jobs:
- name: Save sccache cache (fallback)
if: always() && !cancelled() && env.USE_SCCACHE == 'true' && env.SCCACHE_GHA_ENABLED != 'true'
continue-on-error: true
-uses: actions/cache/save@v5
+uses: actions/cache/save@v4
with:
path: ${{ github.workspace }}/.sccache/
key: sccache-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-${{ steps.lockhash.outputs.hash }}-${{ github.run_id }}

View File

@@ -50,9 +50,6 @@ jobs:
name: Build - ${{ matrix.runner }} - ${{ matrix.target }}
runs-on: ${{ matrix.runner }}
timeout-minutes: 30
permissions:
contents: read
id-token: write
defaults:
run:
working-directory: codex-rs
@@ -84,7 +81,7 @@ jobs:
with:
targets: ${{ matrix.target }}
-- uses: actions/cache@v5
+- uses: actions/cache@v4
with:
path: |
~/.cargo/bin/
@@ -101,43 +98,176 @@ jobs:
sudo apt-get install -y musl-tools pkg-config
- name: Cargo build
shell: bash
run: |
if [[ "${{ contains(matrix.target, 'windows') }}" == 'true' ]]; then
cargo build --target ${{ matrix.target }} --release --bin codex --bin codex-responses-api-proxy --bin codex-windows-sandbox-setup --bin codex-command-runner
else
cargo build --target ${{ matrix.target }} --release --bin codex --bin codex-responses-api-proxy
fi
- if: ${{ contains(matrix.target, 'linux') }}
name: Cosign Linux artifacts
uses: ./.github/actions/linux-code-sign
with:
target: ${{ matrix.target }}
artifacts-dir: ${{ github.workspace }}/codex-rs/target/${{ matrix.target }}/release
- if: ${{ contains(matrix.target, 'windows') }}
name: Sign Windows binaries with Azure Trusted Signing
uses: ./.github/actions/windows-code-sign
with:
target: ${{ matrix.target }}
client-id: ${{ secrets.AZURE_TRUSTED_SIGNING_CLIENT_ID }}
tenant-id: ${{ secrets.AZURE_TRUSTED_SIGNING_TENANT_ID }}
subscription-id: ${{ secrets.AZURE_TRUSTED_SIGNING_SUBSCRIPTION_ID }}
endpoint: ${{ secrets.AZURE_TRUSTED_SIGNING_ENDPOINT }}
account-name: ${{ secrets.AZURE_TRUSTED_SIGNING_ACCOUNT_NAME }}
certificate-profile-name: ${{ secrets.AZURE_TRUSTED_SIGNING_CERTIFICATE_PROFILE_NAME }}
run: cargo build --target ${{ matrix.target }} --release --bin codex --bin codex-responses-api-proxy
- if: ${{ matrix.runner == 'macos-15-xlarge' }}
name: MacOS code signing
uses: ./.github/actions/macos-code-sign
with:
target: ${{ matrix.target }}
apple-certificate: ${{ secrets.APPLE_CERTIFICATE_P12 }}
apple-certificate-password: ${{ secrets.APPLE_CERTIFICATE_PASSWORD }}
apple-notarization-key-p8: ${{ secrets.APPLE_NOTARIZATION_KEY_P8 }}
apple-notarization-key-id: ${{ secrets.APPLE_NOTARIZATION_KEY_ID }}
apple-notarization-issuer-id: ${{ secrets.APPLE_NOTARIZATION_ISSUER_ID }}
name: Configure Apple code signing
shell: bash
env:
KEYCHAIN_PASSWORD: actions
APPLE_CERTIFICATE: ${{ secrets.APPLE_CERTIFICATE_P12 }}
APPLE_CERTIFICATE_PASSWORD: ${{ secrets.APPLE_CERTIFICATE_PASSWORD }}
run: |
set -euo pipefail
if [[ -z "${APPLE_CERTIFICATE:-}" ]]; then
echo "APPLE_CERTIFICATE is required for macOS signing"
exit 1
fi
if [[ -z "${APPLE_CERTIFICATE_PASSWORD:-}" ]]; then
echo "APPLE_CERTIFICATE_PASSWORD is required for macOS signing"
exit 1
fi
cert_path="${RUNNER_TEMP}/apple_signing_certificate.p12"
echo "$APPLE_CERTIFICATE" | base64 -d > "$cert_path"
keychain_path="${RUNNER_TEMP}/codex-signing.keychain-db"
security create-keychain -p "$KEYCHAIN_PASSWORD" "$keychain_path"
security set-keychain-settings -lut 21600 "$keychain_path"
security unlock-keychain -p "$KEYCHAIN_PASSWORD" "$keychain_path"
keychain_args=()
cleanup_keychain() {
if ((${#keychain_args[@]} > 0)); then
security list-keychains -s "${keychain_args[@]}" || true
security default-keychain -s "${keychain_args[0]}" || true
else
security list-keychains -s || true
fi
if [[ -f "$keychain_path" ]]; then
security delete-keychain "$keychain_path" || true
fi
}
while IFS= read -r keychain; do
[[ -n "$keychain" ]] && keychain_args+=("$keychain")
done < <(security list-keychains | sed 's/^[[:space:]]*//;s/[[:space:]]*$//;s/"//g')
if ((${#keychain_args[@]} > 0)); then
security list-keychains -s "$keychain_path" "${keychain_args[@]}"
else
security list-keychains -s "$keychain_path"
fi
security default-keychain -s "$keychain_path"
security import "$cert_path" -k "$keychain_path" -P "$APPLE_CERTIFICATE_PASSWORD" -T /usr/bin/codesign -T /usr/bin/security
security set-key-partition-list -S apple-tool:,apple: -s -k "$KEYCHAIN_PASSWORD" "$keychain_path" > /dev/null
codesign_hashes=()
while IFS= read -r hash; do
[[ -n "$hash" ]] && codesign_hashes+=("$hash")
done < <(security find-identity -v -p codesigning "$keychain_path" \
| sed -n 's/.*\([0-9A-F]\{40\}\).*/\1/p' \
| sort -u)
if ((${#codesign_hashes[@]} == 0)); then
echo "No signing identities found in $keychain_path"
cleanup_keychain
rm -f "$cert_path"
exit 1
fi
if ((${#codesign_hashes[@]} > 1)); then
echo "Multiple signing identities found in $keychain_path:"
printf ' %s\n' "${codesign_hashes[@]}"
cleanup_keychain
rm -f "$cert_path"
exit 1
fi
APPLE_CODESIGN_IDENTITY="${codesign_hashes[0]}"
rm -f "$cert_path"
echo "APPLE_CODESIGN_IDENTITY=$APPLE_CODESIGN_IDENTITY" >> "$GITHUB_ENV"
echo "APPLE_CODESIGN_KEYCHAIN=$keychain_path" >> "$GITHUB_ENV"
echo "::add-mask::$APPLE_CODESIGN_IDENTITY"
- if: ${{ matrix.runner == 'macos-15-xlarge' }}
name: Sign macOS binaries
shell: bash
run: |
set -euo pipefail
if [[ -z "${APPLE_CODESIGN_IDENTITY:-}" ]]; then
echo "APPLE_CODESIGN_IDENTITY is required for macOS signing"
exit 1
fi
keychain_args=()
if [[ -n "${APPLE_CODESIGN_KEYCHAIN:-}" && -f "${APPLE_CODESIGN_KEYCHAIN}" ]]; then
keychain_args+=(--keychain "${APPLE_CODESIGN_KEYCHAIN}")
fi
for binary in codex codex-responses-api-proxy; do
path="target/${{ matrix.target }}/release/${binary}"
codesign --force --options runtime --timestamp --sign "$APPLE_CODESIGN_IDENTITY" "${keychain_args[@]}" "$path"
done
- if: ${{ matrix.runner == 'macos-15-xlarge' }}
name: Notarize macOS binaries
shell: bash
env:
APPLE_NOTARIZATION_KEY_P8: ${{ secrets.APPLE_NOTARIZATION_KEY_P8 }}
APPLE_NOTARIZATION_KEY_ID: ${{ secrets.APPLE_NOTARIZATION_KEY_ID }}
APPLE_NOTARIZATION_ISSUER_ID: ${{ secrets.APPLE_NOTARIZATION_ISSUER_ID }}
run: |
set -euo pipefail
for var in APPLE_NOTARIZATION_KEY_P8 APPLE_NOTARIZATION_KEY_ID APPLE_NOTARIZATION_ISSUER_ID; do
if [[ -z "${!var:-}" ]]; then
echo "$var is required for notarization"
exit 1
fi
done
notary_key_path="${RUNNER_TEMP}/notarytool.key.p8"
echo "$APPLE_NOTARIZATION_KEY_P8" | base64 -d > "$notary_key_path"
cleanup_notary() {
rm -f "$notary_key_path"
}
trap cleanup_notary EXIT
notarize_binary() {
local binary="$1"
local source_path="target/${{ matrix.target }}/release/${binary}"
local archive_path="${RUNNER_TEMP}/${binary}.zip"
if [[ ! -f "$source_path" ]]; then
echo "Binary $source_path not found"
exit 1
fi
rm -f "$archive_path"
ditto -c -k --keepParent "$source_path" "$archive_path"
submission_json=$(xcrun notarytool submit "$archive_path" \
--key "$notary_key_path" \
--key-id "$APPLE_NOTARIZATION_KEY_ID" \
--issuer "$APPLE_NOTARIZATION_ISSUER_ID" \
--output-format json \
--wait)
status=$(printf '%s\n' "$submission_json" | jq -r '.status // "Unknown"')
submission_id=$(printf '%s\n' "$submission_json" | jq -r '.id // ""')
if [[ -z "$submission_id" ]]; then
echo "Failed to retrieve submission ID for $binary"
exit 1
fi
echo "::notice title=Notarization::$binary submission ${submission_id} completed with status ${status}"
if [[ "$status" != "Accepted" ]]; then
echo "Notarization failed for ${binary} (submission ${submission_id}, status ${status})"
exit 1
fi
}
notarize_binary "codex"
notarize_binary "codex-responses-api-proxy"
- name: Stage artifacts
shell: bash
@@ -148,18 +278,11 @@ jobs:
if [[ "${{ matrix.runner }}" == windows* ]]; then
cp target/${{ matrix.target }}/release/codex.exe "$dest/codex-${{ matrix.target }}.exe"
cp target/${{ matrix.target }}/release/codex-responses-api-proxy.exe "$dest/codex-responses-api-proxy-${{ matrix.target }}.exe"
cp target/${{ matrix.target }}/release/codex-windows-sandbox-setup.exe "$dest/codex-windows-sandbox-setup-${{ matrix.target }}.exe"
cp target/${{ matrix.target }}/release/codex-command-runner.exe "$dest/codex-command-runner-${{ matrix.target }}.exe"
else
cp target/${{ matrix.target }}/release/codex "$dest/codex-${{ matrix.target }}"
cp target/${{ matrix.target }}/release/codex-responses-api-proxy "$dest/codex-responses-api-proxy-${{ matrix.target }}"
fi
if [[ "${{ matrix.target }}" == *linux* ]]; then
cp target/${{ matrix.target }}/release/codex.sigstore "$dest/codex-${{ matrix.target }}.sigstore"
cp target/${{ matrix.target }}/release/codex-responses-api-proxy.sigstore "$dest/codex-responses-api-proxy-${{ matrix.target }}.sigstore"
fi
- if: ${{ matrix.runner == 'windows-11-arm' }}
name: Install zstd
shell: powershell
@@ -198,11 +321,6 @@ jobs:
continue
fi
# Don't try to compress signature bundles.
if [[ "$base" == *.sigstore ]]; then
continue
fi
# Create per-binary tar.gz
tar -C "$dest" -czf "$dest/${base}.tar.gz" "$base"
@@ -222,7 +340,30 @@ jobs:
zstd "${zstd_args[@]}" "$dest/$base"
done
- uses: actions/upload-artifact@v6
- name: Remove signing keychain
if: ${{ always() && matrix.runner == 'macos-15-xlarge' }}
shell: bash
env:
APPLE_CODESIGN_KEYCHAIN: ${{ env.APPLE_CODESIGN_KEYCHAIN }}
run: |
set -euo pipefail
if [[ -n "${APPLE_CODESIGN_KEYCHAIN:-}" ]]; then
keychain_args=()
while IFS= read -r keychain; do
[[ "$keychain" == "$APPLE_CODESIGN_KEYCHAIN" ]] && continue
[[ -n "$keychain" ]] && keychain_args+=("$keychain")
done < <(security list-keychains | sed 's/^[[:space:]]*//;s/[[:space:]]*$//;s/"//g')
if ((${#keychain_args[@]} > 0)); then
security list-keychains -s "${keychain_args[@]}"
security default-keychain -s "${keychain_args[0]}"
fi
if [[ -f "$APPLE_CODESIGN_KEYCHAIN" ]]; then
security delete-keychain "$APPLE_CODESIGN_KEYCHAIN"
fi
fi
- uses: actions/upload-artifact@v5
with:
name: ${{ matrix.target }}
# Upload the per-binary .zst files as well as the new .tar.gz
@@ -258,7 +399,7 @@ jobs:
- name: Checkout repository
uses: actions/checkout@v6
-- uses: actions/download-artifact@v7
+- uses: actions/download-artifact@v4
with:
path: dist
@@ -306,7 +447,7 @@ jobs:
run_install: false
- name: Setup Node.js for npm packaging
-uses: actions/setup-node@v6
+uses: actions/setup-node@v5
with:
node-version: 22
@@ -357,7 +498,7 @@ jobs:
steps:
- name: Setup Node.js
-uses: actions/setup-node@v6
+uses: actions/setup-node@v5
with:
node-version: 22
registry-url: "https://registry.npmjs.org"

View File

@@ -19,7 +19,7 @@ jobs:
run_install: false
- name: Setup Node.js
-uses: actions/setup-node@v6
+uses: actions/setup-node@v5
with:
node-version: 22
cache: pnpm

View File

@@ -30,7 +30,7 @@ jobs:
run_install: false
- name: Setup Node.js
-uses: actions/setup-node@v6
+uses: actions/setup-node@v5
with:
node-version: ${{ env.NODE_VERSION }}
cache: "pnpm"

View File

@@ -113,7 +113,7 @@ jobs:
cp "target/${{ matrix.target }}/release/codex-exec-mcp-server" "$dest/"
cp "target/${{ matrix.target }}/release/codex-execve-wrapper" "$dest/"
-- uses: actions/upload-artifact@v6
+- uses: actions/upload-artifact@v5
with:
name: shell-tool-mcp-rust-${{ matrix.target }}
path: artifacts/**
@@ -211,7 +211,7 @@ jobs:
mkdir -p "$dest"
cp bash "$dest/bash"
-- uses: actions/upload-artifact@v6
+- uses: actions/upload-artifact@v5
with:
name: shell-tool-mcp-bash-${{ matrix.target }}-${{ matrix.variant }}
path: artifacts/**
@@ -253,7 +253,7 @@ jobs:
mkdir -p "$dest"
cp bash "$dest/bash"
-- uses: actions/upload-artifact@v6
+- uses: actions/upload-artifact@v5
with:
name: shell-tool-mcp-bash-${{ matrix.target }}-${{ matrix.variant }}
path: artifacts/**
@@ -280,7 +280,7 @@ jobs:
run_install: false
- name: Setup Node.js
-uses: actions/setup-node@v6
+uses: actions/setup-node@v5
with:
node-version: ${{ env.NODE_VERSION }}
@@ -291,7 +291,7 @@ jobs:
run: pnpm --filter @openai/codex-shell-tool-mcp run build
- name: Download build artifacts
-uses: actions/download-artifact@v7
+uses: actions/download-artifact@v4
with:
path: artifacts
@@ -352,7 +352,7 @@ jobs:
filename=$(PACK_INFO="$pack_info" node -e 'const data = JSON.parse(process.env.PACK_INFO); console.log(data[0].filename);')
mv "dist/npm/${filename}" "dist/npm/codex-shell-tool-mcp-npm-${PACKAGE_VERSION}.tgz"
-- uses: actions/upload-artifact@v6
+- uses: actions/upload-artifact@v5
with:
name: codex-shell-tool-mcp-npm
path: dist/npm/codex-shell-tool-mcp-npm-${{ env.PACKAGE_VERSION }}.tgz
@@ -376,7 +376,7 @@ jobs:
run_install: false
- name: Setup Node.js
-uses: actions/setup-node@v6
+uses: actions/setup-node@v5
with:
node-version: ${{ env.NODE_VERSION }}
registry-url: https://registry.npmjs.org
@@ -386,7 +386,7 @@ jobs:
run: npm install -g npm@latest
- name: Download npm tarball
-uses: actions/download-artifact@v7
+uses: actions/download-artifact@v4
with:
name: codex-shell-tool-mcp-npm
path: dist/npm

View File

@@ -11,6 +11,7 @@ In the codex-rs folder where the rust code lives:
- Always collapse if statements per https://rust-lang.github.io/rust-clippy/master/index.html#collapsible_if
- Always inline format! args when possible per https://rust-lang.github.io/rust-clippy/master/index.html#uninlined_format_args
- Use method references over closures when possible per https://rust-lang.github.io/rust-clippy/master/index.html#redundant_closure_for_method_calls
- Do not use unsigned integers even if the number cannot be negative.
- When writing tests, prefer comparing the equality of entire objects over fields one by one.
- When making a change that adds or changes an API, ensure that the documentation in the `docs/` folder is up to date if applicable.
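To illustrate the two testing rules above, here is a minimal sketch in Rust; the Event struct is invented for the example (not a repo type), the id is signed per the integer guidance, and a single deep assert_eq! replaces per-field checks (assumes pretty_assertions is a dev-dependency):

#[derive(Debug, PartialEq)]
struct Event {
    id: i64, // signed per the guidance above, even though ids are never negative
    message: String,
}

#[cfg(test)]
mod tests {
    use super::*;
    use pretty_assertions::assert_eq;

    #[test]
    fn compares_whole_objects() {
        let actual = Event { id: 1, message: "done".into() };
        // One deep comparison over the entire object, not field-by-field asserts.
        assert_eq!(actual, Event { id: 1, message: "done".into() });
    }
}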
@@ -74,7 +75,6 @@ If you dont have the tool:
### Test assertions
- Tests should use pretty_assertions::assert_eq for clearer diffs. Import this at the top of the test module if it isn't already.
- Prefer deep equals comparisons whenever possible. Perform `assert_eq!()` on entire objects, rather than individual fields.
### Integration tests (core)

View File

@@ -95,10 +95,10 @@ function detectPackageManager() {
return "bun";
}
if (
__dirname.includes(".bun/install/global") ||
__dirname.includes(".bun\\install\\global")
process.env.BUN_INSTALL ||
process.env.BUN_INSTALL_GLOBAL_DIR ||
process.env.BUN_INSTALL_BIN_DIR
) {
return "bun";
}
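The replacement logic above keys off Bun's environment variables instead of install-path heuristics. A quick, hedged way to see which of these variables a local Bun install actually exports (variable names as used in the diff):

env | grep '^BUN_INSTALL' || echo "no BUN_INSTALL* variables set"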

codex-rs/Cargo.lock (generated; 784 changed lines)

File diff suppressed because it is too large.

View File

@@ -34,8 +34,6 @@ members = [
"stdio-to-uds",
"otel",
"tui",
"tui2",
"utils/absolute-path",
"utils/git",
"utils/cache",
"utils/image",
@@ -61,15 +59,15 @@ license = "Apache-2.0"
# Internal
app_test_support = { path = "app-server/tests/common" }
codex-ansi-escape = { path = "ansi-escape" }
codex-api = { path = "codex-api" }
codex-app-server = { path = "app-server" }
codex-app-server-protocol = { path = "app-server-protocol" }
codex-apply-patch = { path = "apply-patch" }
codex-arg0 = { path = "arg0" }
codex-async-utils = { path = "async-utils" }
codex-backend-client = { path = "backend-client" }
codex-chatgpt = { path = "chatgpt" }
codex-api = { path = "codex-api" }
codex-client = { path = "codex-client" }
codex-chatgpt = { path = "chatgpt" }
codex-common = { path = "common" }
codex-core = { path = "core" }
codex-exec = { path = "exec" }
@@ -90,8 +88,6 @@ codex-responses-api-proxy = { path = "responses-api-proxy" }
codex-rmcp-client = { path = "rmcp-client" }
codex-stdio-to-uds = { path = "stdio-to-uds" }
codex-tui = { path = "tui" }
codex-tui2 = { path = "tui2" }
codex-utils-absolute-path = { path = "utils/absolute-path" }
codex-utils-cache = { path = "utils/cache" }
codex-utils-image = { path = "utils/image" }
codex-utils-json-to-toml = { path = "utils/json-to-toml" }
@@ -100,7 +96,6 @@ codex-utils-readiness = { path = "utils/readiness" }
codex-utils-string = { path = "utils/string" }
codex-windows-sandbox = { path = "windows-sandbox-rs" }
core_test_support = { path = "core/tests/common" }
exec_server_test_support = { path = "exec-server/tests/common" }
mcp-types = { path = "mcp-types" }
mcp_test_support = { path = "mcp-server/tests/common" }
@@ -109,6 +104,7 @@ allocative = "0.3.3"
ansi-to-tui = "7.0.0"
anyhow = "1"
arboard = { version = "3", features = ["wayland-data-control"] }
askama = "0.14"
assert_cmd = "2"
assert_matches = "1.5.0"
async-channel = "2.3.1"
@@ -142,14 +138,14 @@ icu_provider = { version = "2.1", features = ["sync"] }
ignore = "0.4.23"
image = { version = "^0.25.9", default-features = false }
indexmap = "2.12.0"
insta = "1.44.3"
insta = "1.43.2"
itertools = "0.14.0"
keyring = { version = "3.6", default-features = false }
landlock = "0.4.1"
lazy_static = "1"
libc = "0.2.177"
log = "0.4"
lru = "0.16.2"
lru = "0.12.5"
maplit = "1.0.2"
mime_guess = "2.0.5"
multimap = "0.10.0"
@@ -162,7 +158,6 @@ opentelemetry-appender-tracing = "0.30.0"
opentelemetry-otlp = "0.30.0"
opentelemetry-semantic-conventions = "0.30.0"
opentelemetry_sdk = "0.30.0"
tracing-opentelemetry = "0.31.0"
os_info = "3.12.0"
owo-colors = "4.2.0"
path-absolutize = "3.1.1"
@@ -174,23 +169,22 @@ pulldown-cmark = "0.10"
rand = "0.9"
ratatui = "0.29.0"
ratatui-macros = "0.6.0"
regex = "1.12.2"
regex-lite = "0.1.7"
regex = "1.12.2"
reqwest = "0.12"
rmcp = { version = "0.10.0", default-features = false }
rmcp = { version = "0.9.0", default-features = false }
schemars = "0.8.22"
seccompiler = "0.5.0"
sentry = "0.46.0"
sentry = "0.34.0"
serde = "1"
serde_json = "1"
serde_with = "3.16"
serde_yaml = "0.9"
serial_test = "3.2.0"
sha1 = "0.10.6"
sha2 = "0.10"
shlex = "1.3.0"
similar = "2.7.0"
socket2 = "0.6.1"
socket2 = "0.6.0"
starlark = "0.13.0"
strum = "0.27.2"
strum_macros = "0.27.2"
@@ -227,7 +221,7 @@ vt100 = "0.16.2"
walkdir = "2.5.0"
webbrowser = "1.0"
which = "6"
wildmatch = "2.6.1"
wildmatch = "2.5.0"
wiremock = "0.6"
zeroize = "1.8.2"
@@ -294,6 +288,7 @@ opt-level = 0
# ratatui = { path = "../../ratatui" }
crossterm = { git = "https://github.com/nornagon/crossterm", branch = "nornagon/color-query" }
ratatui = { git = "https://github.com/nornagon/ratatui", branch = "nornagon-v0.29.0-patch" }
rmcp = { git = "https://github.com/bolinfest/rust-sdk", branch = "pr556" }
# Uncomment to debug local changes.
# rmcp = { path = "../../rust-sdk/crates/rmcp" }

View File

@@ -46,7 +46,7 @@ Use `codex mcp` to add/list/get/remove MCP server launchers defined in `config.t
### Notifications
-You can enable notifications by configuring a script that is run whenever the agent finishes a turn. The [notify documentation](../docs/config.md#notify) includes a detailed example that explains how to get desktop notifications via [terminal-notifier](https://github.com/julienXX/terminal-notifier) on macOS. When Codex detects that it is running under WSL 2 inside Windows Terminal (`WT_SESSION` is set), the TUI automatically falls back to native Windows toast notifications so approval prompts and completed turns surface even though Windows Terminal does not implement OSC 9.
+You can enable notifications by configuring a script that is run whenever the agent finishes a turn. The [notify documentation](../docs/config.md#notify) includes a detailed example that explains how to get desktop notifications via [terminal-notifier](https://github.com/julienXX/terminal-notifier) on macOS.
### `codex exec` to run Codex programmatically/non-interactively

View File

@@ -15,7 +15,6 @@ workspace = true
anyhow = { workspace = true }
clap = { workspace = true, features = ["derive"] }
codex-protocol = { workspace = true }
codex-utils-absolute-path = { workspace = true }
mcp-types = { workspace = true }
schemars = { workspace = true }
serde = { workspace = true, features = ["derive"] }

View File

@@ -31,7 +31,6 @@ use std::process::Command;
use ts_rs::TS;
const HEADER: &str = "// GENERATED CODE! DO NOT MODIFY BY HAND!\n\n";
const IGNORED_DEFINITIONS: &[&str] = &["Option<()>"];
#[derive(Clone)]
pub struct GeneratedSchema {
@@ -185,6 +184,7 @@ fn build_schema_bundle(schemas: Vec<GeneratedSchema>) -> Result<Value> {
"ServerNotification",
"ServerRequest",
];
const IGNORED_DEFINITIONS: &[&str] = &["Option<()>"];
let namespaced_types = collect_namespaced_types(&schemas);
let mut definitions = Map::new();
@@ -304,11 +304,8 @@ where
out_dir.join(format!("{file_stem}.json"))
};
if !IGNORED_DEFINITIONS.contains(&logical_name) {
write_pretty_json(out_path, &schema_value)
.with_context(|| format!("Failed to write JSON schema for {file_stem}"))?;
}
write_pretty_json(out_path, &schema_value)
.with_context(|| format!("Failed to write JSON schema for {file_stem}"))?;
let namespace = match raw_namespace {
Some("v1") | None => None,
Some(ns) => Some(ns.to_string()),

View File

@@ -117,9 +117,9 @@ client_request_definitions! {
params: v2::ThreadListParams,
response: v2::ThreadListResponse,
},
SkillsList => "skills/list" {
params: v2::SkillsListParams,
response: v2::SkillsListResponse,
ThreadCompact => "thread/compact" {
params: v2::ThreadCompactParams,
response: v2::ThreadCompactResponse,
},
TurnStart => "turn/start" {
params: v2::TurnStartParams,
@@ -139,16 +139,6 @@ client_request_definitions! {
response: v2::ModelListResponse,
},
McpServerOauthLogin => "mcpServer/oauth/login" {
params: v2::McpServerOauthLoginParams,
response: v2::McpServerOauthLoginResponse,
},
McpServersList => "mcpServers/list" {
params: v2::ListMcpServersParams,
response: v2::ListMcpServersResponse,
},
LoginAccount => "account/login/start" {
params: v2::LoginAccountParams,
response: v2::LoginAccountResponse,
@@ -174,12 +164,6 @@ client_request_definitions! {
response: v2::FeedbackUploadResponse,
},
/// Execute a command (argv vector) under the server's sandbox.
OneOffCommandExec => "command/exec" {
params: v2::CommandExecParams,
response: v2::CommandExecResponse,
},
ConfigRead => "config/read" {
params: v2::ConfigReadParams,
response: v2::ConfigReadResponse,
@@ -527,12 +511,8 @@ server_notification_definitions! {
ItemCompleted => "item/completed" (v2::ItemCompletedNotification),
AgentMessageDelta => "item/agentMessage/delta" (v2::AgentMessageDeltaNotification),
CommandExecutionOutputDelta => "item/commandExecution/outputDelta" (v2::CommandExecutionOutputDeltaNotification),
TerminalInteraction => "item/commandExecution/terminalInteraction" (v2::TerminalInteractionNotification),
FileChangeOutputDelta => "item/fileChange/outputDelta" (v2::FileChangeOutputDeltaNotification),
McpToolCallProgress => "item/mcpToolCall/progress" (v2::McpToolCallProgressNotification),
McpServerOauthLoginCompleted => "mcpServer/oauthLogin/completed" (v2::McpServerOauthLoginCompletedNotification),
AccountUpdated => "account/updated" (v2::AccountUpdatedNotification),
ModelPresetsUpdated => "model/presets/updated" (v2::ModelPresetsUpdatedNotification),
AccountRateLimitsUpdated => "account/rateLimits/updated" (v2::AccountRateLimitsUpdatedNotification),
ReasoningSummaryTextDelta => "item/reasoning/summaryTextDelta" (v2::ReasoningSummaryTextDeltaNotification),
ReasoningSummaryPartAdded => "item/reasoning/summaryPartAdded" (v2::ReasoningSummaryPartAddedNotification),
@@ -655,6 +635,7 @@ mod tests {
command: vec!["echo".to_string(), "hello".to_string()],
cwd: PathBuf::from("/tmp"),
reason: Some("because tests".to_string()),
risk: None,
parsed_cmd: vec![ParsedCommand::Unknown {
cmd: "echo hello".to_string(),
}],
@@ -674,6 +655,7 @@ mod tests {
"command": ["echo", "hello"],
"cwd": "/tmp",
"reason": "because tests",
"risk": null,
"parsedCmd": [
{
"type": "unknown",

View File

@@ -1,15 +0,0 @@
use crate::protocol::v1;
use crate::protocol::v2;
impl From<v1::ExecOneOffCommandParams> for v2::CommandExecParams {
fn from(value: v1::ExecOneOffCommandParams) -> Self {
Self {
command: value.command,
timeout_ms: value
.timeout_ms
.map(|timeout| i64::try_from(timeout).unwrap_or(60_000)),
cwd: value.cwd,
sandbox_policy: value.sandbox_policy.map(std::convert::Into::into),
}
}
}

View File

@@ -2,7 +2,6 @@
// Exposes protocol pieces used by `lib.rs` via `pub use protocol::common::*;`.
pub mod common;
mod mappers;
pub mod thread_history;
pub mod v1;
pub mod v2;

View File

@@ -1,6 +1,5 @@
use crate::protocol::v2::ThreadItem;
use crate::protocol::v2::Turn;
use crate::protocol::v2::TurnError;
use crate::protocol::v2::TurnStatus;
use crate::protocol::v2::UserInput;
use codex_protocol::protocol::AgentReasoningEvent;
@@ -143,7 +142,6 @@ impl ThreadHistoryBuilder {
PendingTurn {
id: self.next_turn_id(),
items: Vec::new(),
error: None,
status: TurnStatus::Completed,
}
}
@@ -192,7 +190,6 @@ impl ThreadHistoryBuilder {
struct PendingTurn {
id: String,
items: Vec<ThreadItem>,
error: Option<TurnError>,
status: TurnStatus,
}
@@ -201,7 +198,6 @@ impl From<PendingTurn> for Turn {
Self {
id: value.id,
items: value.items,
error: value.error,
status: value.status,
}
}

View File

@@ -3,20 +3,20 @@ use std::path::PathBuf;
use codex_protocol::ConversationId;
use codex_protocol::config_types::ForcedLoginMethod;
use codex_protocol::config_types::ReasoningEffort;
use codex_protocol::config_types::ReasoningSummary;
use codex_protocol::config_types::SandboxMode;
use codex_protocol::config_types::Verbosity;
use codex_protocol::models::ResponseItem;
use codex_protocol::openai_models::ReasoningEffort;
use codex_protocol::parse_command::ParsedCommand;
use codex_protocol::protocol::AskForApproval;
use codex_protocol::protocol::EventMsg;
use codex_protocol::protocol::FileChange;
use codex_protocol::protocol::ReviewDecision;
use codex_protocol::protocol::SandboxCommandAssessment;
use codex_protocol::protocol::SandboxPolicy;
use codex_protocol::protocol::SessionSource;
use codex_protocol::protocol::TurnAbortReason;
use codex_utils_absolute_path::AbsolutePathBuf;
use schemars::JsonSchema;
use serde::Deserialize;
use serde::Serialize;
@@ -226,6 +226,7 @@ pub struct ExecCommandApprovalParams {
pub command: Vec<String>,
pub cwd: PathBuf,
pub reason: Option<String>,
pub risk: Option<SandboxCommandAssessment>,
pub parsed_cmd: Vec<ParsedCommand>,
}
@@ -360,7 +361,7 @@ pub struct Tools {
#[serde(rename_all = "camelCase")]
pub struct SandboxSettings {
#[serde(default)]
pub writable_roots: Vec<AbsolutePathBuf>,
pub writable_roots: Vec<PathBuf>,
pub network_access: Option<bool>,
pub exclude_tmpdir_env_var: Option<bool>,
pub exclude_slash_tmp: Option<bool>,

View File

@@ -2,36 +2,26 @@ use std::collections::HashMap;
use std::path::PathBuf;
use crate::protocol::common::AuthMode;
use codex_protocol::ConversationId;
use codex_protocol::account::PlanType;
use codex_protocol::approvals::ExecPolicyAmendment as CoreExecPolicyAmendment;
use codex_protocol::config_types::ForcedLoginMethod;
use codex_protocol::approvals::SandboxCommandAssessment as CoreSandboxCommandAssessment;
use codex_protocol::config_types::ReasoningEffort;
use codex_protocol::config_types::ReasoningSummary;
use codex_protocol::config_types::SandboxMode as CoreSandboxMode;
use codex_protocol::config_types::Verbosity;
use codex_protocol::items::AgentMessageContent as CoreAgentMessageContent;
use codex_protocol::items::TurnItem as CoreTurnItem;
use codex_protocol::models::ResponseItem;
use codex_protocol::openai_models::ReasoningEffort;
use codex_protocol::parse_command::ParsedCommand as CoreParsedCommand;
use codex_protocol::plan_tool::PlanItemArg as CorePlanItemArg;
use codex_protocol::plan_tool::StepStatus as CorePlanStepStatus;
use codex_protocol::protocol::AskForApproval as CoreAskForApproval;
use codex_protocol::protocol::CodexErrorInfo as CoreCodexErrorInfo;
use codex_protocol::protocol::CreditsSnapshot as CoreCreditsSnapshot;
use codex_protocol::protocol::RateLimitSnapshot as CoreRateLimitSnapshot;
use codex_protocol::protocol::RateLimitWindow as CoreRateLimitWindow;
use codex_protocol::protocol::SessionSource as CoreSessionSource;
use codex_protocol::protocol::SkillErrorInfo as CoreSkillErrorInfo;
use codex_protocol::protocol::SkillMetadata as CoreSkillMetadata;
use codex_protocol::protocol::SkillScope as CoreSkillScope;
use codex_protocol::protocol::TokenUsage as CoreTokenUsage;
use codex_protocol::protocol::TokenUsageInfo as CoreTokenUsageInfo;
use codex_protocol::user_input::UserInput as CoreUserInput;
use codex_utils_absolute_path::AbsolutePathBuf;
use mcp_types::ContentBlock as McpContentBlock;
use mcp_types::Resource as McpResource;
use mcp_types::ResourceTemplate as McpResourceTemplate;
use mcp_types::Tool as McpTool;
use schemars::JsonSchema;
use serde::Deserialize;
use serde::Serialize;
@@ -130,81 +120,21 @@ impl From<CoreCodexErrorInfo> for CodexErrorInfo {
}
}
#[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq, Eq, JsonSchema, TS)]
#[serde(rename_all = "kebab-case")]
#[ts(rename_all = "kebab-case", export_to = "v2/")]
pub enum AskForApproval {
#[serde(rename = "untrusted")]
#[ts(rename = "untrusted")]
UnlessTrusted,
OnFailure,
OnRequest,
Never,
}
impl AskForApproval {
pub fn to_core(self) -> CoreAskForApproval {
match self {
AskForApproval::UnlessTrusted => CoreAskForApproval::UnlessTrusted,
AskForApproval::OnFailure => CoreAskForApproval::OnFailure,
AskForApproval::OnRequest => CoreAskForApproval::OnRequest,
AskForApproval::Never => CoreAskForApproval::Never,
}
}
}
impl From<CoreAskForApproval> for AskForApproval {
fn from(value: CoreAskForApproval) -> Self {
match value {
CoreAskForApproval::UnlessTrusted => AskForApproval::UnlessTrusted,
CoreAskForApproval::OnFailure => AskForApproval::OnFailure,
CoreAskForApproval::OnRequest => AskForApproval::OnRequest,
CoreAskForApproval::Never => AskForApproval::Never,
}
}
}
#[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq, Eq, JsonSchema, TS)]
#[serde(rename_all = "kebab-case")]
#[ts(rename_all = "kebab-case", export_to = "v2/")]
pub enum SandboxMode {
ReadOnly,
WorkspaceWrite,
DangerFullAccess,
}
impl SandboxMode {
pub fn to_core(self) -> CoreSandboxMode {
match self {
SandboxMode::ReadOnly => CoreSandboxMode::ReadOnly,
SandboxMode::WorkspaceWrite => CoreSandboxMode::WorkspaceWrite,
SandboxMode::DangerFullAccess => CoreSandboxMode::DangerFullAccess,
}
}
}
impl From<CoreSandboxMode> for SandboxMode {
fn from(value: CoreSandboxMode) -> Self {
match value {
CoreSandboxMode::ReadOnly => SandboxMode::ReadOnly,
CoreSandboxMode::WorkspaceWrite => SandboxMode::WorkspaceWrite,
CoreSandboxMode::DangerFullAccess => SandboxMode::DangerFullAccess,
}
}
}
v2_enum_from_core!(
pub enum ReviewDelivery from codex_protocol::protocol::ReviewDelivery {
Inline, Detached
pub enum AskForApproval from codex_protocol::protocol::AskForApproval {
UnlessTrusted, OnFailure, OnRequest, Never
}
);
v2_enum_from_core!(
pub enum McpAuthStatus from codex_protocol::protocol::McpAuthStatus {
Unsupported,
NotLoggedIn,
BearerToken,
OAuth
pub enum SandboxMode from codex_protocol::config_types::SandboxMode {
ReadOnly, WorkspaceWrite, DangerFullAccess
}
);
v2_enum_from_core!(
pub enum ReviewDelivery from codex_protocol::protocol::ReviewDelivery {
Inline, Detached
}
);
@@ -218,72 +148,6 @@ pub enum ConfigLayerName {
User,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default, JsonSchema, TS)]
#[serde(rename_all = "snake_case")]
#[ts(export_to = "v2/")]
pub struct SandboxWorkspaceWrite {
#[serde(default)]
pub writable_roots: Vec<PathBuf>,
#[serde(default)]
pub network_access: bool,
#[serde(default)]
pub exclude_tmpdir_env_var: bool,
#[serde(default)]
pub exclude_slash_tmp: bool,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "snake_case")]
#[ts(export_to = "v2/")]
pub struct ToolsV2 {
#[serde(alias = "web_search_request")]
pub web_search: Option<bool>,
pub view_image: Option<bool>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "snake_case")]
#[ts(export_to = "v2/")]
pub struct ProfileV2 {
pub model: Option<String>,
pub model_provider: Option<String>,
pub approval_policy: Option<AskForApproval>,
pub model_reasoning_effort: Option<ReasoningEffort>,
pub model_reasoning_summary: Option<ReasoningSummary>,
pub model_verbosity: Option<Verbosity>,
pub chatgpt_base_url: Option<String>,
#[serde(default, flatten)]
pub additional: HashMap<String, JsonValue>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "snake_case")]
#[ts(export_to = "v2/")]
pub struct Config {
pub model: Option<String>,
pub review_model: Option<String>,
pub model_context_window: Option<i64>,
pub model_auto_compact_token_limit: Option<i64>,
pub model_provider: Option<String>,
pub approval_policy: Option<AskForApproval>,
pub sandbox_mode: Option<SandboxMode>,
pub sandbox_workspace_write: Option<SandboxWorkspaceWrite>,
pub forced_chatgpt_workspace_id: Option<String>,
pub forced_login_method: Option<ForcedLoginMethod>,
pub tools: Option<ToolsV2>,
pub profile: Option<String>,
#[serde(default)]
pub profiles: HashMap<String, ProfileV2>,
pub instructions: Option<String>,
pub developer_instructions: Option<String>,
pub compact_prompt: Option<String>,
pub model_reasoning_effort: Option<ReasoningEffort>,
pub model_reasoning_summary: Option<ReasoningSummary>,
pub model_verbosity: Option<Verbosity>,
#[serde(default, flatten)]
pub additional: HashMap<String, JsonValue>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
@@ -334,8 +198,6 @@ pub struct OverriddenMetadata {
pub struct ConfigWriteResponse {
pub status: WriteStatus,
pub version: String,
/// Canonical path to the config file that was written.
pub file_path: String,
pub overridden_metadata: Option<OverriddenMetadata>,
}
@@ -362,7 +224,7 @@ pub struct ConfigReadParams {
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct ConfigReadResponse {
-pub config: Config,
+pub config: JsonValue,
pub origins: HashMap<String, ConfigLayerMetadata>,
#[serde(skip_serializing_if = "Option::is_none")]
pub layers: Option<Vec<ConfigLayer>>,
@@ -372,11 +234,10 @@ pub struct ConfigReadResponse {
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct ConfigValueWriteParams {
pub file_path: String,
pub key_path: String,
pub value: JsonValue,
pub merge_strategy: MergeStrategy,
/// Path to the config file to write; defaults to the user's `config.toml` when omitted.
pub file_path: Option<String>,
pub expected_version: Option<String>,
}
@@ -384,9 +245,8 @@ pub struct ConfigValueWriteParams {
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct ConfigBatchWriteParams {
pub file_path: String,
pub edits: Vec<ConfigEdit>,
/// Path to the config file to write; defaults to the user's `config.toml` when omitted.
pub file_path: Option<String>,
pub expected_version: Option<String>,
}
@@ -399,16 +259,19 @@ pub struct ConfigEdit {
pub merge_strategy: MergeStrategy,
}
v2_enum_from_core!(
pub enum CommandRiskLevel from codex_protocol::approvals::SandboxRiskLevel {
Low,
Medium,
High
}
);
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub enum ApprovalDecision {
Accept,
/// Approve and remember the approval for the session.
AcceptForSession,
AcceptWithExecpolicyAmendment {
execpolicy_amendment: ExecPolicyAmendment,
},
Decline,
Cancel,
}
@@ -424,7 +287,7 @@ pub enum SandboxPolicy {
#[ts(rename_all = "camelCase")]
WorkspaceWrite {
#[serde(default)]
writable_roots: Vec<AbsolutePathBuf>,
writable_roots: Vec<PathBuf>,
#[serde(default)]
network_access: bool,
#[serde(default)]
@@ -478,23 +341,28 @@ impl From<codex_protocol::protocol::SandboxPolicy> for SandboxPolicy {
}
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, JsonSchema, TS)]
#[serde(transparent)]
#[ts(type = "Array<string>", export_to = "v2/")]
pub struct ExecPolicyAmendment {
pub command: Vec<String>,
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct SandboxCommandAssessment {
pub description: String,
pub risk_level: CommandRiskLevel,
}
impl ExecPolicyAmendment {
pub fn into_core(self) -> CoreExecPolicyAmendment {
CoreExecPolicyAmendment::new(self.command)
impl SandboxCommandAssessment {
pub fn into_core(self) -> CoreSandboxCommandAssessment {
CoreSandboxCommandAssessment {
description: self.description,
risk_level: self.risk_level.to_core(),
}
}
}
impl From<CoreExecPolicyAmendment> for ExecPolicyAmendment {
fn from(value: CoreExecPolicyAmendment) -> Self {
impl From<CoreSandboxCommandAssessment> for SandboxCommandAssessment {
fn from(value: CoreSandboxCommandAssessment) -> Self {
Self {
command: value.command().to_vec(),
description: value.description,
risk_level: CommandRiskLevel::from(value.risk_level),
}
}
}
@@ -672,21 +540,10 @@ pub struct CancelLoginAccountParams {
pub login_id: String,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub enum CancelLoginAccountStatus {
Canceled,
NotFound,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, JsonSchema, TS)]
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct CancelLoginAccountResponse {
pub status: CancelLoginAccountStatus,
}
pub struct CancelLoginAccountResponse {}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
@@ -751,66 +608,20 @@ pub struct ReasoningEffortOption {
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct ModelListResponse {}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct ListMcpServersParams {
/// Opaque pagination cursor returned by a previous call.
pub cursor: Option<String>,
/// Optional page size; defaults to a server-defined value.
pub limit: Option<u32>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct McpServer {
pub name: String,
pub tools: std::collections::HashMap<String, McpTool>,
pub resources: Vec<McpResource>,
pub resource_templates: Vec<McpResourceTemplate>,
pub auth_status: McpAuthStatus,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct ListMcpServersResponse {
pub data: Vec<McpServer>,
pub struct ModelListResponse {
pub data: Vec<Model>,
/// Opaque cursor to pass to the next call to continue after the last item.
/// If None, there are no more items to return.
pub next_cursor: Option<String>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct McpServerOauthLoginParams {
pub name: String,
#[serde(default, skip_serializing_if = "Option::is_none")]
#[ts(optional)]
pub scopes: Option<Vec<String>>,
#[serde(default, skip_serializing_if = "Option::is_none")]
#[ts(optional)]
pub timeout_secs: Option<i64>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct McpServerOauthLoginResponse {
pub authorization_url: String,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct FeedbackUploadParams {
pub classification: String,
pub reason: Option<String>,
pub thread_id: Option<String>,
pub conversation_id: Option<ConversationId>,
pub include_logs: bool,
}
@@ -821,26 +632,6 @@ pub struct FeedbackUploadResponse {
pub thread_id: String,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct CommandExecParams {
pub command: Vec<String>,
#[ts(type = "number | null")]
pub timeout_ms: Option<i64>,
pub cwd: Option<PathBuf>,
pub sandbox_policy: Option<SandboxPolicy>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct CommandExecResponse {
pub exit_code: i32,
pub stdout: String,
pub stderr: String,
}
// === Threads, Turns, and Items ===
// Thread APIs
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default, JsonSchema, TS)]
@@ -956,83 +747,14 @@ pub struct ThreadListResponse {
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct SkillsListParams {
/// When empty, defaults to the current session working directory.
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub cwds: Vec<PathBuf>,
pub struct ThreadCompactParams {
pub thread_id: String,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct SkillsListResponse {
pub data: Vec<SkillsListEntry>,
}
#[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq, Eq, JsonSchema, TS)]
#[serde(rename_all = "snake_case")]
#[ts(rename_all = "snake_case")]
#[ts(export_to = "v2/")]
pub enum SkillScope {
User,
Repo,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct SkillMetadata {
pub name: String,
pub description: String,
pub path: PathBuf,
pub scope: SkillScope,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct SkillErrorInfo {
pub path: PathBuf,
pub message: String,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct SkillsListEntry {
pub cwd: PathBuf,
pub skills: Vec<SkillMetadata>,
pub errors: Vec<SkillErrorInfo>,
}
impl From<CoreSkillMetadata> for SkillMetadata {
fn from(value: CoreSkillMetadata) -> Self {
Self {
name: value.name,
description: value.description,
path: value.path,
scope: value.scope.into(),
}
}
}
impl From<CoreSkillScope> for SkillScope {
fn from(value: CoreSkillScope) -> Self {
match value {
CoreSkillScope::User => Self::User,
CoreSkillScope::Repo => Self::Repo,
}
}
}
impl From<CoreSkillErrorInfo> for SkillErrorInfo {
fn from(value: CoreSkillErrorInfo) -> Self {
Self {
path: value.path,
message: value.message,
}
}
}
pub struct ThreadCompactResponse {}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
@@ -1044,7 +766,6 @@ pub struct Thread {
/// Model provider used for this thread (for example, 'openai').
pub model_provider: String,
/// Unix timestamp (in seconds) when the thread was created.
#[ts(type = "number")]
pub created_at: i64,
/// [UNSTABLE] Path to the thread on disk.
pub path: PathBuf,
@@ -1069,13 +790,6 @@ pub struct AccountUpdatedNotification {
pub auth_mode: Option<AuthMode>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct ModelPresetsUpdatedNotification {
pub models: Vec<Model>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
@@ -1142,9 +856,8 @@ pub struct Turn {
/// For all other responses and notifications returning a Turn,
/// the items field will be an empty list.
pub items: Vec<ThreadItem>,
#[serde(flatten)]
pub status: TurnStatus,
/// Only populated when the Turn's status is failed.
pub error: Option<TurnError>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS, Error)]
@@ -1161,20 +874,17 @@ pub struct TurnError {
#[ts(export_to = "v2/")]
pub struct ErrorNotification {
pub error: TurnError,
// Set to true if the error is transient and the app-server process will automatically retry.
// If true, this will not interrupt a turn.
pub will_retry: bool,
pub thread_id: String,
pub turn_id: String,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
#[serde(tag = "status", rename_all = "camelCase")]
#[ts(tag = "status", export_to = "v2/")]
pub enum TurnStatus {
Completed,
Interrupted,
Failed,
Failed { error: TurnError },
InProgress,
}
@@ -1343,7 +1053,6 @@ pub enum ThreadItem {
/// The command's exit code.
exit_code: Option<i32>,
/// The duration of the command execution in milliseconds.
#[ts(type = "number | null")]
duration_ms: Option<i64>,
},
#[serde(rename_all = "camelCase")]
@@ -1363,15 +1072,15 @@ pub enum ThreadItem {
arguments: JsonValue,
result: Option<McpToolCallResult>,
error: Option<McpToolCallError>,
/// The duration of the MCP tool call in milliseconds.
#[ts(type = "number | null")]
duration_ms: Option<i64>,
},
#[serde(rename_all = "camelCase")]
#[ts(rename_all = "camelCase")]
WebSearch { id: String, query: String },
#[serde(rename_all = "camelCase")]
#[ts(rename_all = "camelCase")]
TodoList { id: String, items: Vec<TodoItem> },
#[serde(rename_all = "camelCase")]
#[ts(rename_all = "camelCase")]
ImageView { id: String, path: String },
#[serde(rename_all = "camelCase")]
#[ts(rename_all = "camelCase")]
@@ -1474,6 +1183,15 @@ pub struct McpToolCallError {
pub message: String,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct TodoItem {
pub id: String,
pub text: String,
pub completed: bool,
}
// === Server Notifications ===
// Thread/Turn lifecycle notifications and item progress events
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
@@ -1523,7 +1241,6 @@ pub struct TurnDiffUpdatedNotification {
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct TurnPlanUpdatedNotification {
pub thread_id: String,
pub turn_id: String,
pub explanation: Option<String>,
pub plan: Vec<TurnPlanStep>,
@@ -1602,7 +1319,6 @@ pub struct ReasoningSummaryTextDeltaNotification {
pub turn_id: String,
pub item_id: String,
pub delta: String,
#[ts(type = "number")]
pub summary_index: i64,
}
@@ -1613,7 +1329,6 @@ pub struct ReasoningSummaryPartAddedNotification {
pub thread_id: String,
pub turn_id: String,
pub item_id: String,
#[ts(type = "number")]
pub summary_index: i64,
}
@@ -1625,21 +1340,9 @@ pub struct ReasoningTextDeltaNotification {
pub turn_id: String,
pub item_id: String,
pub delta: String,
#[ts(type = "number")]
pub content_index: i64,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct TerminalInteractionNotification {
pub thread_id: String,
pub turn_id: String,
pub item_id: String,
pub process_id: String,
pub stdin: String,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
@@ -1650,16 +1353,6 @@ pub struct CommandExecutionOutputDeltaNotification {
pub delta: String,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct FileChangeOutputDeltaNotification {
pub thread_id: String,
pub turn_id: String,
pub item_id: String,
pub delta: String,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
@@ -1670,17 +1363,6 @@ pub struct McpToolCallProgressNotification {
pub message: String,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct McpServerOauthLoginCompletedNotification {
pub name: String,
pub success: bool,
#[serde(default, skip_serializing_if = "Option::is_none")]
#[ts(optional)]
pub error: Option<String>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
@@ -1707,8 +1389,17 @@ pub struct CommandExecutionRequestApprovalParams {
pub item_id: String,
/// Optional explanatory reason (e.g. request for network access).
pub reason: Option<String>,
/// Optional proposed execpolicy amendment to allow similar commands without prompting.
pub proposed_execpolicy_amendment: Option<ExecPolicyAmendment>,
/// Optional model-provided risk assessment describing the blocked command.
pub risk: Option<SandboxCommandAssessment>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct CommandExecutionRequestAcceptSettings {
/// If true, automatically approve this command for the duration of the session.
#[serde(default)]
pub for_session: bool,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
@@ -1716,6 +1407,10 @@ pub struct CommandExecutionRequestApprovalParams {
#[ts(export_to = "v2/")]
pub struct CommandExecutionRequestApprovalResponse {
pub decision: ApprovalDecision,
/// Optional approval settings for when the decision is `accept`.
/// Ignored if the decision is `decline` or `cancel`.
#[serde(default)]
pub accept_settings: Option<CommandExecutionRequestAcceptSettings>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
@@ -1752,7 +1447,6 @@ pub struct RateLimitSnapshot {
pub primary: Option<RateLimitWindow>,
pub secondary: Option<RateLimitWindow>,
pub credits: Option<CreditsSnapshot>,
pub plan_type: Option<PlanType>,
}
impl From<CoreRateLimitSnapshot> for RateLimitSnapshot {
@@ -1761,7 +1455,6 @@ impl From<CoreRateLimitSnapshot> for RateLimitSnapshot {
primary: value.primary.map(RateLimitWindow::from),
secondary: value.secondary.map(RateLimitWindow::from),
credits: value.credits.map(CreditsSnapshot::from),
plan_type: value.plan_type,
}
}
}
@@ -1771,9 +1464,7 @@ impl From<CoreRateLimitSnapshot> for RateLimitSnapshot {
#[ts(export_to = "v2/")]
pub struct RateLimitWindow {
pub used_percent: i32,
#[ts(type = "number | null")]
pub window_duration_mins: Option<i64>,
#[ts(type = "number | null")]
pub resets_at: Option<i64>,
}

View File

@@ -21,6 +21,7 @@ use codex_app_server_protocol::ApprovalDecision;
use codex_app_server_protocol::AskForApproval;
use codex_app_server_protocol::ClientInfo;
use codex_app_server_protocol::ClientRequest;
use codex_app_server_protocol::CommandExecutionRequestAcceptSettings;
use codex_app_server_protocol::CommandExecutionRequestApprovalParams;
use codex_app_server_protocol::CommandExecutionRequestApprovalResponse;
use codex_app_server_protocol::FileChangeRequestApprovalParams;
@@ -553,10 +554,6 @@ impl CodexClient {
print!("{}", delta.delta);
std::io::stdout().flush().ok();
}
ServerNotification::TerminalInteraction(delta) => {
println!("[stdin sent: {}]", delta.stdin);
std::io::stdout().flush().ok();
}
ServerNotification::ItemStarted(payload) => {
println!("\n< item started: {:?}", payload.item);
}
@@ -566,9 +563,7 @@ impl CodexClient {
ServerNotification::TurnCompleted(payload) => {
if payload.turn.id == turn_id {
println!("\n< turn/completed notification: {:?}", payload.turn.status);
if payload.turn.status == TurnStatus::Failed
&& let Some(error) = payload.turn.error
{
if let TurnStatus::Failed { error } = &payload.turn.status {
println!("[turn error] {}", error.message);
}
break;
@@ -756,7 +751,7 @@ impl CodexClient {
turn_id,
item_id,
reason,
proposed_execpolicy_amendment,
risk,
} = params;
println!(
@@ -765,12 +760,13 @@ impl CodexClient {
if let Some(reason) = reason.as_deref() {
println!("< reason: {reason}");
}
if let Some(execpolicy_amendment) = proposed_execpolicy_amendment.as_ref() {
println!("< proposed execpolicy amendment: {execpolicy_amendment:?}");
if let Some(risk) = risk.as_ref() {
println!("< risk assessment: {risk:?}");
}
let response = CommandExecutionRequestApprovalResponse {
decision: ApprovalDecision::Accept,
accept_settings: Some(CommandExecutionRequestAcceptSettings { for_session: false }),
};
self.send_server_request_response(request_id, &response)?;
println!("< approved commandExecution request for item {item_id}");

View File

@@ -26,12 +26,11 @@ codex-login = { workspace = true }
codex-protocol = { workspace = true }
codex-app-server-protocol = { workspace = true }
codex-feedback = { workspace = true }
codex-rmcp-client = { workspace = true }
codex-utils-json-to-toml = { workspace = true }
chrono = { workspace = true }
serde = { workspace = true, features = ["derive"] }
serde_json = { workspace = true }
mcp-types = { workspace = true }
sha2 = { workspace = true }
tempfile = { workspace = true }
toml = { workspace = true }
tokio = { workspace = true, features = [
@@ -43,6 +42,7 @@ tokio = { workspace = true, features = [
] }
tracing = { workspace = true, features = ["log"] }
tracing-subscriber = { workspace = true, features = ["env-filter", "fmt"] }
opentelemetry-appender-tracing = { workspace = true }
uuid = { workspace = true, features = ["serde", "v7"] }
[dev-dependencies]

View File

@@ -1,15 +1,15 @@
# codex-app-server
`codex app-server` is the interface Codex uses to power rich interfaces such as the [Codex VS Code extension](https://marketplace.visualstudio.com/items?itemName=openai.chatgpt).
`codex app-server` is the interface Codex uses to power rich interfaces such as the [Codex VS Code extension](https://marketplace.visualstudio.com/items?itemName=openai.chatgpt). The message schema is currently unstable, but those who wish to build experimental UIs on top of Codex may find it valuable.
## Table of Contents
- [Protocol](#protocol)
- [Message Schema](#message-schema)
- [Core Primitives](#core-primitives)
- [Lifecycle Overview](#lifecycle-overview)
- [Initialization](#initialization)
- [API Overview](#api-overview)
- [Events](#events)
- [Core primitives](#core-primitives)
- [Thread & turn endpoints](#thread--turn-endpoints)
- [Events (work-in-progress)](#events-work-in-progress)
- [Auth endpoints](#auth-endpoints)
## Protocol
@@ -25,15 +25,6 @@ codex app-server generate-ts --out DIR
codex app-server generate-json-schema --out DIR
```
## Core Primitives
The API exposes three top level primitives representing an interaction between a user and Codex:
- **Thread**: A conversation between a user and the Codex agent. Each thread contains multiple turns.
- **Turn**: One turn of the conversation, typically starting with a user message and finishing with an agent message. Each turn contains multiple items.
- **Item**: Represents user inputs and agent outputs as part of the turn, persisted and used as the context for future conversations. Example items include user message, agent reasoning, agent message, shell command, file edit, etc.
Use the thread APIs to create, list, or archive conversations. Drive a conversation with turn APIs and stream progress via turn notifications.
## Lifecycle Overview
- Initialize once: Immediately after launching the codex app-server process, send an `initialize` request with your client metadata, then emit an `initialized` notification. Any other request before this handshake gets rejected.
@@ -46,16 +37,28 @@ Use the thread APIs to create, list, or archive conversations. Drive a conversat
Clients must send a single `initialize` request before invoking any other method, then acknowledge with an `initialized` notification. The server returns the user agent string it will present to upstream services; subsequent requests issued before initialization receive a `"Not initialized"` error, and repeated `initialize` calls receive an `"Already initialized"` error.
Applications building on top of `codex app-server` should identify themselves via the `clientInfo` parameter.
Example:
Example (from OpenAI's official VS Code extension):
```json
{ "method": "initialize", "id": 0, "params": {
"clientInfo": { "name": "codex-vscode", "title": "Codex VS Code Extension", "version": "0.1.0" }
} }
{ "id": 0, "result": { "userAgent": "codex-app-server/0.1.0 codex-vscode/0.1.0" } }
{ "method": "initialized" }
```
## API Overview
## Core primitives
We have three top-level primitives:
- Thread — a conversation between the Codex agent and a user. Each thread contains multiple turns.
- Turn — one turn of the conversation, typically starting with a user message and finishing with an agent message. Each turn contains multiple items.
- Item — represents user inputs and agent outputs as part of the turn, persisted and used as the context for future conversations.
## Thread & turn endpoints
The JSON-RPC API exposes dedicated methods for managing Codex conversations. Threads store long-lived conversation metadata, and turns store the per-message exchange (input → Codex output, including streamed items). Use the thread APIs to create, list, or archive sessions, then drive the conversation with turn APIs and notifications.
### Quick reference
- `thread/start` — create a new thread; emits `thread/started` and auto-subscribes you to turn/item events for that thread.
- `thread/resume` — reopen an existing thread by id so subsequent `turn/start` calls append to it.
- `thread/list` — page through stored rollouts; supports cursor-based pagination and optional `modelProviders` filtering.
@@ -63,18 +66,8 @@ Example (from OpenAI's official VSCode extension):
- `turn/start` — add user input to a thread and begin Codex generation; responds with the initial `turn` object and streams `turn/started`, `item/*`, and `turn/completed` notifications.
- `turn/interrupt` — request cancellation of an in-flight turn by `(thread_id, turn_id)`; success is an empty `{}` response and the turn finishes with `status: "interrupted"`.
- `review/start` — kick off Codex's automated reviewer for a thread; responds like `turn/start` and emits `item/started`/`item/completed` notifications with `enteredReviewMode` and `exitedReviewMode` items, plus a final assistant `agentMessage` containing the review.
- `command/exec` — run a single command under the server sandbox without starting a thread/turn (handy for utilities and validation).
- `model/list` — request the available models; responds with `{}` and asynchronously emits `model/presets/updated` containing the catalog.
- `skills/list` — list skills for one or more `cwd` values.
- `mcpServer/oauth/login` — start an OAuth login for a configured MCP server; returns an `authorization_url` and later emits `mcpServer/oauthLogin/completed` once the browser flow finishes.
- `mcpServers/list` — enumerate configured MCP servers with their tools, resources, resource templates, and auth status; supports cursor+limit pagination.
- `feedback/upload` — submit a feedback report (classification + optional reason/logs and conversation_id); returns the tracking thread id.
- `command/exec` — run a single command under the server sandbox without starting a thread/turn (handy for utilities and validation).
- `config/read` — fetch the effective config on disk after resolving config layering.
- `config/value/write` — write a single config key/value to the user's config.toml on disk.
- `config/batchWrite` — apply multiple config edits atomically to the user's config.toml on disk.
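For example, a `config/value/write` exchange might look like this (the file path, version hash, and enum spellings are illustrative; field names follow the camelCase convention used elsewhere):
```json
{ "method": "config/value/write", "id": 40, "params": {
  "filePath": "/Users/me/.codex/config.toml",
  "keyPath": "approval_policy",
  "value": "never",
  "mergeStrategy": "replace"
} }
{ "id": 40, "result": { "status": "ok", "version": "sha256:…", "overriddenMetadata": null } }
```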
### Example: Start or resume a thread
### 1) Start or resume a thread
Start a fresh thread when you need a new Codex conversation.
@@ -105,7 +98,7 @@ To continue a stored session, call `thread/resume` with the `thread.id` you prev
{ "id": 11, "result": { "thread": { "id": "thr_123", } } }
```
### Example: List threads (with pagination & filters)
### 2) List threads (pagination & filters)
`thread/list` lets you render a history UI. Pass any combination of:
- `cursor` — opaque string from a prior response; omit for the first page.
@@ -130,7 +123,7 @@ Example:
When `nextCursor` is `null`, you've reached the final page.
### Example: Archive a thread
### 3) Archive a thread
Use `thread/archive` to move the persisted rollout (stored as a JSONL file on disk) into the archived sessions directory.
@@ -141,7 +134,7 @@ Use `thread/archive` to move the persisted rollout (stored as a JSONL file on di
An archived thread will not appear in future calls to `thread/list`.
### Example: Start a turn (send user input)
### 4) Start a turn (send user input)
Turns attach user input (text or images) to a thread and trigger Codex generation. The `input` field is a list of discriminated unions:
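For example, a `turn/start` request with mixed input might look like this (the thread id and the exact fields of each input variant are illustrative):
```json
{ "method": "turn/start", "id": 20, "params": {
  "threadId": "thr_123",
  "input": [
    { "type": "text", "text": "Fix the failing unit test" },
    { "type": "localImage", "path": "/tmp/screenshot.png" }
  ]
} }
```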
@@ -175,7 +168,7 @@ You can optionally specify config overrides on the new turn. If specified, these
} } }
```
### Example: Interrupt an active turn
### 5) Interrupt an active turn
You can cancel a running Turn with `turn/interrupt`.
@@ -189,7 +182,7 @@ You can cancel a running Turn with `turn/interrupt`.
The server requests cancellation of running subprocesses, then emits a `turn/completed` event with `status: "interrupted"`. Rely on the `turn/completed` event to know when Codex-side cleanup is done.
### Example: Request a code review
### 6) Request a code review
Use `review/start` to run Codex's reviewer on the currently checked-out project. The request takes the thread id plus a `target` describing what should be reviewed:
@@ -248,26 +241,7 @@ containing an `exitedReviewMode` item with the final review text:
The `review` string is plain text that already bundles the overall explanation plus a bullet list for each structured finding (matching `ThreadItem::ExitedReviewMode` in the generated schema). Use this notification to render the reviewer output in your client.
### Example: One-off command execution
Run a standalone command (argv vector) in the server's sandbox without creating a thread or turn:
```json
{ "method": "command/exec", "id": 32, "params": {
"command": ["ls", "-la"],
"cwd": "/Users/me/project", // optional; defaults to server cwd
"sandboxPolicy": { "type": "workspaceWrite" }, // optional; defaults to user config
"timeoutMs": 10000 // optional; ms timeout; defaults to server timeout
} }
{ "id": 32, "result": { "exitCode": 0, "stdout": "...", "stderr": "" } }
```
Notes:
- Empty `command` arrays are rejected.
- `sandboxPolicy` accepts the same shape used by `turn/start` (e.g., `dangerFullAccess`, `readOnly`, `workspaceWrite` with flags).
- When omitted, `timeoutMs` falls back to the server default.
## Events
## Events (work-in-progress)
Event notifications are the server-initiated event stream for thread lifecycles, turn lifecycles, and the items within them. After you start or resume a thread, keep reading stdout for `thread/started`, `turn/*`, and `item/*` notifications.
@@ -277,12 +251,11 @@ The app-server streams JSON-RPC notifications while a turn is running. Each turn
- `turn/started``{ turn }` with the turn id, empty `items`, and `status: "inProgress"`.
- `turn/completed``{ turn }` where `turn.status` is `completed`, `interrupted`, or `failed`; failures carry `{ error: { message, codexErrorInfo? } }`.
- `turn/diff/updated``{ threadId, turnId, diff }` represents the up-to-date snapshot of the turn-level unified diff, emitted after every FileChange item. `diff` is the latest aggregated unified diff across every file change in the turn. UIs can render this to show the full "what changed" view without stitching individual `fileChange` items.
- `turn/plan/updated``{ turnId, explanation?, plan }` whenever the agent shares or changes its plan; each `plan` entry is `{ step, status }` with `status` in `pending`, `inProgress`, or `completed`.
Today both notifications carry an empty `items` array even when item events were streamed; rely on `item/*` notifications for the canonical item list until this is fixed.
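For illustration, a `turn/plan/updated` notification might look like this (ids and step text are hypothetical):
```json
{ "method": "turn/plan/updated", "params": {
  "turnId": "turn_9",
  "explanation": "Splitting the work into two steps",
  "plan": [
    { "step": "Update the parser", "status": "completed" },
    { "step": "Add regression tests", "status": "inProgress" }
  ]
} }
```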
#### Items
#### Thread items
`ThreadItem` is the tagged union carried in turn responses and `item/*` notifications. Currently we support events for the following items:
- `userMessage``{id, content}` where `content` is a list of user inputs (`text`, `image`, or `localImage`).
@@ -292,9 +265,6 @@ Today both notifications carry an empty `items` array even when item events were
- `fileChange``{id, changes, status}` describing proposed edits; `changes` list `{path, kind, diff}` and `status` is `inProgress`, `completed`, `failed`, or `declined`.
- `mcpToolCall``{id, server, tool, status, arguments, result?, error?}` describing MCP calls; `status` is `inProgress`, `completed`, or `failed`.
- `webSearch``{id, query}` for a web search request issued by the agent.
- `imageView``{id, path}` emitted when the agent invokes the image viewer tool.
- `enteredReviewMode``{id, review}` sent when the reviewer starts; `review` is a short user-facing label such as `"current changes"` or the requested target description.
- `exitedReviewMode``{id, review}` emitted when the reviewer finishes; `review` is the full plain-text review (usually, overall notes plus bullet point findings).
- `compacted` — `{threadId, turnId}` when Codex compacts the conversation history. This can happen automatically.
All items emit two shared lifecycle events:
@@ -312,7 +282,7 @@ There are additional item-specific events:
- `item/commandExecution/outputDelta` — streams stdout/stderr for the command; append deltas in order to render live output alongside `aggregatedOutput` in the final item.
Final `commandExecution` items include parsed `commandActions`, `status`, `exitCode`, and `durationMs` so the UI can summarize what ran and whether it succeeded.
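A final `commandExecution` item might look roughly like this (abridged; the tag and values are illustrative):
```json
{ "type": "commandExecution", "id": "item_7",
  "command": "cargo test", "status": "completed",
  "exitCode": 0, "durationMs": 4210,
  "aggregatedOutput": "running 12 tests...\n" }
```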
#### fileChange
- `item/fileChange/outputDelta` — contains the tool call response of the underlying `apply_patch` tool call.
`fileChange` items contain a `changes` list with `{path, kind, diff}` entries (`kind` is `add`, `delete`, or `update` with an optional `movePath`). The `status` tracks whether apply succeeded (`completed`), failed, or was `declined`.
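For example, a completed `fileChange` item might be shaped like this (values are illustrative):
```json
{ "type": "fileChange", "id": "item_8", "status": "completed",
  "changes": [
    { "path": "src/lib.rs", "kind": "update", "diff": "@@ -1,3 +1,4 @@ ..." }
  ] }
```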
### Errors
The `error` event is emitted whenever the server hits an error mid-turn (for example, upstream model errors or quota limits). It carries the same `{ error: { message, codexErrorInfo? } }` payload as `turn.status: "failed"` and may precede that terminal notification.
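A sketch of such a notification (the method name and values shown are illustrative):
```json
{ "method": "error", "params": {
  "threadId": "thr_123",
  "turnId": "turn_9",
  "error": { "message": "usage limit reached", "codexErrorInfo": "other" }
} }
```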
@@ -361,7 +331,7 @@ UI guidance for IDEs: surface an approval dialog as soon as the request arrives.
The JSON-RPC auth/account surface exposes request/response methods plus server-initiated notifications (no `id`). Use these to determine auth state, start or cancel logins, logout, and inspect ChatGPT rate limits.
### API Overview
### Quick reference
- `account/read` — fetch current account info; optionally refresh tokens.
- `account/login/start` — begin login (`apiKey` or `chatgpt`).
- `account/login/completed` (notify) — emitted when a login attempt finishes (success or error).
@@ -369,8 +339,6 @@ The JSON-RPC auth/account surface exposes request/response methods plus server-i
- `account/logout` — sign out; triggers `account/updated`.
- `account/updated` (notify) — emitted whenever auth mode changes (`authMode`: `apikey`, `chatgpt`, or `null`).
- `account/rateLimits/read` — fetch ChatGPT rate limits; updates arrive via `account/rateLimits/updated` (notify).
- `account/rateLimits/updated` (notify) — emitted whenever a user's ChatGPT rate limits change.
- `mcpServer/oauthLogin/completed` (notify) — emitted after a `mcpServer/oauth/login` flow finishes for a server; payload includes `{ name, success, error? }`.
### 1) Check auth state
@@ -448,3 +416,9 @@ Field notes:
- `usedPercent` is current usage within the OpenAI quota window.
- `windowDurationMins` is the quota window length.
- `resetsAt` is a Unix timestamp (seconds) for the next reset.
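Putting the fields together, a response to `account/rateLimits/read` might look like this (values are hypothetical; the `credits` fields mirror the snapshot types above):
```json
{ "id": 50, "result": {
  "primary": { "usedPercent": 42, "windowDurationMins": 300, "resetsAt": 1764633600 },
  "secondary": null,
  "credits": { "unlimited": false, "balance": "5" }
} }
```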
### Dev notes
- `codex app-server generate-ts --out <dir>` emits v2 types under `v2/`.
- `codex app-server generate-json-schema --out <dir>` outputs `codex_app_server_protocol.schemas.json`.
- See [“Authentication and authorization” in the config docs](../../docs/config.md#authentication-and-authorization) for configuration knobs.

View File

@@ -18,8 +18,6 @@ use codex_app_server_protocol::ContextCompactedNotification;
use codex_app_server_protocol::ErrorNotification;
use codex_app_server_protocol::ExecCommandApprovalParams;
use codex_app_server_protocol::ExecCommandApprovalResponse;
use codex_app_server_protocol::ExecPolicyAmendment as V2ExecPolicyAmendment;
use codex_app_server_protocol::FileChangeOutputDeltaNotification;
use codex_app_server_protocol::FileChangeRequestApprovalParams;
use codex_app_server_protocol::FileChangeRequestApprovalResponse;
use codex_app_server_protocol::FileUpdateChange;
@@ -34,9 +32,9 @@ use codex_app_server_protocol::PatchChangeKind as V2PatchChangeKind;
use codex_app_server_protocol::ReasoningSummaryPartAddedNotification;
use codex_app_server_protocol::ReasoningSummaryTextDeltaNotification;
use codex_app_server_protocol::ReasoningTextDeltaNotification;
use codex_app_server_protocol::SandboxCommandAssessment as V2SandboxCommandAssessment;
use codex_app_server_protocol::ServerNotification;
use codex_app_server_protocol::ServerRequestPayload;
use codex_app_server_protocol::TerminalInteractionNotification;
use codex_app_server_protocol::ThreadItem;
use codex_app_server_protocol::ThreadTokenUsage;
use codex_app_server_protocol::ThreadTokenUsageUpdatedNotification;
@@ -63,7 +61,6 @@ use codex_core::protocol::ReviewDecision;
use codex_core::protocol::TokenCountEvent;
use codex_core::protocol::TurnDiffEvent;
use codex_core::review_format::format_review_findings_block;
use codex_core::review_prompts;
use codex_protocol::ConversationId;
use codex_protocol::plan_tool::UpdatePlanArgs;
use codex_protocol::protocol::ReviewOutputEvent;
@@ -179,7 +176,7 @@ pub(crate) async fn apply_bespoke_event_handling(
command,
cwd,
reason,
proposed_execpolicy_amendment,
risk,
parsed_cmd,
}) => match api_version {
ApiVersion::V1 => {
@@ -189,6 +186,7 @@ pub(crate) async fn apply_bespoke_event_handling(
command,
cwd,
reason,
risk,
parsed_cmd,
};
let rx = outgoing
@@ -206,8 +204,6 @@ pub(crate) async fn apply_bespoke_event_handling(
.map(V2ParsedCommand::from)
.collect::<Vec<_>>();
let command_string = shlex_join(&command);
let proposed_execpolicy_amendment_v2 =
proposed_execpolicy_amendment.map(V2ExecPolicyAmendment::from);
let params = CommandExecutionRequestApprovalParams {
thread_id: conversation_id.to_string(),
@@ -216,7 +212,7 @@ pub(crate) async fn apply_bespoke_event_handling(
// and emit the corresponding EventMsg, we repurpose the call_id as the item_id.
item_id: item_id.clone(),
reason,
proposed_execpolicy_amendment: proposed_execpolicy_amendment_v2,
risk: risk.map(V2SandboxCommandAssessment::from),
};
let rx = outgoing
.send_request(ServerRequestPayload::CommandExecutionRequestApproval(
@@ -334,7 +330,6 @@ pub(crate) async fn apply_bespoke_event_handling(
outgoing
.send_server_notification(ServerNotification::Error(ErrorNotification {
error: turn_error,
will_retry: false,
thread_id: conversation_id.to_string(),
turn_id: event_turn_id.clone(),
}))
@@ -350,38 +345,13 @@ pub(crate) async fn apply_bespoke_event_handling(
outgoing
.send_server_notification(ServerNotification::Error(ErrorNotification {
error: turn_error,
will_retry: true,
thread_id: conversation_id.to_string(),
turn_id: event_turn_id.clone(),
}))
.await;
}
EventMsg::ViewImageToolCall(view_image_event) => {
let item = ThreadItem::ImageView {
id: view_image_event.call_id.clone(),
path: view_image_event.path.to_string_lossy().into_owned(),
};
let started = ItemStartedNotification {
thread_id: conversation_id.to_string(),
turn_id: event_turn_id.clone(),
item: item.clone(),
};
outgoing
.send_server_notification(ServerNotification::ItemStarted(started))
.await;
let completed = ItemCompletedNotification {
thread_id: conversation_id.to_string(),
turn_id: event_turn_id.clone(),
item,
};
outgoing
.send_server_notification(ServerNotification::ItemCompleted(completed))
.await;
}
EventMsg::EnteredReviewMode(review_request) => {
let review = review_request
.user_facing_hint
.unwrap_or_else(|| review_prompts::user_facing_hint(&review_request.target));
let review = review_request.user_facing_hint;
let item = ThreadItem::EnteredReviewMode {
id: event_turn_id.clone(),
review,
@@ -531,57 +501,16 @@ pub(crate) async fn apply_bespoke_event_handling(
.await;
}
EventMsg::ExecCommandOutputDelta(exec_command_output_delta_event) => {
let item_id = exec_command_output_delta_event.call_id.clone();
let delta = String::from_utf8_lossy(&exec_command_output_delta_event.chunk).to_string();
// The underlying EventMsg::ExecCommandOutputDelta is used for shell, unified_exec,
// and apply_patch tool calls. We represent apply_patch with the FileChange item, and
// everything else with the CommandExecution item.
//
// We need to detect which item type it is so we can emit the right notification.
// We already have state tracking FileChange items on item/started, so let's use that.
let is_file_change = {
let map = turn_summary_store.lock().await;
map.get(&conversation_id)
.is_some_and(|summary| summary.file_change_started.contains(&item_id))
};
if is_file_change {
let notification = FileChangeOutputDeltaNotification {
thread_id: conversation_id.to_string(),
turn_id: event_turn_id.clone(),
item_id,
delta,
};
outgoing
.send_server_notification(ServerNotification::FileChangeOutputDelta(
notification,
))
.await;
} else {
let notification = CommandExecutionOutputDeltaNotification {
thread_id: conversation_id.to_string(),
turn_id: event_turn_id.clone(),
item_id,
delta,
};
outgoing
.send_server_notification(ServerNotification::CommandExecutionOutputDelta(
notification,
))
.await;
}
}
EventMsg::TerminalInteraction(terminal_event) => {
let item_id = terminal_event.call_id.clone();
let notification = TerminalInteractionNotification {
let notification = CommandExecutionOutputDeltaNotification {
thread_id: conversation_id.to_string(),
turn_id: event_turn_id.clone(),
item_id,
process_id: terminal_event.process_id,
stdin: terminal_event.stdin,
item_id: exec_command_output_delta_event.call_id.clone(),
delta: String::from_utf8_lossy(&exec_command_output_delta_event.chunk).to_string(),
};
outgoing
.send_server_notification(ServerNotification::TerminalInteraction(notification))
.send_server_notification(ServerNotification::CommandExecutionOutputDelta(
notification,
))
.await;
}
EventMsg::ExecCommandEnd(exec_command_end_event) => {
@@ -679,7 +608,6 @@ pub(crate) async fn apply_bespoke_event_handling(
}
EventMsg::PlanUpdate(plan_update_event) => {
handle_turn_plan_update(
conversation_id,
&event_turn_id,
plan_update_event,
api_version,
@@ -712,7 +640,6 @@ async fn handle_turn_diff(
}
async fn handle_turn_plan_update(
conversation_id: ConversationId,
event_turn_id: &str,
plan_update_event: UpdatePlanArgs,
api_version: ApiVersion,
@@ -720,7 +647,6 @@ async fn handle_turn_plan_update(
) {
if let ApiVersion::V2 = api_version {
let notification = TurnPlanUpdatedNotification {
thread_id: conversation_id.to_string(),
turn_id: event_turn_id.to_string(),
explanation: plan_update_event.explanation,
plan: plan_update_event
@@ -739,7 +665,6 @@ async fn emit_turn_completed_with_status(
conversation_id: ConversationId,
event_turn_id: String,
status: TurnStatus,
error: Option<TurnError>,
outgoing: &OutgoingMessageSender,
) {
let notification = TurnCompletedNotification {
@@ -747,7 +672,6 @@ async fn emit_turn_completed_with_status(
turn: Turn {
id: event_turn_id,
items: vec![],
error,
status,
},
};
@@ -836,12 +760,13 @@ async fn handle_turn_complete(
) {
let turn_summary = find_and_remove_turn_summary(conversation_id, turn_summary_store).await;
let (status, error) = match turn_summary.last_error {
Some(error) => (TurnStatus::Failed, Some(error)),
None => (TurnStatus::Completed, None),
let status = if let Some(error) = turn_summary.last_error {
TurnStatus::Failed { error }
} else {
TurnStatus::Completed
};
emit_turn_completed_with_status(conversation_id, event_turn_id, status, error, outgoing).await;
emit_turn_completed_with_status(conversation_id, event_turn_id, status, outgoing).await;
}
async fn handle_turn_interrupted(
@@ -856,7 +781,6 @@ async fn handle_turn_interrupted(
conversation_id,
event_turn_id,
TurnStatus::Interrupted,
None,
outgoing,
)
.await;
@@ -1062,11 +986,7 @@ async fn on_file_change_request_approval_response(
});
let (decision, completion_status) = match response.decision {
ApprovalDecision::Accept
| ApprovalDecision::AcceptForSession
| ApprovalDecision::AcceptWithExecpolicyAmendment { .. } => {
(ReviewDecision::Approved, None)
}
ApprovalDecision::Accept => (ReviewDecision::Approved, None),
ApprovalDecision::Decline => {
(ReviewDecision::Denied, Some(PatchApplyStatus::Declined))
}
@@ -1128,27 +1048,25 @@ async fn on_command_execution_request_approval_response(
error!("failed to deserialize CommandExecutionRequestApprovalResponse: {err}");
CommandExecutionRequestApprovalResponse {
decision: ApprovalDecision::Decline,
accept_settings: None,
}
});
let decision = response.decision;
let CommandExecutionRequestApprovalResponse {
decision,
accept_settings,
} = response;
let (decision, completion_status) = match decision {
ApprovalDecision::Accept => (ReviewDecision::Approved, None),
ApprovalDecision::AcceptForSession => (ReviewDecision::ApprovedForSession, None),
ApprovalDecision::AcceptWithExecpolicyAmendment {
execpolicy_amendment,
} => (
ReviewDecision::ApprovedExecpolicyAmendment {
proposed_execpolicy_amendment: execpolicy_amendment.into_core(),
},
None,
),
ApprovalDecision::Decline => (
let (decision, completion_status) = match (decision, accept_settings) {
(ApprovalDecision::Accept, Some(settings)) if settings.for_session => {
(ReviewDecision::ApprovedForSession, None)
}
(ApprovalDecision::Accept, _) => (ReviewDecision::Approved, None),
(ApprovalDecision::Decline, _) => (
ReviewDecision::Denied,
Some(CommandExecutionStatus::Declined),
),
ApprovalDecision::Cancel => (
(ApprovalDecision::Cancel, _) => (
ReviewDecision::Abort,
Some(CommandExecutionStatus::Declined),
),
@@ -1201,7 +1119,6 @@ async fn construct_mcp_tool_call_notification(
arguments: begin_event.invocation.arguments.unwrap_or(JsonValue::Null),
result: None,
error: None,
duration_ms: None,
};
ItemStartedNotification {
thread_id,
@@ -1210,7 +1127,7 @@ async fn construct_mcp_tool_call_notification(
}
}
/// similar to handle_mcp_tool_call_end in exec
async fn construct_mcp_tool_call_end_notification(
end_event: McpToolCallEndEvent,
thread_id: String,
@@ -1221,7 +1138,6 @@ async fn construct_mcp_tool_call_end_notification(
} else {
McpToolCallStatus::Failed
};
let duration_ms = i64::try_from(end_event.duration.as_millis()).ok();
let (result, error) = match &end_event.result {
Ok(value) => (
@@ -1247,7 +1163,6 @@ async fn construct_mcp_tool_call_end_notification(
arguments: end_event.invocation.arguments.unwrap_or(JsonValue::Null),
result,
error,
duration_ms,
};
ItemCompletedNotification {
thread_id,
@@ -1338,7 +1253,6 @@ mod tests {
OutgoingMessage::AppServerNotification(ServerNotification::TurnCompleted(n)) => {
assert_eq!(n.turn.id, event_turn_id);
assert_eq!(n.turn.status, TurnStatus::Completed);
assert_eq!(n.turn.error, None);
}
other => bail!("unexpected message: {other:?}"),
}
@@ -1379,7 +1293,6 @@ mod tests {
OutgoingMessage::AppServerNotification(ServerNotification::TurnCompleted(n)) => {
assert_eq!(n.turn.id, event_turn_id);
assert_eq!(n.turn.status, TurnStatus::Interrupted);
assert_eq!(n.turn.error, None);
}
other => bail!("unexpected message: {other:?}"),
}
@@ -1419,13 +1332,14 @@ mod tests {
match msg {
OutgoingMessage::AppServerNotification(ServerNotification::TurnCompleted(n)) => {
assert_eq!(n.turn.id, event_turn_id);
assert_eq!(n.turn.status, TurnStatus::Failed);
assert_eq!(
n.turn.error,
Some(TurnError {
message: "bad".to_string(),
codex_error_info: Some(V2CodexErrorInfo::Other),
})
n.turn.status,
TurnStatus::Failed {
error: TurnError {
message: "bad".to_string(),
codex_error_info: Some(V2CodexErrorInfo::Other),
}
}
);
}
other => bail!("unexpected message: {other:?}"),
@@ -1452,16 +1366,7 @@ mod tests {
],
};
let conversation_id = ConversationId::new();
handle_turn_plan_update(
conversation_id,
"turn-123",
update,
ApiVersion::V2,
&outgoing,
)
.await;
handle_turn_plan_update("turn-123", update, ApiVersion::V2, &outgoing).await;
let msg = rx
.recv()
@@ -1469,7 +1374,6 @@ mod tests {
.ok_or_else(|| anyhow!("should send one notification"))?;
match msg {
OutgoingMessage::AppServerNotification(ServerNotification::TurnPlanUpdated(n)) => {
assert_eq!(n.thread_id, conversation_id.to_string());
assert_eq!(n.turn_id, "turn-123");
assert_eq!(n.explanation.as_deref(), Some("need plan"));
assert_eq!(n.plan.len(), 2);
@@ -1520,7 +1424,6 @@ mod tests {
unlimited: false,
balance: Some("5".to_string()),
}),
plan_type: None,
};
handle_token_count_event(
@@ -1625,7 +1528,6 @@ mod tests {
arguments: serde_json::json!({"server": ""}),
result: None,
error: None,
duration_ms: None,
},
};
@@ -1698,13 +1600,14 @@ mod tests {
match msg {
OutgoingMessage::AppServerNotification(ServerNotification::TurnCompleted(n)) => {
assert_eq!(n.turn.id, a_turn1);
assert_eq!(n.turn.status, TurnStatus::Failed);
assert_eq!(
n.turn.error,
Some(TurnError {
message: "a1".to_string(),
codex_error_info: Some(V2CodexErrorInfo::BadRequest),
})
n.turn.status,
TurnStatus::Failed {
error: TurnError {
message: "a1".to_string(),
codex_error_info: Some(V2CodexErrorInfo::BadRequest),
}
}
);
}
other => bail!("unexpected message: {other:?}"),
@@ -1718,13 +1621,14 @@ mod tests {
match msg {
OutgoingMessage::AppServerNotification(ServerNotification::TurnCompleted(n)) => {
assert_eq!(n.turn.id, b_turn1);
assert_eq!(n.turn.status, TurnStatus::Failed);
assert_eq!(
n.turn.error,
Some(TurnError {
message: "b1".to_string(),
codex_error_info: None,
})
n.turn.status,
TurnStatus::Failed {
error: TurnError {
message: "b1".to_string(),
codex_error_info: None,
}
}
);
}
other => bail!("unexpected message: {other:?}"),
@@ -1739,7 +1643,6 @@ mod tests {
OutgoingMessage::AppServerNotification(ServerNotification::TurnCompleted(n)) => {
assert_eq!(n.turn.id, a_turn2);
assert_eq!(n.turn.status, TurnStatus::Completed);
assert_eq!(n.turn.error, None);
}
other => bail!("unexpected message: {other:?}"),
}
@@ -1779,7 +1682,6 @@ mod tests {
arguments: JsonValue::Null,
result: None,
error: None,
duration_ms: None,
},
};
@@ -1833,7 +1735,6 @@ mod tests {
structured_content: None,
}),
error: None,
duration_ms: Some(0),
},
};
@@ -1875,7 +1776,6 @@ mod tests {
error: Some(McpToolCallError {
message: "boom".to_string(),
}),
duration_ms: Some(1),
},
};

File diff suppressed because it is too large

View File

@@ -1,27 +1,65 @@
use crate::error_code::INTERNAL_ERROR_CODE;
use crate::error_code::INVALID_REQUEST_ERROR_CODE;
use anyhow::anyhow;
use codex_app_server_protocol::ConfigBatchWriteParams;
use codex_app_server_protocol::ConfigLayer;
use codex_app_server_protocol::ConfigLayerMetadata;
use codex_app_server_protocol::ConfigLayerName;
use codex_app_server_protocol::ConfigReadParams;
use codex_app_server_protocol::ConfigReadResponse;
use codex_app_server_protocol::ConfigValueWriteParams;
use codex_app_server_protocol::ConfigWriteErrorCode;
use codex_app_server_protocol::ConfigWriteResponse;
use codex_app_server_protocol::JSONRPCErrorError;
use codex_core::config::ConfigService;
use codex_core::config::ConfigServiceError;
use codex_app_server_protocol::MergeStrategy;
use codex_app_server_protocol::OverriddenMetadata;
use codex_app_server_protocol::WriteStatus;
use codex_core::config::ConfigToml;
use codex_core::config_loader::LoadedConfigLayers;
use codex_core::config_loader::LoaderOverrides;
use codex_core::config_loader::load_config_layers_with_overrides;
use codex_core::config_loader::merge_toml_values;
use serde_json::Value as JsonValue;
use serde_json::json;
use sha2::Digest;
use sha2::Sha256;
use std::collections::HashMap;
use std::path::Path;
use std::path::PathBuf;
use tempfile::NamedTempFile;
use tokio::task;
use toml::Value as TomlValue;
const SESSION_FLAGS_SOURCE: &str = "--config";
const MDM_SOURCE: &str = "com.openai.codex/config_toml_base64";
const CONFIG_FILE_NAME: &str = "config.toml";
#[derive(Clone)]
pub(crate) struct ConfigApi {
service: ConfigService,
codex_home: PathBuf,
cli_overrides: Vec<(String, TomlValue)>,
loader_overrides: LoaderOverrides,
}
impl ConfigApi {
pub(crate) fn new(codex_home: PathBuf, cli_overrides: Vec<(String, TomlValue)>) -> Self {
Self {
service: ConfigService::new(codex_home, cli_overrides),
codex_home,
cli_overrides,
loader_overrides: LoaderOverrides::default(),
}
}
#[cfg(test)]
fn with_overrides(
codex_home: PathBuf,
cli_overrides: Vec<(String, TomlValue)>,
loader_overrides: LoaderOverrides,
) -> Self {
Self {
codex_home,
cli_overrides,
loader_overrides,
}
}
@@ -29,32 +67,646 @@ impl ConfigApi {
&self,
params: ConfigReadParams,
) -> Result<ConfigReadResponse, JSONRPCErrorError> {
self.service.read(params).await.map_err(map_error)
let layers = self
.load_layers_state()
.await
.map_err(|err| internal_error("failed to read configuration layers", err))?;
let effective = layers.effective_config();
validate_config(&effective).map_err(|err| internal_error("invalid configuration", err))?;
let response = ConfigReadResponse {
config: to_json_value(&effective),
origins: layers.origins(),
layers: params.include_layers.then(|| layers.layers_high_to_low()),
};
Ok(response)
}
pub(crate) async fn write_value(
&self,
params: ConfigValueWriteParams,
) -> Result<ConfigWriteResponse, JSONRPCErrorError> {
self.service.write_value(params).await.map_err(map_error)
let edits = vec![(params.key_path, params.value, params.merge_strategy)];
self.apply_edits(params.file_path, params.expected_version, edits)
.await
}
pub(crate) async fn batch_write(
&self,
params: ConfigBatchWriteParams,
) -> Result<ConfigWriteResponse, JSONRPCErrorError> {
self.service.batch_write(params).await.map_err(map_error)
let edits = params
.edits
.into_iter()
.map(|edit| (edit.key_path, edit.value, edit.merge_strategy))
.collect();
self.apply_edits(params.file_path, params.expected_version, edits)
.await
}
async fn apply_edits(
&self,
file_path: String,
expected_version: Option<String>,
edits: Vec<(String, JsonValue, MergeStrategy)>,
) -> Result<ConfigWriteResponse, JSONRPCErrorError> {
let allowed_path = self.codex_home.join(CONFIG_FILE_NAME);
if !paths_match(&allowed_path, &file_path) {
return Err(config_write_error(
ConfigWriteErrorCode::ConfigLayerReadonly,
"Only writes to the user config are allowed",
));
}
let layers = self
.load_layers_state()
.await
.map_err(|err| internal_error("failed to load configuration", err))?;
if let Some(expected) = expected_version.as_deref()
&& expected != layers.user.version
{
return Err(config_write_error(
ConfigWriteErrorCode::ConfigVersionConflict,
"Configuration was modified since last read. Fetch latest version and retry.",
));
}
let mut user_config = layers.user.config.clone();
let mut mutated = false;
let mut parsed_segments = Vec::new();
for (key_path, value, strategy) in edits.into_iter() {
let segments = parse_key_path(&key_path).map_err(|message| {
config_write_error(ConfigWriteErrorCode::ConfigValidationError, message)
})?;
let parsed_value = parse_value(value).map_err(|message| {
config_write_error(ConfigWriteErrorCode::ConfigValidationError, message)
})?;
let changed = apply_merge(&mut user_config, &segments, parsed_value.as_ref(), strategy)
.map_err(|err| match err {
MergeError::PathNotFound => config_write_error(
ConfigWriteErrorCode::ConfigPathNotFound,
"Path not found",
),
MergeError::Validation(message) => {
config_write_error(ConfigWriteErrorCode::ConfigValidationError, message)
}
})?;
mutated |= changed;
parsed_segments.push(segments);
}
validate_config(&user_config).map_err(|err| {
config_write_error(
ConfigWriteErrorCode::ConfigValidationError,
format!("Invalid configuration: {err}"),
)
})?;
let updated_layers = layers.with_user_config(user_config.clone());
let effective = updated_layers.effective_config();
validate_config(&effective).map_err(|err| {
config_write_error(
ConfigWriteErrorCode::ConfigValidationError,
format!("Invalid configuration: {err}"),
)
})?;
if mutated {
self.persist_user_config(&user_config)
.await
.map_err(|err| internal_error("failed to persist config.toml", err))?;
}
let overridden = first_overridden_edit(&updated_layers, &effective, &parsed_segments);
let status = overridden
.as_ref()
.map(|_| WriteStatus::OkOverridden)
.unwrap_or(WriteStatus::Ok);
Ok(ConfigWriteResponse {
status,
version: updated_layers.user.version.clone(),
overridden_metadata: overridden,
})
}
async fn load_layers_state(&self) -> std::io::Result<LayersState> {
let LoadedConfigLayers {
base,
managed_config,
managed_preferences,
} = load_config_layers_with_overrides(&self.codex_home, self.loader_overrides.clone())
.await?;
let user = LayerState::new(
ConfigLayerName::User,
self.codex_home.join(CONFIG_FILE_NAME),
base,
);
let session_flags = LayerState::new(
ConfigLayerName::SessionFlags,
PathBuf::from(SESSION_FLAGS_SOURCE),
{
let mut root = TomlValue::Table(toml::map::Map::new());
for (path, value) in self.cli_overrides.iter() {
apply_override(&mut root, path, value.clone());
}
root
},
);
let system = managed_config.map(|cfg| {
LayerState::new(
ConfigLayerName::System,
system_config_path(&self.codex_home),
cfg,
)
});
let mdm = managed_preferences
.map(|cfg| LayerState::new(ConfigLayerName::Mdm, PathBuf::from(MDM_SOURCE), cfg));
Ok(LayersState {
user,
session_flags,
system,
mdm,
})
}
async fn persist_user_config(&self, user_config: &TomlValue) -> anyhow::Result<()> {
let codex_home = self.codex_home.clone();
let serialized = toml::to_string_pretty(user_config)?;
task::spawn_blocking(move || -> anyhow::Result<()> {
std::fs::create_dir_all(&codex_home)?;
let target = codex_home.join(CONFIG_FILE_NAME);
let tmp = NamedTempFile::new_in(&codex_home)?;
std::fs::write(tmp.path(), serialized.as_bytes())?;
tmp.persist(&target)?;
Ok(())
})
.await
.map_err(|err| anyhow!("config persistence task panicked: {err}"))??;
Ok(())
}
}
fn map_error(err: ConfigServiceError) -> JSONRPCErrorError {
if let Some(code) = err.write_error_code() {
return config_write_error(code, err.to_string());
fn parse_value(value: JsonValue) -> Result<Option<TomlValue>, String> {
if value.is_null() {
return Ok(None);
}
serde_json::from_value::<TomlValue>(value)
.map(Some)
.map_err(|err| format!("invalid value: {err}"))
}
fn parse_key_path(path: &str) -> Result<Vec<String>, String> {
if path.trim().is_empty() {
return Err("keyPath must not be empty".to_string());
}
Ok(path
.split('.')
.map(std::string::ToString::to_string)
.collect())
}
fn apply_override(target: &mut TomlValue, path: &str, value: TomlValue) {
use toml::value::Table;
let segments: Vec<&str> = path.split('.').collect();
let mut current = target;
for (idx, segment) in segments.iter().enumerate() {
let is_last = idx == segments.len() - 1;
if is_last {
match current {
TomlValue::Table(table) => {
table.insert(segment.to_string(), value);
}
_ => {
let mut table = Table::new();
table.insert(segment.to_string(), value);
*current = TomlValue::Table(table);
}
}
return;
}
match current {
TomlValue::Table(table) => {
current = table
.entry((*segment).to_string())
.or_insert_with(|| TomlValue::Table(Table::new()));
}
_ => {
*current = TomlValue::Table(Table::new());
if let TomlValue::Table(tbl) = current {
current = tbl
.entry((*segment).to_string())
.or_insert_with(|| TomlValue::Table(Table::new()));
}
}
}
}
}
#[derive(Debug)]
enum MergeError {
PathNotFound,
Validation(String),
}
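/// Apply a single edit at `segments` within `root`: a `None` value clears the
/// path, `Upsert` deep-merges table values, and the return value indicates
/// whether the config may have changed.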
fn apply_merge(
root: &mut TomlValue,
segments: &[String],
value: Option<&TomlValue>,
strategy: MergeStrategy,
) -> Result<bool, MergeError> {
let Some(value) = value else {
return clear_path(root, segments);
};
let Some((last, parents)) = segments.split_last() else {
return Err(MergeError::Validation(
"keyPath must not be empty".to_string(),
));
};
let mut current = root;
for segment in parents {
match current {
TomlValue::Table(table) => {
current = table
.entry(segment.clone())
.or_insert_with(|| TomlValue::Table(toml::map::Map::new()));
}
_ => {
*current = TomlValue::Table(toml::map::Map::new());
if let TomlValue::Table(table) = current {
current = table
.entry(segment.clone())
.or_insert_with(|| TomlValue::Table(toml::map::Map::new()));
}
}
}
}
let table = current.as_table_mut().ok_or_else(|| {
MergeError::Validation("cannot set value on non-table parent".to_string())
})?;
if matches!(strategy, MergeStrategy::Upsert)
&& let Some(existing) = table.get_mut(last)
&& matches!(existing, TomlValue::Table(_))
&& matches!(value, TomlValue::Table(_))
{
merge_toml_values(existing, value);
return Ok(true);
}
let changed = table
.get(last)
.map(|existing| Some(existing) != Some(value))
.unwrap_or(true);
table.insert(last.clone(), value.clone());
Ok(changed)
}
fn clear_path(root: &mut TomlValue, segments: &[String]) -> Result<bool, MergeError> {
let Some((last, parents)) = segments.split_last() else {
return Err(MergeError::Validation(
"keyPath must not be empty".to_string(),
));
};
let mut current = root;
for segment in parents {
match current {
TomlValue::Table(table) => {
current = table.get_mut(segment).ok_or(MergeError::PathNotFound)?;
}
_ => return Err(MergeError::PathNotFound),
}
}
let Some(parent) = current.as_table_mut() else {
return Err(MergeError::PathNotFound);
};
Ok(parent.remove(last).is_some())
}
#[derive(Clone)]
struct LayerState {
name: ConfigLayerName,
source: PathBuf,
config: TomlValue,
version: String,
}
impl LayerState {
fn new(name: ConfigLayerName, source: PathBuf, config: TomlValue) -> Self {
let version = version_for_toml(&config);
Self {
name,
source,
config,
version,
}
}
fn metadata(&self) -> ConfigLayerMetadata {
ConfigLayerMetadata {
name: self.name.clone(),
source: self.source.display().to_string(),
version: self.version.clone(),
}
}
fn as_layer(&self) -> ConfigLayer {
ConfigLayer {
name: self.name.clone(),
source: self.source.display().to_string(),
version: self.version.clone(),
config: to_json_value(&self.config),
}
}
}
#[derive(Clone)]
struct LayersState {
user: LayerState,
session_flags: LayerState,
system: Option<LayerState>,
mdm: Option<LayerState>,
}
impl LayersState {
fn with_user_config(self, user_config: TomlValue) -> Self {
Self {
user: LayerState::new(self.user.name, self.user.source, user_config),
session_flags: self.session_flags,
system: self.system,
mdm: self.mdm,
}
}
fn effective_config(&self) -> TomlValue {
let mut merged = self.user.config.clone();
merge_toml_values(&mut merged, &self.session_flags.config);
if let Some(system) = &self.system {
merge_toml_values(&mut merged, &system.config);
}
if let Some(mdm) = &self.mdm {
merge_toml_values(&mut merged, &mdm.config);
}
merged
}
fn origins(&self) -> HashMap<String, ConfigLayerMetadata> {
let mut origins = HashMap::new();
let mut path = Vec::new();
record_origins(
&self.user.config,
&self.user.metadata(),
&mut path,
&mut origins,
);
record_origins(
&self.session_flags.config,
&self.session_flags.metadata(),
&mut path,
&mut origins,
);
if let Some(system) = &self.system {
record_origins(&system.config, &system.metadata(), &mut path, &mut origins);
}
if let Some(mdm) = &self.mdm {
record_origins(&mdm.config, &mdm.metadata(), &mut path, &mut origins);
}
origins
}
fn layers_high_to_low(&self) -> Vec<ConfigLayer> {
let mut layers = Vec::new();
if let Some(mdm) = &self.mdm {
layers.push(mdm.as_layer());
}
if let Some(system) = &self.system {
layers.push(system.as_layer());
}
layers.push(self.session_flags.as_layer());
layers.push(self.user.as_layer());
layers
}
}
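/// Walk a layer's config tree, recording for each dotted leaf path the
/// metadata of the layer that supplies it.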
fn record_origins(
value: &TomlValue,
meta: &ConfigLayerMetadata,
path: &mut Vec<String>,
origins: &mut HashMap<String, ConfigLayerMetadata>,
) {
match value {
TomlValue::Table(table) => {
for (key, val) in table {
path.push(key.clone());
record_origins(val, meta, path, origins);
path.pop();
}
}
TomlValue::Array(items) => {
for (idx, item) in items.iter().enumerate() {
path.push(idx.to_string());
record_origins(item, meta, path, origins);
path.pop();
}
}
_ => {
if !path.is_empty() {
origins.insert(path.join("."), meta.clone());
}
}
}
}
fn to_json_value(value: &TomlValue) -> JsonValue {
serde_json::to_value(value).unwrap_or(JsonValue::Null)
}
fn validate_config(value: &TomlValue) -> Result<(), toml::de::Error> {
let _: ConfigToml = value.clone().try_into()?;
Ok(())
}
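/// Derive a stable version string for a layer by hashing its canonical JSON form.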
fn version_for_toml(value: &TomlValue) -> String {
let json = to_json_value(value);
let canonical = canonical_json(&json);
let serialized = serde_json::to_vec(&canonical).unwrap_or_default();
let mut hasher = Sha256::new();
hasher.update(serialized);
let hash = hasher.finalize();
let hex = hash
.iter()
.map(|byte| format!("{byte:02x}"))
.collect::<String>();
format!("sha256:{hex}")
}
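/// Recursively sort object keys so the version hash is independent of key order.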
fn canonical_json(value: &JsonValue) -> JsonValue {
match value {
JsonValue::Object(map) => {
let mut sorted = serde_json::Map::new();
let mut keys = map.keys().cloned().collect::<Vec<_>>();
keys.sort();
for key in keys {
if let Some(val) = map.get(&key) {
sorted.insert(key, canonical_json(val));
}
}
JsonValue::Object(sorted)
}
JsonValue::Array(items) => JsonValue::Array(items.iter().map(canonical_json).collect()),
other => other.clone(),
}
}
fn paths_match(expected: &Path, provided: &str) -> bool {
let provided_path = PathBuf::from(provided);
if let (Ok(expanded_expected), Ok(expanded_provided)) =
(expected.canonicalize(), provided_path.canonicalize())
{
return expanded_expected == expanded_provided;
}
expected == provided_path
}
fn value_at_path<'a>(root: &'a TomlValue, segments: &[String]) -> Option<&'a TomlValue> {
let mut current = root;
for segment in segments {
match current {
TomlValue::Table(table) => {
current = table.get(segment)?;
}
TomlValue::Array(items) => {
let idx: usize = segment.parse().ok()?;
current = items.get(idx)?;
}
_ => return None,
}
}
Some(current)
}
fn override_message(layer: &ConfigLayerName) -> String {
match layer {
ConfigLayerName::Mdm => "Overridden by managed policy (mdm)".to_string(),
ConfigLayerName::System => "Overridden by managed config (system)".to_string(),
ConfigLayerName::SessionFlags => "Overridden by session flags".to_string(),
ConfigLayerName::User => "Overridden by user config".to_string(),
}
}
fn compute_override_metadata(
layers: &LayersState,
effective: &TomlValue,
segments: &[String],
) -> Option<OverriddenMetadata> {
let user_value = value_at_path(&layers.user.config, segments);
let effective_value = value_at_path(effective, segments);
if user_value.is_some() && user_value == effective_value {
return None;
}
if user_value.is_none() && effective_value.is_none() {
return None;
}
let effective_layer = find_effective_layer(layers, segments);
let overriding_layer = effective_layer.unwrap_or_else(|| layers.user.metadata());
let message = override_message(&overriding_layer.name);
Some(OverriddenMetadata {
message,
overriding_layer,
effective_value: effective_value
.map(to_json_value)
.unwrap_or(JsonValue::Null),
})
}
fn first_overridden_edit(
layers: &LayersState,
effective: &TomlValue,
edits: &[Vec<String>],
) -> Option<OverriddenMetadata> {
for segments in edits {
if let Some(meta) = compute_override_metadata(layers, effective, segments) {
return Some(meta);
}
}
None
}
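/// Find the highest-precedence layer (MDM, then system, then session flags,
/// then user) that defines a value at `segments`.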
fn find_effective_layer(layers: &LayersState, segments: &[String]) -> Option<ConfigLayerMetadata> {
let check =
|state: &LayerState| value_at_path(&state.config, segments).map(|_| state.metadata());
if let Some(mdm) = &layers.mdm
&& let Some(meta) = check(mdm)
{
return Some(meta);
}
if let Some(system) = &layers.system
&& let Some(meta) = check(system)
{
return Some(meta);
}
if let Some(meta) = check(&layers.session_flags) {
return Some(meta);
}
check(&layers.user)
}
fn system_config_path(codex_home: &Path) -> PathBuf {
if let Ok(path) = std::env::var("CODEX_MANAGED_CONFIG_PATH") {
return PathBuf::from(path);
}
#[cfg(unix)]
{
let _ = codex_home;
PathBuf::from("/etc/codex/managed_config.toml")
}
#[cfg(not(unix))]
{
codex_home.join("managed_config.toml")
}
}
fn internal_error<E: std::fmt::Display>(context: &str, err: E) -> JSONRPCErrorError {
JSONRPCErrorError {
code: INTERNAL_ERROR_CODE,
message: err.to_string(),
message: format!("{context}: {err}"),
data: None,
}
}
@@ -68,3 +720,255 @@ fn config_write_error(code: ConfigWriteErrorCode, message: impl Into<String>) ->
})),
}
}
#[cfg(test)]
mod tests {
use super::*;
use pretty_assertions::assert_eq;
use tempfile::tempdir;
#[tokio::test]
async fn read_includes_origins_and_layers() {
let tmp = tempdir().expect("tempdir");
std::fs::write(tmp.path().join(CONFIG_FILE_NAME), "model = \"user\"").unwrap();
let managed_path = tmp.path().join("managed_config.toml");
std::fs::write(&managed_path, "approval_policy = \"never\"").unwrap();
let api = ConfigApi::with_overrides(
tmp.path().to_path_buf(),
vec![],
LoaderOverrides {
managed_config_path: Some(managed_path),
#[cfg(target_os = "macos")]
managed_preferences_base64: None,
},
);
let response = api
.read(ConfigReadParams {
include_layers: true,
})
.await
.expect("response");
assert_eq!(
response.config.get("approval_policy"),
Some(&json!("never"))
);
assert_eq!(
response
.origins
.get("approval_policy")
.expect("origin")
.name,
ConfigLayerName::System
);
let layers = response.layers.expect("layers present");
assert_eq!(layers.first().unwrap().name, ConfigLayerName::System);
assert_eq!(layers.get(1).unwrap().name, ConfigLayerName::SessionFlags);
assert_eq!(layers.last().unwrap().name, ConfigLayerName::User);
}
#[tokio::test]
async fn write_value_reports_override() {
let tmp = tempdir().expect("tempdir");
std::fs::write(
tmp.path().join(CONFIG_FILE_NAME),
"approval_policy = \"on-request\"",
)
.unwrap();
let managed_path = tmp.path().join("managed_config.toml");
std::fs::write(&managed_path, "approval_policy = \"never\"").unwrap();
let api = ConfigApi::with_overrides(
tmp.path().to_path_buf(),
vec![],
LoaderOverrides {
managed_config_path: Some(managed_path),
#[cfg(target_os = "macos")]
managed_preferences_base64: None,
},
);
let result = api
.write_value(ConfigValueWriteParams {
file_path: tmp.path().join(CONFIG_FILE_NAME).display().to_string(),
key_path: "approval_policy".to_string(),
value: json!("never"),
merge_strategy: MergeStrategy::Replace,
expected_version: None,
})
.await
.expect("result");
let read_after = api
.read(ConfigReadParams {
include_layers: true,
})
.await
.expect("read");
let config_object = read_after.config.as_object().expect("object");
assert_eq!(config_object.get("approval_policy"), Some(&json!("never")));
assert_eq!(
read_after
.origins
.get("approval_policy")
.expect("origin")
.name,
ConfigLayerName::System
);
assert_eq!(result.status, WriteStatus::Ok);
assert!(result.overridden_metadata.is_none());
}
#[tokio::test]
async fn version_conflict_rejected() {
let tmp = tempdir().expect("tempdir");
std::fs::write(tmp.path().join(CONFIG_FILE_NAME), "model = \"user\"").unwrap();
let api = ConfigApi::new(tmp.path().to_path_buf(), vec![]);
let error = api
.write_value(ConfigValueWriteParams {
file_path: tmp.path().join(CONFIG_FILE_NAME).display().to_string(),
key_path: "model".to_string(),
value: json!("gpt-5"),
merge_strategy: MergeStrategy::Replace,
expected_version: Some("sha256:bogus".to_string()),
})
.await
.expect_err("should fail");
assert_eq!(error.code, INVALID_REQUEST_ERROR_CODE);
assert_eq!(
error
.data
.as_ref()
.and_then(|d| d.get("config_write_error_code"))
.and_then(serde_json::Value::as_str),
Some("configVersionConflict")
);
}
#[tokio::test]
async fn invalid_user_value_rejected_even_if_overridden_by_managed() {
let tmp = tempdir().expect("tempdir");
std::fs::write(tmp.path().join(CONFIG_FILE_NAME), "model = \"user\"").unwrap();
let managed_path = tmp.path().join("managed_config.toml");
std::fs::write(&managed_path, "approval_policy = \"never\"").unwrap();
let api = ConfigApi::with_overrides(
tmp.path().to_path_buf(),
vec![],
LoaderOverrides {
managed_config_path: Some(managed_path),
#[cfg(target_os = "macos")]
managed_preferences_base64: None,
},
);
let error = api
.write_value(ConfigValueWriteParams {
file_path: tmp.path().join(CONFIG_FILE_NAME).display().to_string(),
key_path: "approval_policy".to_string(),
value: json!("bogus"),
merge_strategy: MergeStrategy::Replace,
expected_version: None,
})
.await
.expect_err("should fail validation");
assert_eq!(error.code, INVALID_REQUEST_ERROR_CODE);
assert_eq!(
error
.data
.as_ref()
.and_then(|d| d.get("config_write_error_code"))
.and_then(serde_json::Value::as_str),
Some("configValidationError")
);
let contents =
std::fs::read_to_string(tmp.path().join(CONFIG_FILE_NAME)).expect("read config");
assert_eq!(contents.trim(), "model = \"user\"");
}
#[tokio::test]
async fn read_reports_managed_overrides_user_and_session_flags() {
let tmp = tempdir().expect("tempdir");
std::fs::write(tmp.path().join(CONFIG_FILE_NAME), "model = \"user\"").unwrap();
let managed_path = tmp.path().join("managed_config.toml");
std::fs::write(&managed_path, "model = \"system\"").unwrap();
let cli_overrides = vec![(
"model".to_string(),
TomlValue::String("session".to_string()),
)];
let api = ConfigApi::with_overrides(
tmp.path().to_path_buf(),
cli_overrides,
LoaderOverrides {
managed_config_path: Some(managed_path),
#[cfg(target_os = "macos")]
managed_preferences_base64: None,
},
);
let response = api
.read(ConfigReadParams {
include_layers: true,
})
.await
.expect("response");
assert_eq!(response.config.get("model"), Some(&json!("system")));
assert_eq!(
response.origins.get("model").expect("origin").name,
ConfigLayerName::System
);
let layers = response.layers.expect("layers");
assert_eq!(layers.first().unwrap().name, ConfigLayerName::System);
assert_eq!(layers.get(1).unwrap().name, ConfigLayerName::SessionFlags);
assert_eq!(layers.get(2).unwrap().name, ConfigLayerName::User);
}
#[tokio::test]
async fn write_value_reports_managed_override() {
let tmp = tempdir().expect("tempdir");
std::fs::write(tmp.path().join(CONFIG_FILE_NAME), "").unwrap();
let managed_path = tmp.path().join("managed_config.toml");
std::fs::write(&managed_path, "approval_policy = \"never\"").unwrap();
let api = ConfigApi::with_overrides(
tmp.path().to_path_buf(),
vec![],
LoaderOverrides {
managed_config_path: Some(managed_path),
#[cfg(target_os = "macos")]
managed_preferences_base64: None,
},
);
let result = api
.write_value(ConfigValueWriteParams {
file_path: tmp.path().join(CONFIG_FILE_NAME).display().to_string(),
key_path: "approval_policy".to_string(),
value: json!("on-request"),
merge_strategy: MergeStrategy::Replace,
expected_version: None,
})
.await
.expect("result");
assert_eq!(result.status, WriteStatus::OkOverridden);
let overridden = result.overridden_metadata.expect("overridden metadata");
assert_eq!(overridden.overriding_layer.name, ConfigLayerName::System);
assert_eq!(overridden.effective_value, json!("never"));
}
}

View File

@@ -3,6 +3,7 @@
use codex_common::CliConfigOverrides;
use codex_core::config::Config;
use codex_core::config::ConfigOverrides;
use opentelemetry_appender_tracing::layer::OpenTelemetryTracingBridge;
use std::io::ErrorKind;
use std::io::Result as IoResult;
use std::path::PathBuf;
@@ -102,7 +103,6 @@ pub async fn run_main(
// control the log level with `RUST_LOG`.
let stderr_fmt = tracing_subscriber::fmt::layer()
.with_writer(std::io::stderr)
.with_span_events(tracing_subscriber::fmt::format::FmtSpan::FULL)
.with_filter(EnvFilter::from_default_env());
let feedback_layer = tracing_subscriber::fmt::layer()
@@ -111,15 +111,14 @@ pub async fn run_main(
.with_target(false)
.with_filter(Targets::new().with_default(Level::TRACE));
let otel_logger_layer = otel.as_ref().and_then(|o| o.logger_layer());
let otel_tracing_layer = otel.as_ref().and_then(|o| o.tracing_layer());
let _ = tracing_subscriber::registry()
.with(stderr_fmt)
.with(feedback_layer)
.with(otel_logger_layer)
.with(otel_tracing_layer)
.with(otel.as_ref().map(|provider| {
OpenTelemetryTracingBridge::new(&provider.logger).with_filter(
tracing_subscriber::filter::filter_fn(codex_core::otel_init::codex_export_filter),
)
}))
.try_init();
// Task: process incoming messages.

View File

@@ -59,7 +59,6 @@ impl MessageProcessor {
outgoing.clone(),
codex_linux_sandbox_exe,
Arc::clone(&config),
cli_overrides.clone(),
feedback,
);
let config_api = ConfigApi::new(config.codex_home.clone(), cli_overrides);
@@ -128,8 +127,6 @@ impl MessageProcessor {
self.outgoing.send_response(request_id, response).await;
self.initialized = true;
self.codex_message_processor
.spawn_model_presets_notification();
return;
}

View File

@@ -1,19 +1,12 @@
use std::sync::Arc;
use codex_app_server_protocol::AuthMode;
use codex_app_server_protocol::Model;
use codex_app_server_protocol::ReasoningEffortOption;
use codex_core::ConversationManager;
use codex_core::config::Config;
use codex_protocol::openai_models::ModelPreset;
use codex_protocol::openai_models::ReasoningEffortPreset;
use codex_common::model_presets::ModelPreset;
use codex_common::model_presets::ReasoningEffortPreset;
use codex_common::model_presets::builtin_model_presets;
pub async fn supported_models(
conversation_manager: Arc<ConversationManager>,
config: &Config,
) -> Vec<Model> {
conversation_manager
.list_models(config)
.await
pub fn supported_models(auth_mode: Option<AuthMode>) -> Vec<Model> {
builtin_model_presets(auth_mode)
.into_iter()
.map(model_from_preset)
.collect()
@@ -34,7 +27,7 @@ fn model_from_preset(preset: ModelPreset) -> Model {
}
fn reasoning_efforts_from_preset(
efforts: Vec<ReasoningEffortPreset>,
efforts: &'static [ReasoningEffortPreset],
) -> Vec<ReasoningEffortOption> {
efforts
.iter()

View File

@@ -16,9 +16,6 @@ use tracing::warn;
use crate::error_code::INTERNAL_ERROR_CODE;
#[cfg(test)]
use codex_protocol::account::PlanType;
/// Sends messages to the client and manages request callbacks.
pub(crate) struct OutgoingMessageSender {
next_request_id: AtomicI64,
@@ -233,7 +230,6 @@ mod tests {
}),
secondary: None,
credits: None,
plan_type: Some(PlanType::Plus),
},
});
@@ -249,8 +245,7 @@ mod tests {
"resetsAt": 123
},
"secondary": null,
"credits": null,
"planType": "plus"
"credits": null
}
},
}),

View File

@@ -13,7 +13,7 @@ assert_cmd = { workspace = true }
base64 = { workspace = true }
chrono = { workspace = true }
codex-app-server-protocol = { workspace = true }
codex-core = { workspace = true, features = ["test-support"] }
codex-core = { workspace = true }
codex-protocol = { workspace = true }
serde = { workspace = true }
serde_json = { workspace = true }

View File

@@ -1,7 +1,6 @@
mod auth_fixtures;
mod mcp_process;
mod mock_model_server;
mod models_cache;
mod responses;
mod rollout;
@@ -12,16 +11,9 @@ pub use auth_fixtures::write_chatgpt_auth;
use codex_app_server_protocol::JSONRPCResponse;
pub use core_test_support::format_with_current_shell;
pub use core_test_support::format_with_current_shell_display;
pub use core_test_support::format_with_current_shell_display_non_login;
pub use core_test_support::format_with_current_shell_non_login;
pub use core_test_support::test_path_buf_with_windows;
pub use core_test_support::test_tmp_path;
pub use core_test_support::test_tmp_path_buf;
pub use mcp_process::McpProcess;
pub use mock_model_server::create_mock_chat_completions_server;
pub use mock_model_server::create_mock_chat_completions_server_unchecked;
pub use models_cache::write_models_cache;
pub use models_cache::write_models_cache_with_models;
pub use responses::create_apply_patch_sse_response;
pub use responses::create_exec_command_sse_response;
pub use responses::create_final_assistant_message_sse_response;

View File

@@ -1,85 +0,0 @@
use chrono::DateTime;
use chrono::Utc;
use codex_core::openai_models::model_presets::all_model_presets;
use codex_protocol::openai_models::ClientVersion;
use codex_protocol::openai_models::ConfigShellToolType;
use codex_protocol::openai_models::ModelInfo;
use codex_protocol::openai_models::ModelPreset;
use codex_protocol::openai_models::ModelVisibility;
use codex_protocol::openai_models::ReasoningSummaryFormat;
use codex_protocol::openai_models::TruncationPolicyConfig;
use serde_json::json;
use std::path::Path;
/// Convert a ModelPreset to ModelInfo for cache storage.
fn preset_to_info(preset: &ModelPreset, priority: i32) -> ModelInfo {
ModelInfo {
slug: preset.id.clone(),
display_name: preset.display_name.clone(),
description: Some(preset.description.clone()),
default_reasoning_level: preset.default_reasoning_effort,
supported_reasoning_levels: preset.supported_reasoning_efforts.clone(),
shell_type: ConfigShellToolType::ShellCommand,
visibility: if preset.show_in_picker {
ModelVisibility::List
} else {
ModelVisibility::Hide
},
minimal_client_version: ClientVersion(0, 1, 0),
supported_in_api: true,
priority,
upgrade: preset.upgrade.as_ref().map(|u| u.id.clone()),
base_instructions: None,
supports_reasoning_summaries: false,
support_verbosity: false,
default_verbosity: None,
apply_patch_tool_type: None,
truncation_policy: TruncationPolicyConfig::bytes(10_000),
supports_parallel_tool_calls: false,
context_window: None,
reasoning_summary_format: ReasoningSummaryFormat::None,
experimental_supported_tools: Vec::new(),
}
}
/// Write a models_cache.json file to the codex home directory.
/// This prevents ModelsManager from making network requests to refresh models.
/// The cache will be treated as fresh (within TTL) and used instead of fetching from the network.
/// Uses the built-in model presets from ModelsManager, converted to ModelInfo format.
pub fn write_models_cache(codex_home: &Path) -> std::io::Result<()> {
// Get all presets and filter for show_in_picker (same as builtin_model_presets does)
let presets: Vec<&ModelPreset> = all_model_presets()
.iter()
.filter(|preset| preset.show_in_picker)
.collect();
// Convert presets to ModelInfo, assigning priorities (higher = earlier in list).
// Priority is used for sorting, so the first model gets the highest priority.
let models: Vec<ModelInfo> = presets
.iter()
.enumerate()
.map(|(idx, preset)| {
// Higher priority = earlier in list, so reverse the index
let priority = (presets.len() - idx) as i32;
preset_to_info(preset, priority)
})
.collect();
write_models_cache_with_models(codex_home, models)
}
/// Write a models_cache.json file with specific models.
/// Useful when tests need specific models to be available.
pub fn write_models_cache_with_models(
codex_home: &Path,
models: Vec<ModelInfo>,
) -> std::io::Result<()> {
let cache_path = codex_home.join("models_cache.json");
// DateTime<Utc> serializes to RFC3339 format by default with serde
let fetched_at: DateTime<Utc> = Utc::now();
let cache = json!({
"fetched_at": fetched_at,
"etag": null,
"models": models
});
std::fs::write(cache_path, serde_json::to_string_pretty(&cache)?)
}
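// Hedged usage sketch (not part of the original change): integration tests
// seeded this cache before spawning the app server so that model listing
// never touched the network, along the lines of:
//
//     let codex_home = TempDir::new()?;
//     write_models_cache(codex_home.path())?;
//     let mut mcp = McpProcess::new(codex_home.path()).await?;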

View File

@@ -23,10 +23,10 @@ use codex_app_server_protocol::SendUserTurnResponse;
use codex_app_server_protocol::ServerRequest;
use codex_core::protocol::AskForApproval;
use codex_core::protocol::SandboxPolicy;
use codex_core::protocol_config_types::ReasoningEffort;
use codex_core::protocol_config_types::ReasoningSummary;
use codex_core::spawn::CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR;
use codex_protocol::config_types::SandboxMode;
use codex_protocol::openai_models::ReasoningEffort;
use codex_protocol::parse_command::ParsedCommand;
use codex_protocol::protocol::Event;
use codex_protocol::protocol::EventMsg;
@@ -271,6 +271,7 @@ async fn test_send_user_turn_changes_approval_policy_behavior() -> Result<()> {
command: format_with_current_shell("python3 -c 'print(42)'"),
cwd: working_directory.clone(),
reason: None,
risk: None,
parsed_cmd: vec![ParsedCommand::Unknown {
cmd: "python3 -c 'print(42)'".to_string()
}],
@@ -410,7 +411,7 @@ async fn test_send_user_turn_updates_sandbox_and_cwd_between_turns() -> Result<(
cwd: first_cwd.clone(),
approval_policy: AskForApproval::Never,
sandbox_policy: SandboxPolicy::WorkspaceWrite {
writable_roots: vec![first_cwd.try_into()?],
writable_roots: vec![first_cwd.clone()],
network_access: false,
exclude_tmpdir_env_var: false,
exclude_slash_tmp: false,

View File

@@ -1,6 +1,5 @@
use anyhow::Result;
use app_test_support::McpProcess;
use app_test_support::test_tmp_path;
use app_test_support::to_response;
use codex_app_server_protocol::GetUserSavedConfigResponse;
use codex_app_server_protocol::JSONRPCResponse;
@@ -11,10 +10,10 @@ use codex_app_server_protocol::Tools;
use codex_app_server_protocol::UserSavedConfig;
use codex_core::protocol::AskForApproval;
use codex_protocol::config_types::ForcedLoginMethod;
use codex_protocol::config_types::ReasoningEffort;
use codex_protocol::config_types::ReasoningSummary;
use codex_protocol::config_types::SandboxMode;
use codex_protocol::config_types::Verbosity;
use codex_protocol::openai_models::ReasoningEffort;
use pretty_assertions::assert_eq;
use std::collections::HashMap;
use std::path::Path;
@@ -24,12 +23,10 @@ use tokio::time::timeout;
const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10);
fn create_config_toml(codex_home: &Path) -> std::io::Result<()> {
let writable_root = test_tmp_path();
let config_toml = codex_home.join("config.toml");
std::fs::write(
config_toml,
format!(
r#"
r#"
model = "gpt-5.1-codex-max"
approval_policy = "on-request"
sandbox_mode = "workspace-write"
@@ -41,7 +38,7 @@ forced_chatgpt_workspace_id = "12345678-0000-0000-0000-000000000000"
forced_login_method = "chatgpt"
[sandbox_workspace_write]
writable_roots = [{}]
writable_roots = ["/tmp"]
network_access = true
exclude_tmpdir_env_var = true
exclude_slash_tmp = true
@@ -59,8 +56,6 @@ model_verbosity = "medium"
model_provider = "openai"
chatgpt_base_url = "https://api.chatgpt.com"
"#,
serde_json::json!(writable_root)
),
)
}
@@ -80,13 +75,12 @@ async fn get_config_toml_parses_all_fields() -> Result<()> {
.await??;
let config: GetUserSavedConfigResponse = to_response(resp)?;
let writable_root = test_tmp_path();
let expected = GetUserSavedConfigResponse {
config: UserSavedConfig {
approval_policy: Some(AskForApproval::OnRequest),
sandbox_mode: Some(SandboxMode::WorkspaceWrite),
sandbox_settings: Some(SandboxSettings {
writable_roots: vec![writable_root],
writable_roots: vec!["/tmp".into()],
network_access: Some(true),
exclude_tmpdir_env_var: Some(true),
exclude_slash_tmp: Some(true),

View File

@@ -358,81 +358,3 @@ async fn test_list_and_resume_conversations() -> Result<()> {
Ok(())
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn list_conversations_fetches_through_filtered_pages() -> Result<()> {
let codex_home = TempDir::new()?;
// Only the last 3 conversations match the provider filter; request 3 and
// ensure pagination keeps fetching past non-matching pages.
let cases = [
(
"2025-03-04T12-00-00",
"2025-03-04T12:00:00Z",
"skip_provider",
),
(
"2025-03-03T12-00-00",
"2025-03-03T12:00:00Z",
"skip_provider",
),
(
"2025-03-02T12-00-00",
"2025-03-02T12:00:00Z",
"target_provider",
),
(
"2025-03-01T12-00-00",
"2025-03-01T12:00:00Z",
"target_provider",
),
(
"2025-02-28T12-00-00",
"2025-02-28T12:00:00Z",
"target_provider",
),
];
for (ts_file, ts_rfc, provider) in cases {
create_fake_rollout(
codex_home.path(),
ts_file,
ts_rfc,
"Hello",
Some(provider),
None,
)?;
}
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let req_id = mcp
.send_list_conversations_request(ListConversationsParams {
page_size: Some(3),
cursor: None,
model_providers: Some(vec!["target_provider".to_string()]),
})
.await?;
let resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(req_id)),
)
.await??;
let ListConversationsResponse { items, next_cursor } =
to_response::<ListConversationsResponse>(resp)?;
assert_eq!(
items.len(),
3,
"should fetch across pages to satisfy the limit"
);
assert!(
items
.iter()
.all(|item| item.model_provider == "target_provider")
);
assert_eq!(next_cursor, None);
Ok(())
}

View File

@@ -1,6 +1,8 @@
use anyhow::Result;
use app_test_support::McpProcess;
use app_test_support::to_response;
use codex_app_server_protocol::CancelLoginChatGptParams;
use codex_app_server_protocol::CancelLoginChatGptResponse;
use codex_app_server_protocol::GetAuthStatusParams;
use codex_app_server_protocol::GetAuthStatusResponse;
use codex_app_server_protocol::JSONRPCError;
@@ -12,6 +14,7 @@ use codex_core::auth::AuthCredentialsStoreMode;
use codex_login::login_with_api_key;
use serial_test::serial;
use std::path::Path;
use std::time::Duration;
use tempfile::TempDir;
use tokio::time::timeout;
@@ -84,6 +87,48 @@ async fn logout_chatgpt_removes_auth() -> Result<()> {
Ok(())
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
// Serialize tests that launch the login server since it binds to a fixed port.
#[serial(login_port)]
async fn login_and_cancel_chatgpt() -> Result<()> {
let codex_home = TempDir::new()?;
create_config_toml(codex_home.path())?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let login_id = mcp.send_login_chat_gpt_request().await?;
let login_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(login_id)),
)
.await??;
let login: LoginChatGptResponse = to_response(login_resp)?;
let cancel_id = mcp
.send_cancel_login_chat_gpt_request(CancelLoginChatGptParams {
login_id: login.login_id,
})
.await?;
let cancel_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(cancel_id)),
)
.await??;
let _ok: CancelLoginChatGptResponse = to_response(cancel_resp)?;
// Optionally observe the completion notification; do not fail if it races.
let maybe_note = timeout(
Duration::from_secs(2),
mcp.read_stream_until_notification_message("codex/event/login_chat_gpt_complete"),
)
.await;
if maybe_note.is_err() {
eprintln!("warning: did not observe login_chat_gpt_complete notification after cancel");
}
Ok(())
}
fn create_config_toml_forced_login(codex_home: &Path, forced_method: &str) -> std::io::Result<()> {
let config_toml = codex_home.join("config.toml");
let contents = format!(

View File

@@ -241,7 +241,7 @@ async fn login_account_chatgpt_rejected_when_forced_api() -> Result<()> {
#[tokio::test]
// Serialize tests that launch the login server since it binds to a fixed port.
#[serial(login_port)]
async fn login_account_chatgpt_start_can_be_cancelled() -> Result<()> {
async fn login_account_chatgpt_start() -> Result<()> {
let codex_home = TempDir::new()?;
create_config_toml(codex_home.path(), CreateConfigTomlParams::default())?;

View File

@@ -1,9 +1,6 @@
use anyhow::Result;
use app_test_support::McpProcess;
use app_test_support::test_path_buf_with_windows;
use app_test_support::test_tmp_path_buf;
use app_test_support::to_response;
use codex_app_server_protocol::AskForApproval;
use codex_app_server_protocol::ConfigBatchWriteParams;
use codex_app_server_protocol::ConfigEdit;
use codex_app_server_protocol::ConfigLayerName;
@@ -15,8 +12,6 @@ use codex_app_server_protocol::JSONRPCError;
use codex_app_server_protocol::JSONRPCResponse;
use codex_app_server_protocol::MergeStrategy;
use codex_app_server_protocol::RequestId;
use codex_app_server_protocol::SandboxMode;
use codex_app_server_protocol::ToolsV2;
use codex_app_server_protocol::WriteStatus;
use pretty_assertions::assert_eq;
use serde_json::json;
@@ -62,7 +57,7 @@ sandbox_mode = "workspace-write"
layers,
} = to_response(resp)?;
assert_eq!(config.model.as_deref(), Some("gpt-user"));
assert_eq!(config.get("model"), Some(&json!("gpt-user")));
assert_eq!(
origins.get("model").expect("origin").name,
ConfigLayerName::User
@@ -76,97 +71,31 @@ sandbox_mode = "workspace-write"
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn config_read_includes_tools() -> Result<()> {
async fn config_read_includes_system_layer_and_overrides() -> Result<()> {
let codex_home = TempDir::new()?;
write_config(
&codex_home,
r#"
model = "gpt-user"
[tools]
web_search = true
view_image = false
"#,
)?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let request_id = mcp
.send_config_read_request(ConfigReadParams {
include_layers: true,
})
.await?;
let resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(request_id)),
)
.await??;
let ConfigReadResponse {
config,
origins,
layers,
} = to_response(resp)?;
let tools = config.tools.expect("tools present");
assert_eq!(
tools,
ToolsV2 {
web_search: Some(true),
view_image: Some(false),
}
);
assert_eq!(
origins.get("tools.web_search").expect("origin").name,
ConfigLayerName::User
);
assert_eq!(
origins.get("tools.view_image").expect("origin").name,
ConfigLayerName::User
);
let layers = layers.expect("layers present");
assert_eq!(layers.len(), 2);
assert_eq!(layers[0].name, ConfigLayerName::SessionFlags);
assert_eq!(layers[1].name, ConfigLayerName::User);
Ok(())
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn config_read_includes_system_layer_and_overrides() -> Result<()> {
let codex_home = TempDir::new()?;
let user_dir = test_path_buf_with_windows("/user", Some(r"C:\Users\user"));
let system_dir = test_path_buf_with_windows("/system", Some(r"C:\System"));
write_config(
&codex_home,
&format!(
r#"
model = "gpt-user"
approval_policy = "on-request"
sandbox_mode = "workspace-write"
[sandbox_workspace_write]
writable_roots = [{}]
writable_roots = ["/user"]
network_access = true
"#,
serde_json::json!(user_dir)
),
)?;
let managed_path = codex_home.path().join("managed_config.toml");
std::fs::write(
&managed_path,
format!(
r#"
r#"
model = "gpt-system"
approval_policy = "never"
[sandbox_workspace_write]
writable_roots = [{}]
writable_roots = ["/system"]
"#,
serde_json::json!(system_dir.clone())
),
)?;
let managed_path_str = managed_path.display().to_string();
@@ -194,29 +123,30 @@ writable_roots = [{}]
layers,
} = to_response(resp)?;
assert_eq!(config.model.as_deref(), Some("gpt-system"));
assert_eq!(config.get("model"), Some(&json!("gpt-system")));
assert_eq!(
origins.get("model").expect("origin").name,
ConfigLayerName::System
);
assert_eq!(config.approval_policy, Some(AskForApproval::Never));
assert_eq!(config.get("approval_policy"), Some(&json!("never")));
assert_eq!(
origins.get("approval_policy").expect("origin").name,
ConfigLayerName::System
);
assert_eq!(config.sandbox_mode, Some(SandboxMode::WorkspaceWrite));
assert_eq!(config.get("sandbox_mode"), Some(&json!("workspace-write")));
assert_eq!(
origins.get("sandbox_mode").expect("origin").name,
ConfigLayerName::User
);
let sandbox = config
.sandbox_workspace_write
.as_ref()
.expect("sandbox workspace write");
assert_eq!(sandbox.writable_roots, vec![system_dir]);
assert_eq!(
config
.get("sandbox_workspace_write")
.and_then(|v| v.get("writable_roots")),
Some(&json!(["/system"]))
);
assert_eq!(
origins
.get("sandbox_workspace_write.writable_roots.0")
@@ -225,7 +155,12 @@ writable_roots = [{}]
ConfigLayerName::System
);
assert!(sandbox.network_access);
assert_eq!(
config
.get("sandbox_workspace_write")
.and_then(|v| v.get("network_access")),
Some(&json!(true))
);
assert_eq!(
origins
.get("sandbox_workspace_write.network_access")
@@ -271,7 +206,7 @@ model = "gpt-old"
let write_id = mcp
.send_config_value_write_request(ConfigValueWriteParams {
file_path: None,
file_path: codex_home.path().join("config.toml").display().to_string(),
key_path: "model".to_string(),
value: json!("gpt-new"),
merge_strategy: MergeStrategy::Replace,
@@ -284,16 +219,8 @@ model = "gpt-old"
)
.await??;
let write: ConfigWriteResponse = to_response(write_resp)?;
let expected_file_path = codex_home
.path()
.join("config.toml")
.canonicalize()
.unwrap()
.display()
.to_string();
assert_eq!(write.status, WriteStatus::Ok);
assert_eq!(write.file_path, expected_file_path);
assert!(write.overridden_metadata.is_none());
let verify_id = mcp
@@ -307,7 +234,7 @@ model = "gpt-old"
)
.await??;
let verify: ConfigReadResponse = to_response(verify_resp)?;
assert_eq!(verify.config.model.as_deref(), Some("gpt-new"));
assert_eq!(verify.config.get("model"), Some(&json!("gpt-new")));
Ok(())
}
@@ -327,7 +254,7 @@ model = "gpt-old"
let write_id = mcp
.send_config_value_write_request(ConfigValueWriteParams {
file_path: Some(codex_home.path().join("config.toml").display().to_string()),
file_path: codex_home.path().join("config.toml").display().to_string(),
key_path: "model".to_string(),
value: json!("gpt-new"),
merge_strategy: MergeStrategy::Replace,
@@ -359,10 +286,9 @@ async fn config_batch_write_applies_multiple_edits() -> Result<()> {
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let writable_root = test_tmp_path_buf();
let batch_id = mcp
.send_config_batch_write_request(ConfigBatchWriteParams {
file_path: Some(codex_home.path().join("config.toml").display().to_string()),
file_path: codex_home.path().join("config.toml").display().to_string(),
edits: vec![
ConfigEdit {
key_path: "sandbox_mode".to_string(),
@@ -372,7 +298,7 @@ async fn config_batch_write_applies_multiple_edits() -> Result<()> {
ConfigEdit {
key_path: "sandbox_workspace_write".to_string(),
value: json!({
"writable_roots": [writable_root.clone()],
"writable_roots": ["/tmp"],
"network_access": false
}),
merge_strategy: MergeStrategy::Replace,
@@ -388,14 +314,6 @@ async fn config_batch_write_applies_multiple_edits() -> Result<()> {
.await??;
let batch_write: ConfigWriteResponse = to_response(batch_resp)?;
assert_eq!(batch_write.status, WriteStatus::Ok);
let expected_file_path = codex_home
.path()
.join("config.toml")
.canonicalize()
.unwrap()
.display()
.to_string();
assert_eq!(batch_write.file_path, expected_file_path);
let read_id = mcp
.send_config_read_request(ConfigReadParams {
@@ -408,14 +326,22 @@ async fn config_batch_write_applies_multiple_edits() -> Result<()> {
)
.await??;
let read: ConfigReadResponse = to_response(read_resp)?;
assert_eq!(read.config.sandbox_mode, Some(SandboxMode::WorkspaceWrite));
let sandbox = read
.config
.sandbox_workspace_write
.as_ref()
.expect("sandbox workspace write");
assert_eq!(sandbox.writable_roots, vec![writable_root]);
assert!(!sandbox.network_access);
assert_eq!(
read.config.get("sandbox_mode"),
Some(&json!("workspace-write"))
);
assert_eq!(
read.config
.get("sandbox_workspace_write")
.and_then(|v| v.get("writable_roots")),
Some(&json!(["/tmp"]))
);
assert_eq!(
read.config
.get("sandbox_workspace_write")
.and_then(|v| v.get("network_access")),
Some(&json!(false))
);
Ok(())
}

View File

@@ -1,36 +1,35 @@
use std::time::Duration;
use anyhow::Result;
use anyhow::anyhow;
use app_test_support::McpProcess;
use app_test_support::to_response;
use app_test_support::write_models_cache;
use codex_app_server_protocol::JSONRPCNotification;
use codex_app_server_protocol::JSONRPCError;
use codex_app_server_protocol::JSONRPCResponse;
use codex_app_server_protocol::Model;
use codex_app_server_protocol::ModelListParams;
use codex_app_server_protocol::ModelListResponse;
use codex_app_server_protocol::ReasoningEffortOption;
use codex_app_server_protocol::RequestId;
use codex_app_server_protocol::ServerNotification;
use codex_protocol::openai_models::ReasoningEffort;
use codex_protocol::config_types::ReasoningEffort;
use pretty_assertions::assert_eq;
use tempfile::TempDir;
use tokio::time::timeout;
const DEFAULT_TIMEOUT: Duration = Duration::from_secs(10);
const INVALID_REQUEST_ERROR_CODE: i64 = -32600;
#[tokio::test]
async fn list_models_returns_empty_response_and_notification() -> Result<()> {
async fn list_models_returns_all_models_with_large_limit() -> Result<()> {
let codex_home = TempDir::new()?;
write_models_cache(codex_home.path())?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_TIMEOUT, mcp.initialize()).await??;
let request_id = mcp
.send_list_models_request(ModelListParams {
limit: Some(1),
cursor: Some("ignored".to_string()),
limit: Some(100),
cursor: None,
})
.await?;
@@ -40,24 +39,12 @@ async fn list_models_returns_empty_response_and_notification() -> Result<()> {
)
.await??;
let ModelListResponse {} = to_response::<ModelListResponse>(response)?;
let ModelListResponse {
data: items,
next_cursor,
} = to_response::<ModelListResponse>(response)?;
let notification: JSONRPCNotification = timeout(
DEFAULT_TIMEOUT,
mcp.read_stream_until_notification_message("model/presets/updated"),
)
.await??;
let server_notification: ServerNotification = notification.try_into()?;
let ServerNotification::ModelPresetsUpdated(payload) = server_notification else {
unreachable!("expected model/presets/updated notification");
};
assert_eq!(payload.models, expected_models());
Ok(())
}
fn expected_models() -> Vec<Model> {
vec![
let expected_models = vec![
Model {
id: "gpt-5.1-codex-max".to_string(),
model: "gpt-5.1-codex-max".to_string(),
@@ -75,7 +62,7 @@ fn expected_models() -> Vec<Model> {
},
ReasoningEffortOption {
reasoning_effort: ReasoningEffort::High,
description: "Greater reasoning depth for complex problems".to_string(),
description: "Maximizes reasoning depth for complex problems".to_string(),
},
ReasoningEffortOption {
reasoning_effort: ReasoningEffort::XHigh,
@@ -127,39 +114,6 @@ fn expected_models() -> Vec<Model> {
default_reasoning_effort: ReasoningEffort::Medium,
is_default: false,
},
Model {
id: "gpt-5.2".to_string(),
model: "gpt-5.2".to_string(),
display_name: "gpt-5.2".to_string(),
description:
"Latest frontier model with improvements across knowledge, reasoning and coding"
.to_string(),
supported_reasoning_efforts: vec![
ReasoningEffortOption {
reasoning_effort: ReasoningEffort::Low,
description: "Balances speed with some reasoning; useful for straightforward \
queries and short explanations"
.to_string(),
},
ReasoningEffortOption {
reasoning_effort: ReasoningEffort::Medium,
description: "Provides a solid balance of reasoning depth and latency for \
general-purpose tasks"
.to_string(),
},
ReasoningEffortOption {
reasoning_effort: ReasoningEffort::High,
description: "Greater reasoning depth for complex or ambiguous problems"
.to_string(),
},
ReasoningEffortOption {
reasoning_effort: ReasoningEffort::XHigh,
description: "Extra high reasoning for complex problems".to_string(),
},
],
default_reasoning_effort: ReasoningEffort::Medium,
is_default: false,
},
Model {
id: "gpt-5.1".to_string(),
model: "gpt-5.1".to_string(),
@@ -187,5 +141,132 @@ fn expected_models() -> Vec<Model> {
default_reasoning_effort: ReasoningEffort::Medium,
is_default: false,
},
]
];
assert_eq!(items, expected_models);
assert!(next_cursor.is_none());
Ok(())
}
#[tokio::test]
async fn list_models_pagination_works() -> Result<()> {
let codex_home = TempDir::new()?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_TIMEOUT, mcp.initialize()).await??;
let first_request = mcp
.send_list_models_request(ModelListParams {
limit: Some(1),
cursor: None,
})
.await?;
let first_response: JSONRPCResponse = timeout(
DEFAULT_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(first_request)),
)
.await??;
let ModelListResponse {
data: first_items,
next_cursor: first_cursor,
} = to_response::<ModelListResponse>(first_response)?;
assert_eq!(first_items.len(), 1);
assert_eq!(first_items[0].id, "gpt-5.1-codex-max");
let next_cursor = first_cursor.ok_or_else(|| anyhow!("cursor for second page"))?;
let second_request = mcp
.send_list_models_request(ModelListParams {
limit: Some(1),
cursor: Some(next_cursor.clone()),
})
.await?;
let second_response: JSONRPCResponse = timeout(
DEFAULT_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(second_request)),
)
.await??;
let ModelListResponse {
data: second_items,
next_cursor: second_cursor,
} = to_response::<ModelListResponse>(second_response)?;
assert_eq!(second_items.len(), 1);
assert_eq!(second_items[0].id, "gpt-5.1-codex");
let third_cursor = second_cursor.ok_or_else(|| anyhow!("cursor for third page"))?;
let third_request = mcp
.send_list_models_request(ModelListParams {
limit: Some(1),
cursor: Some(third_cursor.clone()),
})
.await?;
let third_response: JSONRPCResponse = timeout(
DEFAULT_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(third_request)),
)
.await??;
let ModelListResponse {
data: third_items,
next_cursor: third_cursor,
} = to_response::<ModelListResponse>(third_response)?;
assert_eq!(third_items.len(), 1);
assert_eq!(third_items[0].id, "gpt-5.1-codex-mini");
let fourth_cursor = third_cursor.ok_or_else(|| anyhow!("cursor for fourth page"))?;
let fourth_request = mcp
.send_list_models_request(ModelListParams {
limit: Some(1),
cursor: Some(fourth_cursor.clone()),
})
.await?;
let fourth_response: JSONRPCResponse = timeout(
DEFAULT_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(fourth_request)),
)
.await??;
let ModelListResponse {
data: fourth_items,
next_cursor: fourth_cursor,
} = to_response::<ModelListResponse>(fourth_response)?;
assert_eq!(fourth_items.len(), 1);
assert_eq!(fourth_items[0].id, "gpt-5.1");
assert!(fourth_cursor.is_none());
Ok(())
}
#[tokio::test]
async fn list_models_rejects_invalid_cursor() -> Result<()> {
let codex_home = TempDir::new()?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_TIMEOUT, mcp.initialize()).await??;
let request_id = mcp
.send_list_models_request(ModelListParams {
limit: None,
cursor: Some("invalid".to_string()),
})
.await?;
let error: JSONRPCError = timeout(
DEFAULT_TIMEOUT,
mcp.read_stream_until_error_message(RequestId::Integer(request_id)),
)
.await??;
assert_eq!(error.id, RequestId::Integer(request_id));
assert_eq!(error.error.code, INVALID_REQUEST_ERROR_CODE);
assert_eq!(error.error.message, "invalid cursor: invalid");
Ok(())
}

View File

@@ -11,7 +11,6 @@ use codex_app_server_protocol::RateLimitSnapshot;
use codex_app_server_protocol::RateLimitWindow;
use codex_app_server_protocol::RequestId;
use codex_core::auth::AuthCredentialsStoreMode;
use codex_protocol::account::PlanType as AccountPlanType;
use pretty_assertions::assert_eq;
use serde_json::json;
use std::path::Path;
@@ -154,7 +153,6 @@ async fn get_account_rate_limits_returns_snapshot() -> Result<()> {
resets_at: Some(secondary_reset_timestamp),
}),
credits: None,
plan_type: Some(AccountPlanType::Pro),
},
};
assert_eq!(received, expected);

View File

@@ -93,7 +93,7 @@ async fn review_start_runs_review_turn_and_emits_code_review_item() -> Result<()
match started.item {
ThreadItem::EnteredReviewMode { id, review } => {
assert_eq!(id, turn_id);
assert_eq!(review, "commit 1234567: Tidy UI colors");
assert_eq!(review, "commit 1234567");
saw_entered_review_mode = true;
break;
}

View File

@@ -6,96 +6,37 @@ use codex_app_server_protocol::GitInfo as ApiGitInfo;
use codex_app_server_protocol::JSONRPCResponse;
use codex_app_server_protocol::RequestId;
use codex_app_server_protocol::SessionSource;
use codex_app_server_protocol::ThreadListParams;
use codex_app_server_protocol::ThreadListResponse;
use codex_protocol::protocol::GitInfo as CoreGitInfo;
use std::path::Path;
use std::path::PathBuf;
use tempfile::TempDir;
use tokio::time::timeout;
const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10);
async fn init_mcp(codex_home: &Path) -> Result<McpProcess> {
let mut mcp = McpProcess::new(codex_home).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
Ok(mcp)
}
async fn list_threads(
mcp: &mut McpProcess,
cursor: Option<String>,
limit: Option<u32>,
providers: Option<Vec<String>>,
) -> Result<ThreadListResponse> {
let request_id = mcp
.send_thread_list_request(codex_app_server_protocol::ThreadListParams {
cursor,
limit,
model_providers: providers,
})
.await?;
let resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(request_id)),
)
.await??;
to_response::<ThreadListResponse>(resp)
}
fn create_fake_rollouts<F, G>(
codex_home: &Path,
count: usize,
provider_for_index: F,
timestamp_for_index: G,
preview: &str,
) -> Result<Vec<String>>
where
F: Fn(usize) -> &'static str,
G: Fn(usize) -> (String, String),
{
let mut ids = Vec::with_capacity(count);
for i in 0..count {
let (ts_file, ts_rfc) = timestamp_for_index(i);
ids.push(create_fake_rollout(
codex_home,
&ts_file,
&ts_rfc,
preview,
Some(provider_for_index(i)),
None,
)?);
}
Ok(ids)
}
fn timestamp_at(
year: i32,
month: u32,
day: u32,
hour: u32,
minute: u32,
second: u32,
) -> (String, String) {
(
format!("{year:04}-{month:02}-{day:02}T{hour:02}-{minute:02}-{second:02}"),
format!("{year:04}-{month:02}-{day:02}T{hour:02}:{minute:02}:{second:02}Z"),
)
}
#[tokio::test]
async fn thread_list_basic_empty() -> Result<()> {
let codex_home = TempDir::new()?;
create_minimal_config(codex_home.path())?;
let mut mcp = init_mcp(codex_home.path()).await?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let ThreadListResponse { data, next_cursor } = list_threads(
&mut mcp,
None,
Some(10),
Some(vec!["mock_provider".to_string()]),
// List threads in an empty CODEX_HOME; should return an empty page with nextCursor: null.
let list_id = mcp
.send_thread_list_request(ThreadListParams {
cursor: None,
limit: Some(10),
model_providers: Some(vec!["mock_provider".to_string()]),
})
.await?;
let list_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(list_id)),
)
.await?;
.await??;
let ThreadListResponse { data, next_cursor } = to_response::<ThreadListResponse>(list_resp)?;
assert!(data.is_empty());
assert_eq!(next_cursor, None);
@@ -145,19 +86,26 @@ async fn thread_list_pagination_next_cursor_none_on_last_page() -> Result<()> {
None,
)?;
let mut mcp = init_mcp(codex_home.path()).await?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
// Page 1: limit 2 → expect next_cursor Some.
let page1_id = mcp
.send_thread_list_request(ThreadListParams {
cursor: None,
limit: Some(2),
model_providers: Some(vec!["mock_provider".to_string()]),
})
.await?;
let page1_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(page1_id)),
)
.await??;
let ThreadListResponse {
data: data1,
next_cursor: cursor1,
} = list_threads(
&mut mcp,
None,
Some(2),
Some(vec!["mock_provider".to_string()]),
)
.await?;
} = to_response::<ThreadListResponse>(page1_resp)?;
assert_eq!(data1.len(), 2);
for thread in &data1 {
assert_eq!(thread.preview, "Hello");
@@ -171,16 +119,22 @@ async fn thread_list_pagination_next_cursor_none_on_last_page() -> Result<()> {
let cursor1 = cursor1.expect("expected nextCursor on first page");
// Page 2: with cursor → expect next_cursor None when no more results.
let page2_id = mcp
.send_thread_list_request(ThreadListParams {
cursor: Some(cursor1),
limit: Some(2),
model_providers: Some(vec!["mock_provider".to_string()]),
})
.await?;
let page2_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(page2_id)),
)
.await??;
let ThreadListResponse {
data: data2,
next_cursor: cursor2,
} = list_threads(
&mut mcp,
Some(cursor1),
Some(2),
Some(vec!["mock_provider".to_string()]),
)
.await?;
} = to_response::<ThreadListResponse>(page2_resp)?;
assert!(data2.len() <= 2);
for thread in &data2 {
assert_eq!(thread.preview, "Hello");
@@ -219,16 +173,23 @@ async fn thread_list_respects_provider_filter() -> Result<()> {
None,
)?;
let mut mcp = init_mcp(codex_home.path()).await?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
// Filter to only other_provider; expect 1 item, nextCursor None.
let ThreadListResponse { data, next_cursor } = list_threads(
&mut mcp,
None,
Some(10),
Some(vec!["other_provider".to_string()]),
let list_id = mcp
.send_thread_list_request(ThreadListParams {
cursor: None,
limit: Some(10),
model_providers: Some(vec!["other_provider".to_string()]),
})
.await?;
let resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(list_id)),
)
.await?;
.await??;
let ThreadListResponse { data, next_cursor } = to_response::<ThreadListResponse>(resp)?;
assert_eq!(data.len(), 1);
assert_eq!(next_cursor, None);
let thread = &data[0];
@@ -244,146 +205,6 @@ async fn thread_list_respects_provider_filter() -> Result<()> {
Ok(())
}
#[tokio::test]
async fn thread_list_fetches_until_limit_or_exhausted() -> Result<()> {
let codex_home = TempDir::new()?;
create_minimal_config(codex_home.path())?;
// Newest 16 conversations belong to a different provider; the older 8 are the
// only ones that match the filter. We request 8 so the server must keep
// paging past the first two pages to reach the desired count.
create_fake_rollouts(
codex_home.path(),
24,
|i| {
if i < 16 {
"skip_provider"
} else {
"target_provider"
}
},
|i| timestamp_at(2025, 3, 30 - i as u32, 12, 0, 0),
"Hello",
)?;
let mut mcp = init_mcp(codex_home.path()).await?;
// Request 8 threads for the target provider; the matches only start on the
// third page so we rely on pagination to reach the limit.
let ThreadListResponse { data, next_cursor } = list_threads(
&mut mcp,
None,
Some(8),
Some(vec!["target_provider".to_string()]),
)
.await?;
assert_eq!(
data.len(),
8,
"should keep paging until the requested count is filled"
);
assert!(
data.iter()
.all(|thread| thread.model_provider == "target_provider"),
"all returned threads must match the requested provider"
);
assert_eq!(
next_cursor, None,
"once the requested count is satisfied on the final page, nextCursor should be None"
);
Ok(())
}
#[tokio::test]
async fn thread_list_enforces_max_limit() -> Result<()> {
let codex_home = TempDir::new()?;
create_minimal_config(codex_home.path())?;
create_fake_rollouts(
codex_home.path(),
105,
|_| "mock_provider",
|i| {
let month = 5 + (i / 28);
let day = (i % 28) + 1;
timestamp_at(2025, month as u32, day as u32, 0, 0, 0)
},
"Hello",
)?;
let mut mcp = init_mcp(codex_home.path()).await?;
let ThreadListResponse { data, next_cursor } = list_threads(
&mut mcp,
None,
Some(200),
Some(vec!["mock_provider".to_string()]),
)
.await?;
assert_eq!(
data.len(),
100,
"limit should be clamped to the maximum page size"
);
assert!(
next_cursor.is_some(),
"when more than the maximum exist, nextCursor should continue pagination"
);
Ok(())
}
#[tokio::test]
async fn thread_list_stops_when_not_enough_filtered_results_exist() -> Result<()> {
let codex_home = TempDir::new()?;
create_minimal_config(codex_home.path())?;
// Only the last 7 conversations match the provider filter; we ask for 10 to
// ensure the server exhausts pagination without looping forever.
create_fake_rollouts(
codex_home.path(),
22,
|i| {
if i < 15 {
"skip_provider"
} else {
"target_provider"
}
},
|i| timestamp_at(2025, 4, 28 - i as u32, 8, 0, 0),
"Hello",
)?;
let mut mcp = init_mcp(codex_home.path()).await?;
// Request more threads than exist after filtering; expect all matches to be
// returned with nextCursor None.
let ThreadListResponse { data, next_cursor } = list_threads(
&mut mcp,
None,
Some(10),
Some(vec!["target_provider".to_string()]),
)
.await?;
assert_eq!(
data.len(),
7,
"all available filtered threads should be returned"
);
assert!(
data.iter()
.all(|thread| thread.model_provider == "target_provider"),
"results should still respect the provider filter"
);
assert_eq!(
next_cursor, None,
"when results are exhausted before reaching the limit, nextCursor should be None"
);
Ok(())
}
#[tokio::test]
async fn thread_list_includes_git_info() -> Result<()> {
let codex_home = TempDir::new()?;
@@ -403,15 +224,22 @@ async fn thread_list_includes_git_info() -> Result<()> {
Some(git_info),
)?;
let mut mcp = init_mcp(codex_home.path()).await?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let ThreadListResponse { data, .. } = list_threads(
&mut mcp,
None,
Some(10),
Some(vec!["mock_provider".to_string()]),
let list_id = mcp
.send_thread_list_request(ThreadListParams {
cursor: None,
limit: Some(10),
model_providers: Some(vec!["mock_provider".to_string()]),
})
.await?;
let resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(list_id)),
)
.await?;
.await??;
let ThreadListResponse { data, .. } = to_response::<ThreadListResponse>(resp)?;
let thread = data
.iter()
.find(|t| t.id == conversation_id)

View File

@@ -11,7 +11,6 @@ use app_test_support::to_response;
use codex_app_server_protocol::ApprovalDecision;
use codex_app_server_protocol::CommandExecutionRequestApprovalResponse;
use codex_app_server_protocol::CommandExecutionStatus;
use codex_app_server_protocol::FileChangeOutputDeltaNotification;
use codex_app_server_protocol::FileChangeRequestApprovalResponse;
use codex_app_server_protocol::ItemCompletedNotification;
use codex_app_server_protocol::ItemStartedNotification;
@@ -30,8 +29,8 @@ use codex_app_server_protocol::TurnStartResponse;
use codex_app_server_protocol::TurnStartedNotification;
use codex_app_server_protocol::TurnStatus;
use codex_app_server_protocol::UserInput as V2UserInput;
use codex_core::protocol_config_types::ReasoningEffort;
use codex_core::protocol_config_types::ReasoningSummary;
use codex_protocol::openai_models::ReasoningEffort;
use core_test_support::skip_if_no_network;
use pretty_assertions::assert_eq;
use std::path::Path;
@@ -427,6 +426,7 @@ async fn turn_start_exec_approval_decline_v2() -> Result<()> {
request_id,
serde_json::to_value(CommandExecutionRequestApprovalResponse {
decision: ApprovalDecision::Decline,
accept_settings: None,
})?,
)
.await?;
@@ -532,7 +532,7 @@ async fn turn_start_updates_sandbox_and_cwd_between_turns_v2() -> Result<()> {
cwd: Some(first_cwd.clone()),
approval_policy: Some(codex_app_server_protocol::AskForApproval::Never),
sandbox_policy: Some(codex_app_server_protocol::SandboxPolicy::WorkspaceWrite {
writable_roots: vec![first_cwd.try_into()?],
writable_roots: vec![first_cwd.clone()],
network_access: false,
exclude_tmpdir_env_var: false,
exclude_slash_tmp: false,
@@ -725,26 +725,6 @@ async fn turn_start_file_change_approval_v2() -> Result<()> {
)
.await?;
let output_delta_notif = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("item/fileChange/outputDelta"),
)
.await??;
let output_delta: FileChangeOutputDeltaNotification = serde_json::from_value(
output_delta_notif
.params
.clone()
.expect("item/fileChange/outputDelta params"),
)?;
assert_eq!(output_delta.thread_id, thread.id);
assert_eq!(output_delta.turn_id, turn.id);
assert_eq!(output_delta.item_id, "patch-call");
assert!(
!output_delta.delta.is_empty(),
"expected delta to be non-empty, got: {}",
output_delta.delta
);
let completed_file_change = timeout(DEFAULT_READ_TIMEOUT, async {
loop {
let completed_notif = mcp

View File

@@ -1,369 +0,0 @@
use std::collections::HashMap;
use std::path::Path;
use std::sync::LazyLock;
use tree_sitter::Parser;
use tree_sitter::Query;
use tree_sitter::QueryCursor;
use tree_sitter::StreamingIterator;
use tree_sitter_bash::LANGUAGE as BASH;
use crate::ApplyPatchAction;
use crate::ApplyPatchArgs;
use crate::ApplyPatchError;
use crate::ApplyPatchFileChange;
use crate::ApplyPatchFileUpdate;
use crate::IoError;
use crate::MaybeApplyPatchVerified;
use crate::parser::Hunk;
use crate::parser::ParseError;
use crate::parser::parse_patch;
use crate::unified_diff_from_chunks;
use std::str::Utf8Error;
use tree_sitter::LanguageError;
const APPLY_PATCH_COMMANDS: [&str; 2] = ["apply_patch", "applypatch"];
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum ApplyPatchShell {
Unix,
PowerShell,
Cmd,
}
#[derive(Debug, PartialEq)]
pub enum MaybeApplyPatch {
Body(ApplyPatchArgs),
ShellParseError(ExtractHeredocError),
PatchParseError(ParseError),
NotApplyPatch,
}
#[derive(Debug, PartialEq)]
pub enum ExtractHeredocError {
CommandDidNotStartWithApplyPatch,
FailedToLoadBashGrammar(LanguageError),
HeredocNotUtf8(Utf8Error),
FailedToParsePatchIntoAst,
FailedToFindHeredocBody,
}
fn classify_shell_name(shell: &str) -> Option<String> {
std::path::Path::new(shell)
.file_stem()
.and_then(|name| name.to_str())
.map(str::to_ascii_lowercase)
}
fn classify_shell(shell: &str, flag: &str) -> Option<ApplyPatchShell> {
classify_shell_name(shell).and_then(|name| match name.as_str() {
"bash" | "zsh" | "sh" if matches!(flag, "-lc" | "-c") => Some(ApplyPatchShell::Unix),
"pwsh" | "powershell" if flag.eq_ignore_ascii_case("-command") => {
Some(ApplyPatchShell::PowerShell)
}
"cmd" if flag.eq_ignore_ascii_case("/c") => Some(ApplyPatchShell::Cmd),
_ => None,
})
}
fn can_skip_flag(shell: &str, flag: &str) -> bool {
classify_shell_name(shell).is_some_and(|name| {
matches!(name.as_str(), "pwsh" | "powershell") && flag.eq_ignore_ascii_case("-noprofile")
})
}
fn parse_shell_script(argv: &[String]) -> Option<(ApplyPatchShell, &str)> {
match argv {
[shell, flag, script] => classify_shell(shell, flag).map(|shell_type| {
let script = script.as_str();
(shell_type, script)
}),
[shell, skip_flag, flag, script] if can_skip_flag(shell, skip_flag) => {
classify_shell(shell, flag).map(|shell_type| {
let script = script.as_str();
(shell_type, script)
})
}
_ => None,
}
}
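// Hedged sketch (not part of the original change): the argv shapes the
// helpers above accept. `-NoProfile` is the one skippable flag, and only for
// PowerShell-family shells.
#[cfg(test)]
mod parse_shell_script_sketch {
    use super::*;

    fn argv(parts: &[&str]) -> Vec<String> {
        parts.iter().map(|s| s.to_string()).collect()
    }

    #[test]
    fn recognizes_supported_shells() {
        assert_eq!(
            parse_shell_script(&argv(&["bash", "-lc", "apply_patch <<'EOF'"]))
                .map(|(shell, _)| shell),
            Some(ApplyPatchShell::Unix)
        );
        assert_eq!(
            parse_shell_script(&argv(&["pwsh", "-NoProfile", "-Command", "apply_patch"]))
                .map(|(shell, _)| shell),
            Some(ApplyPatchShell::PowerShell)
        );
        // Unknown shells (or mismatched flags) do not parse.
        assert_eq!(
            parse_shell_script(&argv(&["fish", "-c", "apply_patch"])),
            None
        );
    }
}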
fn extract_apply_patch_from_shell(
shell: ApplyPatchShell,
script: &str,
) -> std::result::Result<(String, Option<String>), ExtractHeredocError> {
match shell {
ApplyPatchShell::Unix | ApplyPatchShell::PowerShell | ApplyPatchShell::Cmd => {
extract_apply_patch_from_bash(script)
}
}
}
// TODO: make private once we remove tests in lib.rs
pub fn maybe_parse_apply_patch(argv: &[String]) -> MaybeApplyPatch {
match argv {
// Direct invocation: apply_patch <patch>
[cmd, body] if APPLY_PATCH_COMMANDS.contains(&cmd.as_str()) => match parse_patch(body) {
Ok(source) => MaybeApplyPatch::Body(source),
Err(e) => MaybeApplyPatch::PatchParseError(e),
},
// Shell heredoc form: (optional `cd <path> &&`) apply_patch <<'EOF' ...
_ => match parse_shell_script(argv) {
Some((shell, script)) => match extract_apply_patch_from_shell(shell, script) {
Ok((body, workdir)) => match parse_patch(&body) {
Ok(mut source) => {
source.workdir = workdir;
MaybeApplyPatch::Body(source)
}
Err(e) => MaybeApplyPatch::PatchParseError(e),
},
Err(ExtractHeredocError::CommandDidNotStartWithApplyPatch) => {
MaybeApplyPatch::NotApplyPatch
}
Err(e) => MaybeApplyPatch::ShellParseError(e),
},
None => MaybeApplyPatch::NotApplyPatch,
},
}
}
/// cwd must be an absolute path so that we can resolve relative paths in the
/// patch.
pub fn maybe_parse_apply_patch_verified(argv: &[String], cwd: &Path) -> MaybeApplyPatchVerified {
// Detect a raw patch body passed directly as the command or as the body of a shell
// script. In these cases, report an explicit error rather than applying the patch.
if let [body] = argv
&& parse_patch(body).is_ok()
{
return MaybeApplyPatchVerified::CorrectnessError(ApplyPatchError::ImplicitInvocation);
}
if let Some((_, script)) = parse_shell_script(argv)
&& parse_patch(script).is_ok()
{
return MaybeApplyPatchVerified::CorrectnessError(ApplyPatchError::ImplicitInvocation);
}
match maybe_parse_apply_patch(argv) {
MaybeApplyPatch::Body(ApplyPatchArgs {
patch,
hunks,
workdir,
}) => {
let effective_cwd = workdir
.as_ref()
.map(|dir| {
let path = Path::new(dir);
if path.is_absolute() {
path.to_path_buf()
} else {
cwd.join(path)
}
})
.unwrap_or_else(|| cwd.to_path_buf());
let mut changes = HashMap::new();
for hunk in hunks {
let path = hunk.resolve_path(&effective_cwd);
match hunk {
Hunk::AddFile { contents, .. } => {
changes.insert(path, ApplyPatchFileChange::Add { content: contents });
}
Hunk::DeleteFile { .. } => {
let content = match std::fs::read_to_string(&path) {
Ok(content) => content,
Err(e) => {
return MaybeApplyPatchVerified::CorrectnessError(
ApplyPatchError::IoError(IoError {
context: format!("Failed to read {}", path.display()),
source: e,
}),
);
}
};
changes.insert(path, ApplyPatchFileChange::Delete { content });
}
Hunk::UpdateFile {
move_path, chunks, ..
} => {
let ApplyPatchFileUpdate {
unified_diff,
content: contents,
} = match unified_diff_from_chunks(&path, &chunks) {
Ok(diff) => diff,
Err(e) => {
return MaybeApplyPatchVerified::CorrectnessError(e);
}
};
changes.insert(
path,
ApplyPatchFileChange::Update {
unified_diff,
move_path: move_path.map(|p| effective_cwd.join(p)),
new_content: contents,
},
);
}
}
}
MaybeApplyPatchVerified::Body(ApplyPatchAction {
changes,
patch,
cwd: effective_cwd,
})
}
MaybeApplyPatch::ShellParseError(e) => MaybeApplyPatchVerified::ShellParseError(e),
MaybeApplyPatch::PatchParseError(e) => MaybeApplyPatchVerified::CorrectnessError(e.into()),
MaybeApplyPatch::NotApplyPatch => MaybeApplyPatchVerified::NotApplyPatch,
}
}
/// Extract the heredoc body (and optional `cd` workdir) from a `bash -lc` script
/// that invokes the apply_patch tool using a heredoc.
///
/// Supported top-level forms (each must be the only top-level statement):
/// - `apply_patch <<'EOF'\n...\nEOF`
/// - `cd <path> && apply_patch <<'EOF'\n...\nEOF`
///
/// Notes about matching:
/// - Parsed with Tree-sitter Bash and a strict query that uses anchors so the
/// heredoc-redirected statement is the only top-level statement.
/// - The connector between `cd` and `apply_patch` must be `&&` (not `|` or `||`).
/// - Exactly one positional argument is allowed for `cd` (a bare word or a
/// quoted string; no flags, no second argument).
/// - The apply command is validated in the query via `#any-of?`, which allows
/// `apply_patch` or `applypatch`.
/// - Preceding or trailing commands (e.g., `echo ...;` or `... && echo done`) do not match.
///
/// Returns `(heredoc_body, Some(path))` when the `cd` variant matches, or
/// `(heredoc_body, None)` for the direct form. Errors are returned if the script
/// cannot be parsed or does not match the allowed patterns.
fn extract_apply_patch_from_bash(
src: &str,
) -> std::result::Result<(String, Option<String>), ExtractHeredocError> {
// This function uses a Tree-sitter query to recognize one of two
// whole-script forms, each expressed as a single top-level statement:
//
// 1. apply_patch <<'EOF'\n...\nEOF
// 2. cd <path> && apply_patch <<'EOF'\n...\nEOF
//
// Key ideas when reading the query:
// - dots (`.`) between named nodes enforce adjacency among named children and
// anchor the match to the start/end of the expression.
// - we match a single redirected_statement directly under program with leading
// and trailing anchors (`.`). This ensures it is the only top-level statement
// (so prefixes like `echo ...;` or suffixes like `... && echo done` do not match).
//
// Overall, we want to be conservative and only match the intended forms, as other
// forms are likely to be model errors, or incorrectly interpreted by later code.
//
// If you're editing this query, it's helpful to start by creating a debugging binary
// which will let you see the AST of an arbitrary bash script passed in, and optionally
// also run an arbitrary query against the AST. This is useful for understanding
// how tree-sitter parses the script and whether the query syntax is correct. Be sure
// to test both positive and negative cases.
static APPLY_PATCH_QUERY: LazyLock<Query> = LazyLock::new(|| {
let language = BASH.into();
#[expect(clippy::expect_used)]
Query::new(
&language,
r#"
(
program
. (redirected_statement
body: (command
name: (command_name (word) @apply_name) .)
(#any-of? @apply_name "apply_patch" "applypatch")
redirect: (heredoc_redirect
. (heredoc_start)
. (heredoc_body) @heredoc
. (heredoc_end)
.))
.)
(
program
. (redirected_statement
body: (list
. (command
name: (command_name (word) @cd_name) .
argument: [
(word) @cd_path
(string (string_content) @cd_path)
(raw_string) @cd_raw_string
] .)
"&&"
. (command
name: (command_name (word) @apply_name))
.)
(#eq? @cd_name "cd")
(#any-of? @apply_name "apply_patch" "applypatch")
redirect: (heredoc_redirect
. (heredoc_start)
. (heredoc_body) @heredoc
. (heredoc_end)
.))
.)
"#,
)
.expect("valid bash query")
});
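// Illustrative outcomes: `apply_patch <<'EOF' ... EOF` matches the first
// pattern; `cd foo && apply_patch <<'EOF' ... EOF` matches the second; and
// `echo hi && apply_patch <<'EOF' ... EOF` matches neither, since the list
// form requires the first command to be `cd`.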
let lang = BASH.into();
let mut parser = Parser::new();
parser
.set_language(&lang)
.map_err(ExtractHeredocError::FailedToLoadBashGrammar)?;
let tree = parser
.parse(src, None)
.ok_or(ExtractHeredocError::FailedToParsePatchIntoAst)?;
let bytes = src.as_bytes();
let root = tree.root_node();
let mut cursor = QueryCursor::new();
let mut matches = cursor.matches(&APPLY_PATCH_QUERY, root, bytes);
while let Some(m) = matches.next() {
let mut heredoc_text: Option<String> = None;
let mut cd_path: Option<String> = None;
for capture in m.captures.iter() {
let name = APPLY_PATCH_QUERY.capture_names()[capture.index as usize];
match name {
"heredoc" => {
let text = capture
.node
.utf8_text(bytes)
.map_err(ExtractHeredocError::HeredocNotUtf8)?
.trim_end_matches('\n')
.to_string();
heredoc_text = Some(text);
}
"cd_path" => {
let text = capture
.node
.utf8_text(bytes)
.map_err(ExtractHeredocError::HeredocNotUtf8)?
.to_string();
cd_path = Some(text);
}
"cd_raw_string" => {
let raw = capture
.node
.utf8_text(bytes)
.map_err(ExtractHeredocError::HeredocNotUtf8)?;
let trimmed = raw
.strip_prefix('\'')
.and_then(|s| s.strip_suffix('\''))
.unwrap_or(raw);
cd_path = Some(trimmed.to_string());
}
_ => {}
}
}
if let Some(heredoc) = heredoc_text {
return Ok((heredoc, cd_path));
}
}
Err(ExtractHeredocError::CommandDidNotStartWithApplyPatch)
}

View File

@@ -1,4 +1,3 @@
mod invocation;
mod parser;
mod seek_sequence;
mod standalone_executable;
@@ -6,6 +5,8 @@ mod standalone_executable;
use std::collections::HashMap;
use std::path::Path;
use std::path::PathBuf;
use std::str::Utf8Error;
use std::sync::LazyLock;
use anyhow::Context;
use anyhow::Result;
@@ -16,15 +17,27 @@ use parser::UpdateFileChunk;
pub use parser::parse_patch;
use similar::TextDiff;
use thiserror::Error;
use tree_sitter::LanguageError;
use tree_sitter::Parser;
use tree_sitter::Query;
use tree_sitter::QueryCursor;
use tree_sitter::StreamingIterator;
use tree_sitter_bash::LANGUAGE as BASH;
pub use invocation::maybe_parse_apply_patch_verified;
pub use standalone_executable::main;
use crate::invocation::ExtractHeredocError;
/// Detailed instructions for gpt-4.1 on how to use the `apply_patch` tool.
pub const APPLY_PATCH_TOOL_INSTRUCTIONS: &str = include_str!("../apply_patch_tool_instructions.md");
const APPLY_PATCH_COMMANDS: [&str; 2] = ["apply_patch", "applypatch"];
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum ApplyPatchShell {
Unix,
PowerShell,
Cmd,
}
#[derive(Debug, Error, PartialEq)]
pub enum ApplyPatchError {
#[error(transparent)]
@@ -73,6 +86,14 @@ impl PartialEq for IoError {
}
}
#[derive(Debug, PartialEq)]
pub enum MaybeApplyPatch {
Body(ApplyPatchArgs),
ShellParseError(ExtractHeredocError),
PatchParseError(ParseError),
NotApplyPatch,
}
/// Both the raw PATCH argument to `apply_patch` and that same argument
/// parsed into hunks.
#[derive(Debug, PartialEq)]
@@ -82,6 +103,84 @@ pub struct ApplyPatchArgs {
pub workdir: Option<String>,
}
fn classify_shell_name(shell: &str) -> Option<String> {
std::path::Path::new(shell)
.file_stem()
.and_then(|name| name.to_str())
.map(str::to_ascii_lowercase)
}
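/// Maps a (shell, flag) pair to a supported shell family, e.g. (illustrative)
/// ("/bin/bash", "-lc") -> Unix, ("pwsh", "-Command") -> PowerShell,
/// ("cmd.exe", "/C") -> Cmd; any other combination yields None.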
fn classify_shell(shell: &str, flag: &str) -> Option<ApplyPatchShell> {
classify_shell_name(shell).and_then(|name| match name.as_str() {
"bash" | "zsh" | "sh" if flag == "-lc" => Some(ApplyPatchShell::Unix),
"pwsh" | "powershell" if flag.eq_ignore_ascii_case("-command") => {
Some(ApplyPatchShell::PowerShell)
}
"cmd" if flag.eq_ignore_ascii_case("/c") => Some(ApplyPatchShell::Cmd),
_ => None,
})
}
fn can_skip_flag(shell: &str, flag: &str) -> bool {
classify_shell_name(shell).is_some_and(|name| {
matches!(name.as_str(), "pwsh" | "powershell") && flag.eq_ignore_ascii_case("-noprofile")
})
}
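/// Recognizes three- and four-element argv shapes, e.g. (illustrative)
/// `["bash", "-lc", SCRIPT]`, `["cmd", "/c", SCRIPT]`, or
/// `["pwsh", "-NoProfile", "-Command", SCRIPT]`, where the extra
/// `-NoProfile` flag is skipped before classifying the shell.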
fn parse_shell_script(argv: &[String]) -> Option<(ApplyPatchShell, &str)> {
match argv {
[shell, flag, script] => classify_shell(shell, flag).map(|shell_type| {
let script = script.as_str();
(shell_type, script)
}),
[shell, skip_flag, flag, script] if can_skip_flag(shell, skip_flag) => {
classify_shell(shell, flag).map(|shell_type| {
let script = script.as_str();
(shell_type, script)
})
}
_ => None,
}
}
fn extract_apply_patch_from_shell(
shell: ApplyPatchShell,
script: &str,
) -> std::result::Result<(String, Option<String>), ExtractHeredocError> {
match shell {
ApplyPatchShell::Unix | ApplyPatchShell::PowerShell | ApplyPatchShell::Cmd => {
extract_apply_patch_from_bash(script)
}
}
}
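/// A minimal sketch of the direct form (illustrative, not run as a doctest):
///
/// ```ignore
/// let argv = vec!["apply_patch".to_string(), patch_body.to_string()];
/// match maybe_parse_apply_patch(&argv) {
///     MaybeApplyPatch::Body(args) => { /* args.hunks parsed from patch_body */ }
///     _ => { /* not an apply_patch invocation */ }
/// }
/// ```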
pub fn maybe_parse_apply_patch(argv: &[String]) -> MaybeApplyPatch {
match argv {
// Direct invocation: apply_patch <patch>
[cmd, body] if APPLY_PATCH_COMMANDS.contains(&cmd.as_str()) => match parse_patch(body) {
Ok(source) => MaybeApplyPatch::Body(source),
Err(e) => MaybeApplyPatch::PatchParseError(e),
},
// Shell heredoc form: (optional `cd <path> &&`) apply_patch <<'EOF' ...
_ => match parse_shell_script(argv) {
Some((shell, script)) => match extract_apply_patch_from_shell(shell, script) {
Ok((body, workdir)) => match parse_patch(&body) {
Ok(mut source) => {
source.workdir = workdir;
MaybeApplyPatch::Body(source)
}
Err(e) => MaybeApplyPatch::PatchParseError(e),
},
Err(ExtractHeredocError::CommandDidNotStartWithApplyPatch) => {
MaybeApplyPatch::NotApplyPatch
}
Err(e) => MaybeApplyPatch::ShellParseError(e),
},
None => MaybeApplyPatch::NotApplyPatch,
},
}
}
#[derive(Debug, PartialEq)]
pub enum ApplyPatchFileChange {
Add {
@@ -170,6 +269,256 @@ impl ApplyPatchAction {
}
}
/// cwd must be an absolute path so that we can resolve relative paths in the
/// patch.
pub fn maybe_parse_apply_patch_verified(argv: &[String], cwd: &Path) -> MaybeApplyPatchVerified {
// Detect a raw patch body passed directly as the command or as the body of a shell
// script. In these cases, report an explicit error rather than applying the patch.
if let [body] = argv
&& parse_patch(body).is_ok()
{
return MaybeApplyPatchVerified::CorrectnessError(ApplyPatchError::ImplicitInvocation);
}
if let Some((_, script)) = parse_shell_script(argv)
&& parse_patch(script).is_ok()
{
return MaybeApplyPatchVerified::CorrectnessError(ApplyPatchError::ImplicitInvocation);
}
match maybe_parse_apply_patch(argv) {
MaybeApplyPatch::Body(ApplyPatchArgs {
patch,
hunks,
workdir,
}) => {
let effective_cwd = workdir
.as_ref()
.map(|dir| {
let path = Path::new(dir);
if path.is_absolute() {
path.to_path_buf()
} else {
cwd.join(path)
}
})
.unwrap_or_else(|| cwd.to_path_buf());
let mut changes = HashMap::new();
for hunk in hunks {
let path = hunk.resolve_path(&effective_cwd);
match hunk {
Hunk::AddFile { contents, .. } => {
changes.insert(path, ApplyPatchFileChange::Add { content: contents });
}
Hunk::DeleteFile { .. } => {
let content = match std::fs::read_to_string(&path) {
Ok(content) => content,
Err(e) => {
return MaybeApplyPatchVerified::CorrectnessError(
ApplyPatchError::IoError(IoError {
context: format!("Failed to read {}", path.display()),
source: e,
}),
);
}
};
changes.insert(path, ApplyPatchFileChange::Delete { content });
}
Hunk::UpdateFile {
move_path, chunks, ..
} => {
let ApplyPatchFileUpdate {
unified_diff,
content: contents,
} = match unified_diff_from_chunks(&path, &chunks) {
Ok(diff) => diff,
Err(e) => {
return MaybeApplyPatchVerified::CorrectnessError(e);
}
};
changes.insert(
path,
ApplyPatchFileChange::Update {
unified_diff,
move_path: move_path.map(|p| effective_cwd.join(p)),
new_content: contents,
},
);
}
}
}
MaybeApplyPatchVerified::Body(ApplyPatchAction {
changes,
patch,
cwd: effective_cwd,
})
}
MaybeApplyPatch::ShellParseError(e) => MaybeApplyPatchVerified::ShellParseError(e),
MaybeApplyPatch::PatchParseError(e) => MaybeApplyPatchVerified::CorrectnessError(e.into()),
MaybeApplyPatch::NotApplyPatch => MaybeApplyPatchVerified::NotApplyPatch,
}
}
/// Extract the heredoc body (and optional `cd` workdir) from a `bash -lc` script
/// that invokes the apply_patch tool using a heredoc.
///
/// Supported top-level forms (must be the only top-level statement):
/// - `apply_patch <<'EOF'\n...\nEOF`
/// - `cd <path> && apply_patch <<'EOF'\n...\nEOF`
///
/// Notes about matching:
/// - Parsed with Tree-sitter Bash and a strict query that uses anchors so the
/// heredoc-redirected statement is the only top-level statement.
/// - The connector between `cd` and `apply_patch` must be `&&` (not `|` or `||`).
/// - Exactly one positional argument is allowed for `cd` (a bare word, a quoted
/// string, or a raw string; no flags, no second argument).
/// - The apply command is validated in-query via `#any-of?` to allow `apply_patch`
/// or `applypatch`.
/// - Preceding or trailing commands (e.g., `echo ...;` or `... && echo done`) do not match.
///
/// Returns `(heredoc_body, Some(path))` when the `cd` variant matches, or
/// `(heredoc_body, None)` for the direct form. Errors are returned if the script
/// cannot be parsed or does not match the allowed patterns.
fn extract_apply_patch_from_bash(
src: &str,
) -> std::result::Result<(String, Option<String>), ExtractHeredocError> {
// This function uses a Tree-sitter query to recognize one of two
// whole-script forms, each expressed as a single top-level statement:
//
// 1. apply_patch <<'EOF'\n...\nEOF
// 2. cd <path> && apply_patch <<'EOF'\n...\nEOF
//
// Key ideas when reading the query:
// - dots (`.`) between named nodes enforce adjacency among named children and
// anchor the match to the start/end of the expression.
// - we match a single redirected_statement directly under program with leading
// and trailing anchors (`.`). This ensures it is the only top-level statement
// (so prefixes like `echo ...;` or suffixes like `... && echo done` do not match).
//
// Overall, we want to be conservative and only match the intended forms, as other
// forms are likely to be model errors, or incorrectly interpreted by later code.
//
// If you're editing this query, it's helpful to start by creating a debugging binary
// which will let you see the AST of an arbitrary bash script passed in, and optionally
// also run an arbitrary query against the AST. This is useful for understanding
// how tree-sitter parses the script and whether the query syntax is correct. Be sure
// to test both positive and negative cases.
static APPLY_PATCH_QUERY: LazyLock<Query> = LazyLock::new(|| {
let language = BASH.into();
#[expect(clippy::expect_used)]
Query::new(
&language,
r#"
(
program
. (redirected_statement
body: (command
name: (command_name (word) @apply_name) .)
(#any-of? @apply_name "apply_patch" "applypatch")
redirect: (heredoc_redirect
. (heredoc_start)
. (heredoc_body) @heredoc
. (heredoc_end)
.))
.)
(
program
. (redirected_statement
body: (list
. (command
name: (command_name (word) @cd_name) .
argument: [
(word) @cd_path
(string (string_content) @cd_path)
(raw_string) @cd_raw_string
] .)
"&&"
. (command
name: (command_name (word) @apply_name))
.)
(#eq? @cd_name "cd")
(#any-of? @apply_name "apply_patch" "applypatch")
redirect: (heredoc_redirect
. (heredoc_start)
. (heredoc_body) @heredoc
. (heredoc_end)
.))
.)
"#,
)
.expect("valid bash query")
});
let lang = BASH.into();
let mut parser = Parser::new();
parser
.set_language(&lang)
.map_err(ExtractHeredocError::FailedToLoadBashGrammar)?;
let tree = parser
.parse(src, None)
.ok_or(ExtractHeredocError::FailedToParsePatchIntoAst)?;
let bytes = src.as_bytes();
let root = tree.root_node();
let mut cursor = QueryCursor::new();
let mut matches = cursor.matches(&APPLY_PATCH_QUERY, root, bytes);
while let Some(m) = matches.next() {
let mut heredoc_text: Option<String> = None;
let mut cd_path: Option<String> = None;
for capture in m.captures.iter() {
let name = APPLY_PATCH_QUERY.capture_names()[capture.index as usize];
match name {
"heredoc" => {
let text = capture
.node
.utf8_text(bytes)
.map_err(ExtractHeredocError::HeredocNotUtf8)?
.trim_end_matches('\n')
.to_string();
heredoc_text = Some(text);
}
"cd_path" => {
let text = capture
.node
.utf8_text(bytes)
.map_err(ExtractHeredocError::HeredocNotUtf8)?
.to_string();
cd_path = Some(text);
}
"cd_raw_string" => {
let raw = capture
.node
.utf8_text(bytes)
.map_err(ExtractHeredocError::HeredocNotUtf8)?;
let trimmed = raw
.strip_prefix('\'')
.and_then(|s| s.strip_suffix('\''))
.unwrap_or(raw);
cd_path = Some(trimmed.to_string());
}
_ => {}
}
}
if let Some(heredoc) = heredoc_text {
return Ok((heredoc, cd_path));
}
}
Err(ExtractHeredocError::CommandDidNotStartWithApplyPatch)
}
#[derive(Debug, PartialEq)]
pub enum ExtractHeredocError {
CommandDidNotStartWithApplyPatch,
FailedToLoadBashGrammar(LanguageError),
HeredocNotUtf8(Utf8Error),
FailedToParsePatchIntoAst,
FailedToFindHeredocBody,
}
/// Applies the patch and prints the result to stdout/stderr.
pub fn apply_patch(
patch: &str,
@@ -544,9 +893,6 @@ pub fn print_summary(
#[cfg(test)]
mod tests {
use crate::invocation::MaybeApplyPatch;
use crate::invocation::maybe_parse_apply_patch;
use super::*;
use assert_matches::assert_matches;
use pretty_assertions::assert_eq;
@@ -703,13 +1049,6 @@ mod tests {
assert_match(&heredoc_script(""), None);
}
#[test]
fn test_heredoc_non_login_shell() {
let script = heredoc_script("");
let args = strs_to_strings(&["bash", "-c", &script]);
assert_match_args(args, None);
}
#[test]
fn test_heredoc_applypatch() {
let args = strs_to_strings(&[

View File

@@ -1 +0,0 @@
** text eol=lf

View File

@@ -1 +0,0 @@
This is a new file

View File

@@ -1,4 +0,0 @@
*** Begin Patch
*** Add File: bar.md
+This is a new file
*** End Patch

View File

@@ -1,9 +0,0 @@
*** Begin Patch
*** Add File: nested/new.txt
+created
*** Delete File: delete.txt
*** Update File: modify.txt
@@
-line2
+changed
*** End Patch

View File

@@ -1,4 +0,0 @@
line1
changed2
line3
changed4

View File

@@ -1,4 +0,0 @@
line1
line2
line3
line4

View File

@@ -1,9 +0,0 @@
*** Begin Patch
*** Update File: multi.txt
@@
-line2
+changed2
@@
-line4
+changed4
*** End Patch

View File

@@ -1,7 +0,0 @@
*** Begin Patch
*** Update File: old/name.txt
*** Move to: renamed/dir/name.txt
@@
-old content
+new content
*** End Patch

View File

@@ -1,2 +0,0 @@
*** Begin Patch
*** End Patch

View File

@@ -1,6 +0,0 @@
*** Begin Patch
*** Update File: modify.txt
@@
-missing
+changed
*** End Patch

View File

@@ -1,3 +0,0 @@
*** Begin Patch
*** Delete File: missing.txt
*** End Patch

View File

@@ -1,3 +0,0 @@
*** Begin Patch
*** Update File: foo.txt
*** End Patch

View File

@@ -1,6 +0,0 @@
*** Begin Patch
*** Update File: missing.txt
@@
-old
+new
*** End Patch

View File

@@ -1,7 +0,0 @@
*** Begin Patch
*** Update File: old/name.txt
*** Move to: renamed/dir/name.txt
@@
-from
+new
*** End Patch

View File

@@ -1,4 +0,0 @@
*** Begin Patch
*** Add File: duplicate.txt
+new content
*** End Patch

View File

@@ -1,3 +0,0 @@
*** Begin Patch
*** Delete File: dir
*** End Patch

View File

@@ -1,3 +0,0 @@
*** Begin Patch
*** Frobnicate File: foo
*** End Patch

View File

@@ -1,7 +0,0 @@
*** Begin Patch
*** Update File: no_newline.txt
@@
-no newline at end
+first line
+second line
*** End Patch

View File

@@ -1,8 +0,0 @@
*** Begin Patch
*** Add File: created.txt
+hello
*** Update File: missing.txt
@@
-old
+new
*** End Patch

View File

@@ -1,4 +0,0 @@
line1
line2
added line 1
added line 2

View File

@@ -1,6 +0,0 @@
*** Begin Patch
*** Update File: input.txt
@@
+added line 1
+added line 2
*** End Patch

View File

@@ -1,6 +0,0 @@
*** Begin Patch
*** Update File: foo.txt
@@
-old
+new
*** End Patch

View File

@@ -1,6 +0,0 @@
*** Begin Patch
*** Update File: file.txt
@@
-one
+two
*** End Patch

View File

@@ -1,18 +0,0 @@
# Overview
This directory is a collection of end-to-end tests for the apply-patch specification, meant to be easily portable to other languages or platforms.
# Specification
Each test case is one directory, composed of input state (input/), the patch operation (patch.txt), and the expected final state (expected/). This structure keeps tests simple (each case applies exactly one patch) while still providing enough flexibility to test any given operation across files.
Here's what this looks like for a simple apply-patch test case that creates a new file:
```
001_add/
input/
foo.md
expected/
foo.md
bar.md
patch.txt
```
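For the layout above, `patch.txt` holds the single patch that turns the input state into the expected state; for instance, a patch that adds `bar.md`:
```
*** Begin Patch
*** Add File: bar.md
+This is a new file
*** End Patch
```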

View File

@@ -1,4 +1,3 @@
mod cli;
mod scenarios;
#[cfg(not(target_os = "windows"))]
mod tool;

View File

@@ -1,114 +0,0 @@
use assert_cmd::prelude::*;
use pretty_assertions::assert_eq;
use std::collections::BTreeMap;
use std::fs;
use std::path::Path;
use std::path::PathBuf;
use std::process::Command;
use tempfile::tempdir;
#[test]
fn test_apply_patch_scenarios() -> anyhow::Result<()> {
for scenario in fs::read_dir("tests/fixtures/scenarios")? {
let scenario = scenario?;
let path = scenario.path();
if path.is_dir() {
run_apply_patch_scenario(&path)?;
}
}
Ok(())
}
/// Reads a scenario directory, copies the input files to a temporary directory, runs apply-patch,
/// and asserts that the final state matches the expected state exactly.
fn run_apply_patch_scenario(dir: &Path) -> anyhow::Result<()> {
let tmp = tempdir()?;
// Copy the input files to the temporary directory
let input_dir = dir.join("input");
if input_dir.is_dir() {
copy_dir_recursive(&input_dir, tmp.path())?;
}
// Read the patch.txt file
let patch = fs::read_to_string(dir.join("patch.txt"))?;
// Run apply_patch in the temporary directory. We intentionally do not assert
// on the exit status here; the scenarios are specified purely in terms of
// final filesystem state, which we compare below.
Command::cargo_bin("apply_patch")?
.arg(patch)
.current_dir(tmp.path())
.output()?;
// Assert that the final state matches the expected state exactly
let expected_dir = dir.join("expected");
let expected_snapshot = snapshot_dir(&expected_dir)?;
let actual_snapshot = snapshot_dir(tmp.path())?;
assert_eq!(
actual_snapshot,
expected_snapshot,
"Scenario {} did not match expected final state",
dir.display()
);
Ok(())
}
#[derive(Debug, Clone, PartialEq, Eq)]
enum Entry {
File(Vec<u8>),
Dir,
}
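/// Captures a directory tree as a map from relative path to entry (file bytes
/// or a `Dir` marker) so actual and expected states compare with one `assert_eq!`.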
fn snapshot_dir(root: &Path) -> anyhow::Result<BTreeMap<PathBuf, Entry>> {
let mut entries = BTreeMap::new();
if root.is_dir() {
snapshot_dir_recursive(root, root, &mut entries)?;
}
Ok(entries)
}
fn snapshot_dir_recursive(
base: &Path,
dir: &Path,
entries: &mut BTreeMap<PathBuf, Entry>,
) -> anyhow::Result<()> {
for entry in fs::read_dir(dir)? {
let entry = entry?;
let path = entry.path();
let Ok(stripped) = path.strip_prefix(base) else {
continue;
};
let rel = stripped.to_path_buf();
let file_type = entry.file_type()?;
if file_type.is_dir() {
entries.insert(rel.clone(), Entry::Dir);
snapshot_dir_recursive(base, &path, entries)?;
} else if file_type.is_file() {
let contents = fs::read(&path)?;
entries.insert(rel, Entry::File(contents));
}
}
Ok(())
}
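/// Recursively copies the contents of `src` into `dst`, creating destination
/// directories as needed; symlinks and other special files are skipped.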
fn copy_dir_recursive(src: &Path, dst: &Path) -> anyhow::Result<()> {
for entry in fs::read_dir(src)? {
let entry = entry?;
let path = entry.path();
let file_type = entry.file_type()?;
let dest_path = dst.join(entry.file_name());
if file_type.is_dir() {
fs::create_dir_all(&dest_path)?;
copy_dir_recursive(&path, &dest_path)?;
} else if file_type.is_file() {
if let Some(parent) = dest_path.parent() {
fs::create_dir_all(parent)?;
}
fs::copy(&path, &dest_path)?;
}
}
Ok(())
}

Some files were not shown because too many files have changed in this diff.