Mirror of https://github.com/openai/codex.git (synced 2026-02-06 08:53:41 +00:00)
Compare commits: dev/cc/rel ... pr7559 (1 commit)
Commit: c614258961
@@ -1,2 +1 @@
 iTerm
-psuedo
.github/actions/linux-code-sign/action.yml (vendored, 44 lines changed)
@@ -1,44 +0,0 @@
-name: linux-code-sign
-description: Sign Linux artifacts with cosign.
-inputs:
-  target:
-    description: Target triple for the artifacts to sign.
-    required: true
-  artifacts-dir:
-    description: Absolute path to the directory containing built binaries to sign.
-    required: true
-
-runs:
-  using: composite
-  steps:
-    - name: Install cosign
-      uses: sigstore/cosign-installer@v3.7.0
-
-    - name: Cosign Linux artifacts
-      shell: bash
-      env:
-        COSIGN_EXPERIMENTAL: "1"
-        COSIGN_YES: "true"
-        COSIGN_OIDC_CLIENT_ID: "sigstore"
-        COSIGN_OIDC_ISSUER: "https://oauth2.sigstore.dev/auth"
-      run: |
-        set -euo pipefail
-
-        dest="${{ inputs.artifacts-dir }}"
-        if [[ ! -d "$dest" ]]; then
-          echo "Destination $dest does not exist"
-          exit 1
-        fi
-
-        for binary in codex codex-responses-api-proxy; do
-          artifact="${dest}/${binary}"
-          if [[ ! -f "$artifact" ]]; then
-            echo "Binary $artifact not found"
-            exit 1
-          fi
-
-          cosign sign-blob \
-            --yes \
-            --bundle "${artifact}.sigstore" \
-            "$artifact"
-        done
.github/actions/windows-code-sign/action.yml (vendored, 57 lines changed)
@@ -1,57 +0,0 @@
-name: windows-code-sign
-description: Sign Windows binaries with Azure Trusted Signing.
-inputs:
-  target:
-    description: Target triple for the artifacts to sign.
-    required: true
-  client-id:
-    description: Azure Trusted Signing client ID.
-    required: true
-  tenant-id:
-    description: Azure tenant ID for Trusted Signing.
-    required: true
-  subscription-id:
-    description: Azure subscription ID for Trusted Signing.
-    required: true
-  endpoint:
-    description: Azure Trusted Signing endpoint.
-    required: true
-  account-name:
-    description: Azure Trusted Signing account name.
-    required: true
-  certificate-profile-name:
-    description: Certificate profile name for signing.
-    required: true
-
-runs:
-  using: composite
-  steps:
-    - name: Azure login for Trusted Signing (OIDC)
-      uses: azure/login@v2
-      with:
-        client-id: ${{ inputs.client-id }}
-        tenant-id: ${{ inputs.tenant-id }}
-        subscription-id: ${{ inputs.subscription-id }}
-
-    - name: Sign Windows binaries with Azure Trusted Signing
-      uses: azure/trusted-signing-action@v0
-      with:
-        endpoint: ${{ inputs.endpoint }}
-        trusted-signing-account-name: ${{ inputs.account-name }}
-        certificate-profile-name: ${{ inputs.certificate-profile-name }}
-        exclude-environment-credential: true
-        exclude-workload-identity-credential: true
-        exclude-managed-identity-credential: true
-        exclude-shared-token-cache-credential: true
-        exclude-visual-studio-credential: true
-        exclude-visual-studio-code-credential: true
-        exclude-azure-cli-credential: false
-        exclude-azure-powershell-credential: true
-        exclude-azure-developer-cli-credential: true
-        exclude-interactive-browser-credential: true
-        cache-dependencies: false
-        files: |
-          ${{ github.workspace }}/codex-rs/target/${{ inputs.target }}/release/codex.exe
-          ${{ github.workspace }}/codex-rs/target/${{ inputs.target }}/release/codex-responses-api-proxy.exe
-          ${{ github.workspace }}/codex-rs/target/${{ inputs.target }}/release/codex-windows-sandbox-setup.exe
-          ${{ github.workspace }}/codex-rs/target/${{ inputs.target }}/release/codex-command-runner.exe
.github/workflows/ci.yml (vendored, 2 lines changed)
@@ -46,7 +46,7 @@ jobs:
           echo "pack_output=$PACK_OUTPUT" >> "$GITHUB_OUTPUT"

       - name: Upload staged npm package artifact
-        uses: actions/upload-artifact@v6
+        uses: actions/upload-artifact@v5
         with:
           name: codex-npm-staging
           path: ${{ steps.stage_npm_package.outputs.pack_output }}
.github/workflows/rust-ci.yml (vendored, 41 lines changed)
@@ -166,7 +166,7 @@ jobs:
       # avoid caching the large target dir on the gnu-dev job.
       - name: Restore cargo home cache
         id: cache_cargo_home_restore
-        uses: actions/cache/restore@v5
+        uses: actions/cache/restore@v4
         with:
           path: |
             ~/.cargo/bin/
@@ -207,7 +207,7 @@ jobs:
       - name: Restore sccache cache (fallback)
         if: ${{ env.USE_SCCACHE == 'true' && env.SCCACHE_GHA_ENABLED != 'true' }}
         id: cache_sccache_restore
-        uses: actions/cache/restore@v5
+        uses: actions/cache/restore@v4
         with:
           path: ${{ github.workspace }}/.sccache/
           key: sccache-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-${{ steps.lockhash.outputs.hash }}-${{ github.run_id }}
@@ -226,7 +226,7 @@ jobs:
       - if: ${{ matrix.target == 'x86_64-unknown-linux-musl' || matrix.target == 'aarch64-unknown-linux-musl'}}
         name: Restore APT cache (musl)
         id: cache_apt_restore
-        uses: actions/cache/restore@v5
+        uses: actions/cache/restore@v4
         with:
           path: |
             /var/cache/apt
@@ -280,7 +280,7 @@ jobs:
       - name: Save cargo home cache
         if: always() && !cancelled() && steps.cache_cargo_home_restore.outputs.cache-hit != 'true'
         continue-on-error: true
-        uses: actions/cache/save@v5
+        uses: actions/cache/save@v4
         with:
           path: |
             ~/.cargo/bin/
@@ -292,7 +292,7 @@ jobs:
       - name: Save sccache cache (fallback)
         if: always() && !cancelled() && env.USE_SCCACHE == 'true' && env.SCCACHE_GHA_ENABLED != 'true'
         continue-on-error: true
-        uses: actions/cache/save@v5
+        uses: actions/cache/save@v4
         with:
           path: ${{ github.workspace }}/.sccache/
           key: sccache-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-${{ steps.lockhash.outputs.hash }}-${{ github.run_id }}
@@ -317,7 +317,7 @@ jobs:
       - name: Save APT cache (musl)
         if: always() && !cancelled() && (matrix.target == 'x86_64-unknown-linux-musl' || matrix.target == 'aarch64-unknown-linux-musl') && steps.cache_apt_restore.outputs.cache-hit != 'true'
         continue-on-error: true
-        uses: actions/cache/save@v5
+        uses: actions/cache/save@v4
         with:
           path: |
             /var/cache/apt
@@ -369,27 +369,6 @@ jobs:
     steps:
       - uses: actions/checkout@v6
-
-      # We have been running out of space when running this job on Linux for
-      # x86_64-unknown-linux-gnu, so remove some unnecessary dependencies.
-      - name: Remove unnecessary dependencies to save space
-        if: ${{ startsWith(matrix.runner, 'ubuntu') }}
-        shell: bash
-        run: |
-          set -euo pipefail
-          sudo rm -rf \
-            /usr/local/lib/android \
-            /usr/share/dotnet \
-            /usr/local/share/boost \
-            /usr/local/lib/node_modules \
-            /opt/ghc
-          sudo apt-get remove -y docker.io docker-compose podman buildah
-
-      # Some integration tests rely on DotSlash being installed.
-      # See https://github.com/openai/codex/pull/7617.
-      - name: Install DotSlash
-        uses: facebook/install-dotslash@v2

       - uses: dtolnay/rust-toolchain@1.90
         with:
           targets: ${{ matrix.target }}
@@ -405,7 +384,7 @@ jobs:
       - name: Restore cargo home cache
         id: cache_cargo_home_restore
-        uses: actions/cache/restore@v5
+        uses: actions/cache/restore@v4
         with:
           path: |
             ~/.cargo/bin/
@@ -445,7 +424,7 @@ jobs:
       - name: Restore sccache cache (fallback)
         if: ${{ env.USE_SCCACHE == 'true' && env.SCCACHE_GHA_ENABLED != 'true' }}
         id: cache_sccache_restore
-        uses: actions/cache/restore@v5
+        uses: actions/cache/restore@v4
         with:
           path: ${{ github.workspace }}/.sccache/
           key: sccache-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-${{ steps.lockhash.outputs.hash }}-${{ github.run_id }}
@@ -468,7 +447,7 @@ jobs:
       - name: Save cargo home cache
         if: always() && !cancelled() && steps.cache_cargo_home_restore.outputs.cache-hit != 'true'
         continue-on-error: true
-        uses: actions/cache/save@v5
+        uses: actions/cache/save@v4
         with:
           path: |
             ~/.cargo/bin/
@@ -480,7 +459,7 @@ jobs:
       - name: Save sccache cache (fallback)
         if: always() && !cancelled() && env.USE_SCCACHE == 'true' && env.SCCACHE_GHA_ENABLED != 'true'
         continue-on-error: true
-        uses: actions/cache/save@v5
+        uses: actions/cache/save@v4
         with:
           path: ${{ github.workspace }}/.sccache/
           key: sccache-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-${{ steps.lockhash.outputs.hash }}-${{ github.run_id }}
.github/workflows/rust-release.yml (vendored, 102 lines changed)
@@ -50,9 +50,6 @@ jobs:
     name: Build - ${{ matrix.runner }} - ${{ matrix.target }}
     runs-on: ${{ matrix.runner }}
     timeout-minutes: 30
-    permissions:
-      contents: read
-      id-token: write
     defaults:
       run:
         working-directory: codex-rs
@@ -84,7 +81,7 @@ jobs:
         with:
           targets: ${{ matrix.target }}

-      - uses: actions/cache@v5
+      - uses: actions/cache@v4
        with:
          path: |
            ~/.cargo/bin/
@@ -101,32 +98,7 @@ jobs:
            sudo apt-get install -y musl-tools pkg-config

       - name: Cargo build
         shell: bash
-        run: |
-          if [[ "${{ contains(matrix.target, 'windows') }}" == 'true' ]]; then
-            cargo build --target ${{ matrix.target }} --release --bin codex --bin codex-responses-api-proxy --bin codex-windows-sandbox-setup --bin codex-command-runner
-          else
-            cargo build --target ${{ matrix.target }} --release --bin codex --bin codex-responses-api-proxy
-          fi
-
-      - if: ${{ contains(matrix.target, 'linux') }}
-        name: Cosign Linux artifacts
-        uses: ./.github/actions/linux-code-sign
-        with:
-          target: ${{ matrix.target }}
-          artifacts-dir: ${{ github.workspace }}/codex-rs/target/${{ matrix.target }}/release
-
-      - if: ${{ contains(matrix.target, 'windows') }}
-        name: Sign Windows binaries with Azure Trusted Signing
-        uses: ./.github/actions/windows-code-sign
-        with:
-          target: ${{ matrix.target }}
-          client-id: ${{ secrets.AZURE_TRUSTED_SIGNING_CLIENT_ID }}
-          tenant-id: ${{ secrets.AZURE_TRUSTED_SIGNING_TENANT_ID }}
-          subscription-id: ${{ secrets.AZURE_TRUSTED_SIGNING_SUBSCRIPTION_ID }}
-          endpoint: ${{ secrets.AZURE_TRUSTED_SIGNING_ENDPOINT }}
-          account-name: ${{ secrets.AZURE_TRUSTED_SIGNING_ACCOUNT_NAME }}
-          certificate-profile-name: ${{ secrets.AZURE_TRUSTED_SIGNING_CERTIFICATE_PROFILE_NAME }}
+        run: cargo build --target ${{ matrix.target }} --release --bin codex --bin codex-responses-api-proxy

       - if: ${{ matrix.runner == 'macos-15-xlarge' }}
         name: Configure Apple code signing
@@ -262,7 +234,6 @@
           local binary="$1"
           local source_path="target/${{ matrix.target }}/release/${binary}"
           local archive_path="${RUNNER_TEMP}/${binary}.zip"
-          local ticket_path="target/${{ matrix.target }}/release/${binary}.notarization-ticket.json"

           if [[ ! -f "$source_path" ]]; then
             echo "Binary $source_path not found"
@@ -293,22 +264,6 @@
             echo "Notarization failed for ${binary} (submission ${submission_id}, status ${status})"
             exit 1
           fi
-
-          log_json=$(xcrun notarytool log "$submission_id" \
-            --key "$notary_key_path" \
-            --key-id "$APPLE_NOTARIZATION_KEY_ID" \
-            --issuer "$APPLE_NOTARIZATION_ISSUER_ID" \
-            --output-format json)
-
-          jq -n \
-            --arg binary "$binary" \
-            --arg target "${{ matrix.target }}" \
-            --arg id "$submission_id" \
-            --arg status "$status" \
-            --argjson submission "$submission_json" \
-            --argjson log "$log_json" \
-            '{binary: $binary, target: $target, id: $id, status: $status, submission: $submission, log: $log}' \
-            > "$ticket_path"
         }

         notarize_binary "codex"
@@ -323,28 +278,11 @@
          if [[ "${{ matrix.runner }}" == windows* ]]; then
            cp target/${{ matrix.target }}/release/codex.exe "$dest/codex-${{ matrix.target }}.exe"
            cp target/${{ matrix.target }}/release/codex-responses-api-proxy.exe "$dest/codex-responses-api-proxy-${{ matrix.target }}.exe"
            cp target/${{ matrix.target }}/release/codex-windows-sandbox-setup.exe "$dest/codex-windows-sandbox-setup-${{ matrix.target }}.exe"
            cp target/${{ matrix.target }}/release/codex-command-runner.exe "$dest/codex-command-runner-${{ matrix.target }}.exe"
          else
            cp target/${{ matrix.target }}/release/codex "$dest/codex-${{ matrix.target }}"
            cp target/${{ matrix.target }}/release/codex-responses-api-proxy "$dest/codex-responses-api-proxy-${{ matrix.target }}"
          fi
-
-          if [[ "${{ matrix.runner }}" == macos* ]]; then
-            for binary in codex codex-responses-api-proxy; do
-              ticket_src="target/${{ matrix.target }}/release/${binary}.notarization-ticket.json"
-              ticket_dest="$dest/${binary}-${{ matrix.target }}.notarization-ticket.json"
-              if [[ -f "$ticket_src" ]]; then
-                cp "$ticket_src" "$ticket_dest"
-              fi
-            done
-          fi
-
-          if [[ "${{ matrix.target }}" == *linux* ]]; then
-            cp target/${{ matrix.target }}/release/codex.sigstore "$dest/codex-${{ matrix.target }}.sigstore"
-            cp target/${{ matrix.target }}/release/codex-responses-api-proxy.sigstore "$dest/codex-responses-api-proxy-${{ matrix.target }}.sigstore"
-          fi

       - if: ${{ matrix.runner == 'windows-11-arm' }}
         name: Install zstd
         shell: powershell
@@ -368,10 +306,10 @@
          # For compatibility with environments that lack the `zstd` tool we
          # additionally create a `.tar.gz` for all platforms and `.zip` for
-          # Windows and macOS alongside every single binary that we publish. The end result is:
+          # Windows alongside every single binary that we publish. The end result is:
          #   codex-<target>.zst (existing)
          #   codex-<target>.tar.gz (new)
-          #   codex-<target>.zip (Windows/macOS)
+          #   codex-<target>.zip (only for Windows)

          # 1. Produce a .tar.gz for every file in the directory *before* we
          #    run `zstd --rm`, because that flag deletes the original files.
@@ -383,36 +321,14 @@
            continue
          fi

-          # Don't try to compress signature bundles.
-          if [[ "$base" == *.sigstore ]]; then
-            continue
-          fi
-
-          # Notarization ticket sidecars are bundled into the per-binary
-          # archives; don't generate separate archives for them.
-          if [[ "$base" == *.notarization-ticket.json ]]; then
-            continue
-          fi
-
          # Create per-binary tar.gz
-          tar_inputs=("$base")
-          ticket_sidecar="${base}.notarization-ticket.json"
-          if [[ -f "$dest/$ticket_sidecar" ]]; then
-            tar_inputs+=("$ticket_sidecar")
-          fi
-          tar -C "$dest" -czf "$dest/${base}.tar.gz" "${tar_inputs[@]}"
+          tar -C "$dest" -czf "$dest/${base}.tar.gz" "$base"

          # Create zip archive for Windows binaries
          # Must run from inside the dest dir so 7z won't
          # embed the directory path inside the zip.
          if [[ "${{ matrix.runner }}" == windows* ]]; then
            (cd "$dest" && 7z a "${base}.zip" "$base")
-          elif [[ "${{ matrix.runner }}" == macos* ]]; then
-            if [[ -f "$dest/$ticket_sidecar" ]]; then
-              (cd "$dest" && zip -q "${base}.zip" "$base" "$ticket_sidecar")
-            else
-              (cd "$dest" && zip -q "${base}.zip" "$base")
-            fi
          fi

          # Also create .zst (existing behaviour) *and* remove the original
@@ -424,10 +340,6 @@
            zstd "${zstd_args[@]}" "$dest/$base"
          done
-
-          if [[ "${{ matrix.runner }}" == macos* ]]; then
-            rm -f "$dest"/*.notarization-ticket.json
-          fi

       - name: Remove signing keychain
         if: ${{ always() && matrix.runner == 'macos-15-xlarge' }}
         shell: bash
@@ -451,7 +363,7 @@
            fi
          fi

-      - uses: actions/upload-artifact@v6
+      - uses: actions/upload-artifact@v5
        with:
          name: ${{ matrix.target }}
          # Upload the per-binary .zst files as well as the new .tar.gz
@@ -487,7 +399,7 @@
       - name: Checkout repository
         uses: actions/checkout@v6

-      - uses: actions/download-artifact@v7
+      - uses: actions/download-artifact@v4
        with:
          path: dist
.github/workflows/shell-tool-mcp.yml (vendored, 12 lines changed)
@@ -113,7 +113,7 @@ jobs:
           cp "target/${{ matrix.target }}/release/codex-exec-mcp-server" "$dest/"
           cp "target/${{ matrix.target }}/release/codex-execve-wrapper" "$dest/"

-      - uses: actions/upload-artifact@v6
+      - uses: actions/upload-artifact@v5
        with:
          name: shell-tool-mcp-rust-${{ matrix.target }}
          path: artifacts/**
@@ -211,7 +211,7 @@ jobs:
           mkdir -p "$dest"
           cp bash "$dest/bash"

-      - uses: actions/upload-artifact@v6
+      - uses: actions/upload-artifact@v5
        with:
          name: shell-tool-mcp-bash-${{ matrix.target }}-${{ matrix.variant }}
          path: artifacts/**
@@ -253,7 +253,7 @@ jobs:
           mkdir -p "$dest"
           cp bash "$dest/bash"

-      - uses: actions/upload-artifact@v6
+      - uses: actions/upload-artifact@v5
        with:
          name: shell-tool-mcp-bash-${{ matrix.target }}-${{ matrix.variant }}
          path: artifacts/**
@@ -291,7 +291,7 @@ jobs:
         run: pnpm --filter @openai/codex-shell-tool-mcp run build

       - name: Download build artifacts
-        uses: actions/download-artifact@v7
+        uses: actions/download-artifact@v4
        with:
          path: artifacts
@@ -352,7 +352,7 @@ jobs:
           filename=$(PACK_INFO="$pack_info" node -e 'const data = JSON.parse(process.env.PACK_INFO); console.log(data[0].filename);')
           mv "dist/npm/${filename}" "dist/npm/codex-shell-tool-mcp-npm-${PACKAGE_VERSION}.tgz"

-      - uses: actions/upload-artifact@v6
+      - uses: actions/upload-artifact@v5
        with:
          name: codex-shell-tool-mcp-npm
          path: dist/npm/codex-shell-tool-mcp-npm-${{ env.PACKAGE_VERSION }}.tgz
@@ -386,7 +386,7 @@ jobs:
         run: npm install -g npm@latest

       - name: Download npm tarball
-        uses: actions/download-artifact@v7
+        uses: actions/download-artifact@v4
        with:
          name: codex-shell-tool-mcp-npm
          path: dist/npm
@@ -11,6 +11,7 @@ In the codex-rs folder where the rust code lives:
 - Always collapse if statements per https://rust-lang.github.io/rust-clippy/master/index.html#collapsible_if
 - Always inline format! args when possible per https://rust-lang.github.io/rust-clippy/master/index.html#uninlined_format_args
 - Use method references over closures when possible per https://rust-lang.github.io/rust-clippy/master/index.html#redundant_closure_for_method_calls
 - Do not use unsigned integers even if the number cannot be negative.
+- When writing tests, prefer comparing the equality of entire objects over fields one by one.
 - When making a change that adds or changes an API, ensure that the documentation in the `docs/` folder is up to date if applicable.
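As a quick, hedged illustration of the conventions above (hypothetical functions, not code from this diff), the collapsed-if, inlined-format-args, and method-reference rules look like this in practice:

// Illustrative sketch only; `describe` and `lengths` are hypothetical.
fn describe(count: i64, label: Option<&str>) -> String {
    // Collapsed `if` per collapsible_if: a let-chain instead of nested ifs.
    if let Some(label) = label
        && !label.is_empty()
    {
        // Inlined format! args per uninlined_format_args.
        return format!("{count} items tagged {label}");
    }
    format!("{count} items")
}

fn lengths(words: &[String]) -> Vec<usize> {
    // Method reference per redundant_closure_for_method_calls, instead of `|w| w.len()`.
    words.iter().map(String::len).collect()
}

Note the signed `i64` count, matching the guideline against unsigned integers.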
@@ -74,7 +75,6 @@ If you don’t have the tool:
 ### Test assertions

 - Tests should use pretty_assertions::assert_eq for clearer diffs. Import this at the top of the test module if it isn't already.
-- Prefer deep equals comparisons whenever possible. Perform `assert_eq!()` on entire objects, rather than individual fields.

 ### Integration tests (core)
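A minimal sketch of the preferred test shape (hypothetical struct; assumes `pretty_assertions` is already a dev-dependency):

#[cfg(test)]
mod tests {
    use pretty_assertions::assert_eq;

    #[derive(Debug, PartialEq)]
    struct Parsed {
        model: String,
        retries: i64,
    }

    #[test]
    fn parses_whole_object() {
        let actual = Parsed { model: "gpt".to_string(), retries: 3 };
        // Deep equality over the entire value; on failure pretty_assertions
        // prints a structural diff instead of two opaque Debug dumps.
        assert_eq!(Parsed { model: "gpt".to_string(), retries: 3 }, actual);
    }
}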
@@ -95,14 +95,6 @@ function detectPackageManager() {
     return "bun";
   }

-  if (
-    __dirname.includes(".bun/install/global") ||
-    __dirname.includes(".bun\\install\\global")
-  ) {
-    return "bun";
-  }
-
   return userAgent ? "npm" : null;
 }
codex-rs/Cargo.lock (generated, 739 lines changed)
File diff suppressed because it is too large.
@@ -34,8 +34,6 @@ members = [
     "stdio-to-uds",
     "otel",
     "tui",
-    "tui2",
     "utils/absolute-path",
     "utils/git",
-    "utils/cache",
     "utils/image",
@@ -90,8 +88,6 @@ codex-responses-api-proxy = { path = "responses-api-proxy" }
 codex-rmcp-client = { path = "rmcp-client" }
 codex-stdio-to-uds = { path = "stdio-to-uds" }
 codex-tui = { path = "tui" }
-codex-tui2 = { path = "tui2" }
 codex-utils-absolute-path = { path = "utils/absolute-path" }
-codex-utils-cache = { path = "utils/cache" }
 codex-utils-image = { path = "utils/image" }
 codex-utils-json-to-toml = { path = "utils/json-to-toml" }
@@ -100,7 +96,6 @@ codex-utils-readiness = { path = "utils/readiness" }
 codex-utils-string = { path = "utils/string" }
 codex-windows-sandbox = { path = "windows-sandbox-rs" }
 core_test_support = { path = "core/tests/common" }
-exec_server_test_support = { path = "exec-server/tests/common" }
 mcp-types = { path = "mcp-types" }
 mcp_test_support = { path = "mcp-server/tests/common" }
@@ -109,6 +104,7 @@ allocative = "0.3.3"
 ansi-to-tui = "7.0.0"
 anyhow = "1"
 arboard = { version = "3", features = ["wayland-data-control"] }
+askama = "0.14"
 assert_cmd = "2"
 assert_matches = "1.5.0"
 async-channel = "2.3.1"
@@ -142,14 +138,14 @@ icu_provider = { version = "2.1", features = ["sync"] }
 ignore = "0.4.23"
 image = { version = "^0.25.9", default-features = false }
 indexmap = "2.12.0"
-insta = "1.44.3"
+insta = "1.43.2"
 itertools = "0.14.0"
 keyring = { version = "3.6", default-features = false }
 landlock = "0.4.1"
 lazy_static = "1"
 libc = "0.2.177"
 log = "0.4"
-lru = "0.16.2"
+lru = "0.12.5"
 maplit = "1.0.2"
 mime_guess = "2.0.5"
 multimap = "0.10.0"
@@ -162,7 +158,6 @@ opentelemetry-appender-tracing = "0.30.0"
 opentelemetry-otlp = "0.30.0"
 opentelemetry-semantic-conventions = "0.30.0"
 opentelemetry_sdk = "0.30.0"
-tracing-opentelemetry = "0.31.0"
 os_info = "3.12.0"
 owo-colors = "4.2.0"
 path-absolutize = "3.1.1"
@@ -180,17 +175,17 @@ reqwest = "0.12"
 rmcp = { version = "0.10.0", default-features = false }
 schemars = "0.8.22"
 seccompiler = "0.5.0"
-sentry = "0.46.0"
+sentry = "0.34.0"
 serde = "1"
 serde_json = "1"
-serde_with = "3.16"
 serde_yaml = "0.9"
+serde_with = "3.16"
 serial_test = "3.2.0"
 sha1 = "0.10.6"
 sha2 = "0.10"
 shlex = "1.3.0"
 similar = "2.7.0"
-socket2 = "0.6.1"
+socket2 = "0.6.0"
 starlark = "0.13.0"
 strum = "0.27.2"
 strum_macros = "0.27.2"
@@ -227,7 +222,7 @@ vt100 = "0.16.2"
 walkdir = "2.5.0"
 webbrowser = "1.0"
 which = "6"
-wildmatch = "2.6.1"
+wildmatch = "2.5.0"

 wiremock = "0.6"
 zeroize = "1.8.2"
@@ -46,7 +46,7 @@ Use `codex mcp` to add/list/get/remove MCP server launchers defined in `config.t

 ### Notifications

-You can enable notifications by configuring a script that is run whenever the agent finishes a turn. The [notify documentation](../docs/config.md#notify) includes a detailed example that explains how to get desktop notifications via [terminal-notifier](https://github.com/julienXX/terminal-notifier) on macOS. When Codex detects that it is running under WSL 2 inside Windows Terminal (`WT_SESSION` is set), the TUI automatically falls back to native Windows toast notifications so approval prompts and completed turns surface even though Windows Terminal does not implement OSC 9.
+You can enable notifications by configuring a script that is run whenever the agent finishes a turn. The [notify documentation](../docs/config.md#notify) includes a detailed example that explains how to get desktop notifications via [terminal-notifier](https://github.com/julienXX/terminal-notifier) on macOS.

 ### `codex exec` to run Codex programmatically/non-interactively
@@ -15,7 +15,6 @@ workspace = true
 anyhow = { workspace = true }
 clap = { workspace = true, features = ["derive"] }
 codex-protocol = { workspace = true }
-codex-utils-absolute-path = { workspace = true }
 mcp-types = { workspace = true }
 schemars = { workspace = true }
 serde = { workspace = true, features = ["derive"] }
@@ -31,7 +31,6 @@ use std::process::Command;
 use ts_rs::TS;

 const HEADER: &str = "// GENERATED CODE! DO NOT MODIFY BY HAND!\n\n";
-const IGNORED_DEFINITIONS: &[&str] = &["Option<()>"];

 #[derive(Clone)]
 pub struct GeneratedSchema {
@@ -185,6 +184,7 @@ fn build_schema_bundle(schemas: Vec<GeneratedSchema>) -> Result<Value> {
         "ServerNotification",
         "ServerRequest",
     ];
+    const IGNORED_DEFINITIONS: &[&str] = &["Option<()>"];

     let namespaced_types = collect_namespaced_types(&schemas);
     let mut definitions = Map::new();
@@ -304,11 +304,8 @@
         out_dir.join(format!("{file_stem}.json"))
     };

-    if !IGNORED_DEFINITIONS.contains(&logical_name) {
-        write_pretty_json(out_path, &schema_value)
-            .with_context(|| format!("Failed to write JSON schema for {file_stem}"))?;
-    }
-
+    write_pretty_json(out_path, &schema_value)
+        .with_context(|| format!("Failed to write JSON schema for {file_stem}"))?;
     let namespace = match raw_namespace {
         Some("v1") | None => None,
         Some(ns) => Some(ns.to_string()),
@@ -117,9 +117,9 @@ client_request_definitions! {
         params: v2::ThreadListParams,
         response: v2::ThreadListResponse,
     },
-    SkillsList => "skills/list" {
-        params: v2::SkillsListParams,
-        response: v2::SkillsListResponse,
+    ThreadCompact => "thread/compact" {
+        params: v2::ThreadCompactParams,
+        response: v2::ThreadCompactResponse,
     },
     TurnStart => "turn/start" {
         params: v2::TurnStartParams,
@@ -139,11 +139,6 @@
         response: v2::ModelListResponse,
     },
-
-    McpServerOauthLogin => "mcpServer/oauth/login" {
-        params: v2::McpServerOauthLoginParams,
-        response: v2::McpServerOauthLoginResponse,
-    },

     McpServersList => "mcpServers/list" {
         params: v2::ListMcpServersParams,
         response: v2::ListMcpServersResponse,
@@ -527,10 +522,8 @@ server_notification_definitions! {
     ItemCompleted => "item/completed" (v2::ItemCompletedNotification),
     AgentMessageDelta => "item/agentMessage/delta" (v2::AgentMessageDeltaNotification),
     CommandExecutionOutputDelta => "item/commandExecution/outputDelta" (v2::CommandExecutionOutputDeltaNotification),
-    TerminalInteraction => "item/commandExecution/terminalInteraction" (v2::TerminalInteractionNotification),
     FileChangeOutputDelta => "item/fileChange/outputDelta" (v2::FileChangeOutputDeltaNotification),
     McpToolCallProgress => "item/mcpToolCall/progress" (v2::McpToolCallProgressNotification),
-    McpServerOauthLoginCompleted => "mcpServer/oauthLogin/completed" (v2::McpServerOauthLoginCompletedNotification),
     AccountUpdated => "account/updated" (v2::AccountUpdatedNotification),
     AccountRateLimitsUpdated => "account/rateLimits/updated" (v2::AccountRateLimitsUpdatedNotification),
     ReasoningSummaryTextDelta => "item/reasoning/summaryTextDelta" (v2::ReasoningSummaryTextDeltaNotification),
@@ -654,6 +647,7 @@ mod tests {
             command: vec!["echo".to_string(), "hello".to_string()],
             cwd: PathBuf::from("/tmp"),
             reason: Some("because tests".to_string()),
+            risk: None,
             parsed_cmd: vec![ParsedCommand::Unknown {
                 cmd: "echo hello".to_string(),
             }],
@@ -673,6 +667,7 @@ mod tests {
             "command": ["echo", "hello"],
             "cwd": "/tmp",
             "reason": "because tests",
+            "risk": null,
             "parsedCmd": [
                 {
                     "type": "unknown",
@@ -13,10 +13,10 @@ use codex_protocol::protocol::AskForApproval;
 use codex_protocol::protocol::EventMsg;
 use codex_protocol::protocol::FileChange;
 use codex_protocol::protocol::ReviewDecision;
+use codex_protocol::protocol::SandboxCommandAssessment;
 use codex_protocol::protocol::SandboxPolicy;
 use codex_protocol::protocol::SessionSource;
 use codex_protocol::protocol::TurnAbortReason;
-use codex_utils_absolute_path::AbsolutePathBuf;
 use schemars::JsonSchema;
 use serde::Deserialize;
 use serde::Serialize;
@@ -226,6 +226,7 @@ pub struct ExecCommandApprovalParams {
     pub command: Vec<String>,
     pub cwd: PathBuf,
     pub reason: Option<String>,
+    pub risk: Option<SandboxCommandAssessment>,
     pub parsed_cmd: Vec<ParsedCommand>,
 }
@@ -360,7 +361,7 @@ pub struct Tools {
 #[serde(rename_all = "camelCase")]
 pub struct SandboxSettings {
     #[serde(default)]
-    pub writable_roots: Vec<AbsolutePathBuf>,
+    pub writable_roots: Vec<PathBuf>,
     pub network_access: Option<bool>,
     pub exclude_tmpdir_env_var: Option<bool>,
     pub exclude_slash_tmp: Option<bool>,
@@ -3,11 +3,8 @@ use std::path::PathBuf;

 use crate::protocol::common::AuthMode;
-use codex_protocol::account::PlanType;
-use codex_protocol::approvals::ExecPolicyAmendment as CoreExecPolicyAmendment;
-use codex_protocol::config_types::ForcedLoginMethod;
+use codex_protocol::approvals::SandboxCommandAssessment as CoreSandboxCommandAssessment;
 use codex_protocol::config_types::ReasoningSummary;
-use codex_protocol::config_types::SandboxMode as CoreSandboxMode;
 use codex_protocol::config_types::Verbosity;
 use codex_protocol::items::AgentMessageContent as CoreAgentMessageContent;
 use codex_protocol::items::TurnItem as CoreTurnItem;
 use codex_protocol::models::ResponseItem;
@@ -15,19 +12,14 @@ use codex_protocol::openai_models::ReasoningEffort;
 use codex_protocol::parse_command::ParsedCommand as CoreParsedCommand;
 use codex_protocol::plan_tool::PlanItemArg as CorePlanItemArg;
 use codex_protocol::plan_tool::StepStatus as CorePlanStepStatus;
-use codex_protocol::protocol::AskForApproval as CoreAskForApproval;
 use codex_protocol::protocol::CodexErrorInfo as CoreCodexErrorInfo;
 use codex_protocol::protocol::CreditsSnapshot as CoreCreditsSnapshot;
 use codex_protocol::protocol::RateLimitSnapshot as CoreRateLimitSnapshot;
 use codex_protocol::protocol::RateLimitWindow as CoreRateLimitWindow;
 use codex_protocol::protocol::SessionSource as CoreSessionSource;
-use codex_protocol::protocol::SkillErrorInfo as CoreSkillErrorInfo;
-use codex_protocol::protocol::SkillMetadata as CoreSkillMetadata;
-use codex_protocol::protocol::SkillScope as CoreSkillScope;
 use codex_protocol::protocol::TokenUsage as CoreTokenUsage;
 use codex_protocol::protocol::TokenUsageInfo as CoreTokenUsageInfo;
 use codex_protocol::user_input::UserInput as CoreUserInput;
-use codex_utils_absolute_path::AbsolutePathBuf;
 use mcp_types::ContentBlock as McpContentBlock;
 use mcp_types::Resource as McpResource;
 use mcp_types::ResourceTemplate as McpResourceTemplate;
@@ -130,68 +122,17 @@ impl From<CoreCodexErrorInfo> for CodexErrorInfo {
     }
 }

-#[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq, Eq, JsonSchema, TS)]
-#[serde(rename_all = "kebab-case")]
-#[ts(rename_all = "kebab-case", export_to = "v2/")]
-pub enum AskForApproval {
-    #[serde(rename = "untrusted")]
-    #[ts(rename = "untrusted")]
-    UnlessTrusted,
-    OnFailure,
-    OnRequest,
-    Never,
-}
-
-impl AskForApproval {
-    pub fn to_core(self) -> CoreAskForApproval {
-        match self {
-            AskForApproval::UnlessTrusted => CoreAskForApproval::UnlessTrusted,
-            AskForApproval::OnFailure => CoreAskForApproval::OnFailure,
-            AskForApproval::OnRequest => CoreAskForApproval::OnRequest,
-            AskForApproval::Never => CoreAskForApproval::Never,
-        }
-    }
-}
-
-impl From<CoreAskForApproval> for AskForApproval {
-    fn from(value: CoreAskForApproval) -> Self {
-        match value {
-            CoreAskForApproval::UnlessTrusted => AskForApproval::UnlessTrusted,
-            CoreAskForApproval::OnFailure => AskForApproval::OnFailure,
-            CoreAskForApproval::OnRequest => AskForApproval::OnRequest,
-            CoreAskForApproval::Never => AskForApproval::Never,
-        }
-    }
-}
-
-#[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq, Eq, JsonSchema, TS)]
-#[serde(rename_all = "kebab-case")]
-#[ts(rename_all = "kebab-case", export_to = "v2/")]
-pub enum SandboxMode {
-    ReadOnly,
-    WorkspaceWrite,
-    DangerFullAccess,
-}
-
-impl SandboxMode {
-    pub fn to_core(self) -> CoreSandboxMode {
-        match self {
-            SandboxMode::ReadOnly => CoreSandboxMode::ReadOnly,
-            SandboxMode::WorkspaceWrite => CoreSandboxMode::WorkspaceWrite,
-            SandboxMode::DangerFullAccess => CoreSandboxMode::DangerFullAccess,
-        }
-    }
-}
-
-impl From<CoreSandboxMode> for SandboxMode {
-    fn from(value: CoreSandboxMode) -> Self {
-        match value {
-            CoreSandboxMode::ReadOnly => SandboxMode::ReadOnly,
-            CoreSandboxMode::WorkspaceWrite => SandboxMode::WorkspaceWrite,
-            CoreSandboxMode::DangerFullAccess => SandboxMode::DangerFullAccess,
-        }
-    }
-}
+v2_enum_from_core!(
+    pub enum AskForApproval from codex_protocol::protocol::AskForApproval {
+        UnlessTrusted, OnFailure, OnRequest, Never
+    }
+);
+
+v2_enum_from_core!(
+    pub enum SandboxMode from codex_protocol::config_types::SandboxMode {
+        ReadOnly, WorkspaceWrite, DangerFullAccess
+    }
+);

 v2_enum_from_core!(
     pub enum ReviewDelivery from codex_protocol::protocol::ReviewDelivery {
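The `v2_enum_from_core!` macro body is not part of this diff, only its call sites. A minimal sketch of how such a declarative macro could be written (derives and the serde/ts attributes trimmed; this is an assumption, not the repository's actual definition):

macro_rules! v2_enum_from_core {
    (pub enum $name:ident from $($core:ident)::+ { $($variant:ident),+ $(,)? }) => {
        #[derive(Debug, Clone, Copy, PartialEq, Eq)]
        pub enum $name {
            $($variant),+
        }

        impl $name {
            // Mirror of the hand-written `to_core` the macro replaces.
            pub fn to_core(self) -> $($core)::+ {
                match self {
                    $(Self::$variant => $($core)::+::$variant),+
                }
            }
        }

        impl From<$($core)::+> for $name {
            fn from(value: $($core)::+) -> Self {
                match value {
                    $($($core)::+::$variant => Self::$variant),+
                }
            }
        }
    };
}

Each invocation, like the `AskForApproval` one above, then expands to the enum plus both conversion impls, which is exactly the boilerplate the removed hand-written blocks contained (modulo the serde/ts attributes and the `untrusted` rename, which a sketch this small does not cover).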
@@ -218,72 +159,6 @@ pub enum ConfigLayerName {
     User,
 }

-#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default, JsonSchema, TS)]
-#[serde(rename_all = "snake_case")]
-#[ts(export_to = "v2/")]
-pub struct SandboxWorkspaceWrite {
-    #[serde(default)]
-    pub writable_roots: Vec<PathBuf>,
-    #[serde(default)]
-    pub network_access: bool,
-    #[serde(default)]
-    pub exclude_tmpdir_env_var: bool,
-    #[serde(default)]
-    pub exclude_slash_tmp: bool,
-}
-
-#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
-#[serde(rename_all = "snake_case")]
-#[ts(export_to = "v2/")]
-pub struct ToolsV2 {
-    #[serde(alias = "web_search_request")]
-    pub web_search: Option<bool>,
-    pub view_image: Option<bool>,
-}
-
-#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
-#[serde(rename_all = "snake_case")]
-#[ts(export_to = "v2/")]
-pub struct ProfileV2 {
-    pub model: Option<String>,
-    pub model_provider: Option<String>,
-    pub approval_policy: Option<AskForApproval>,
-    pub model_reasoning_effort: Option<ReasoningEffort>,
-    pub model_reasoning_summary: Option<ReasoningSummary>,
-    pub model_verbosity: Option<Verbosity>,
-    pub chatgpt_base_url: Option<String>,
-    #[serde(default, flatten)]
-    pub additional: HashMap<String, JsonValue>,
-}
-
-#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
-#[serde(rename_all = "snake_case")]
-#[ts(export_to = "v2/")]
-pub struct Config {
-    pub model: Option<String>,
-    pub review_model: Option<String>,
-    pub model_context_window: Option<i64>,
-    pub model_auto_compact_token_limit: Option<i64>,
-    pub model_provider: Option<String>,
-    pub approval_policy: Option<AskForApproval>,
-    pub sandbox_mode: Option<SandboxMode>,
-    pub sandbox_workspace_write: Option<SandboxWorkspaceWrite>,
-    pub forced_chatgpt_workspace_id: Option<String>,
-    pub forced_login_method: Option<ForcedLoginMethod>,
-    pub tools: Option<ToolsV2>,
-    pub profile: Option<String>,
-    #[serde(default)]
-    pub profiles: HashMap<String, ProfileV2>,
-    pub instructions: Option<String>,
-    pub developer_instructions: Option<String>,
-    pub compact_prompt: Option<String>,
-    pub model_reasoning_effort: Option<ReasoningEffort>,
-    pub model_reasoning_summary: Option<ReasoningSummary>,
-    pub model_verbosity: Option<Verbosity>,
-    #[serde(default, flatten)]
-    pub additional: HashMap<String, JsonValue>,
-}

 #[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
 #[serde(rename_all = "camelCase")]
 #[ts(export_to = "v2/")]
@@ -334,8 +209,6 @@ pub struct OverriddenMetadata {
 pub struct ConfigWriteResponse {
     pub status: WriteStatus,
     pub version: String,
-    /// Canonical path to the config file that was written.
-    pub file_path: String,
     pub overridden_metadata: Option<OverriddenMetadata>,
 }
@@ -362,7 +235,7 @@ pub struct ConfigReadParams {
 #[serde(rename_all = "camelCase")]
 #[ts(export_to = "v2/")]
 pub struct ConfigReadResponse {
-    pub config: Config,
+    pub config: JsonValue,
     pub origins: HashMap<String, ConfigLayerMetadata>,
     #[serde(skip_serializing_if = "Option::is_none")]
     pub layers: Option<Vec<ConfigLayer>>,
@@ -372,11 +245,10 @@ pub struct ConfigReadResponse {
 #[serde(rename_all = "camelCase")]
 #[ts(export_to = "v2/")]
 pub struct ConfigValueWriteParams {
+    pub file_path: String,
     pub key_path: String,
     pub value: JsonValue,
     pub merge_strategy: MergeStrategy,
-    /// Path to the config file to write; defaults to the user's `config.toml` when omitted.
-    pub file_path: Option<String>,
     pub expected_version: Option<String>,
 }
@@ -384,9 +256,8 @@
 #[serde(rename_all = "camelCase")]
 #[ts(export_to = "v2/")]
 pub struct ConfigBatchWriteParams {
+    pub file_path: String,
     pub edits: Vec<ConfigEdit>,
-    /// Path to the config file to write; defaults to the user's `config.toml` when omitted.
-    pub file_path: Option<String>,
     pub expected_version: Option<String>,
 }
@@ -399,16 +270,19 @@ pub struct ConfigEdit {
     pub merge_strategy: MergeStrategy,
 }

+v2_enum_from_core!(
+    pub enum CommandRiskLevel from codex_protocol::approvals::SandboxRiskLevel {
+        Low,
+        Medium,
+        High
+    }
+);
+
 #[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, JsonSchema, TS)]
 #[serde(rename_all = "camelCase")]
 #[ts(export_to = "v2/")]
 pub enum ApprovalDecision {
     Accept,
+    /// Approve and remember the approval for the session.
+    AcceptForSession,
-    AcceptWithExecpolicyAmendment {
-        execpolicy_amendment: ExecPolicyAmendment,
-    },
     Decline,
     Cancel,
 }
@@ -424,7 +298,7 @@ pub enum SandboxPolicy {
     #[ts(rename_all = "camelCase")]
     WorkspaceWrite {
         #[serde(default)]
-        writable_roots: Vec<AbsolutePathBuf>,
+        writable_roots: Vec<PathBuf>,
         #[serde(default)]
         network_access: bool,
         #[serde(default)]
@@ -478,23 +352,28 @@ impl From<codex_protocol::protocol::SandboxPolicy> for SandboxPolicy {
     }
 }

-#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, JsonSchema, TS)]
-#[serde(transparent)]
-#[ts(type = "Array<string>", export_to = "v2/")]
-pub struct ExecPolicyAmendment {
-    pub command: Vec<String>,
-}
-
-impl ExecPolicyAmendment {
-    pub fn into_core(self) -> CoreExecPolicyAmendment {
-        CoreExecPolicyAmendment::new(self.command)
-    }
-}
-
-impl From<CoreExecPolicyAmendment> for ExecPolicyAmendment {
-    fn from(value: CoreExecPolicyAmendment) -> Self {
-        Self {
-            command: value.command().to_vec(),
-        }
-    }
-}
+#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
+#[serde(rename_all = "camelCase")]
+#[ts(export_to = "v2/")]
+pub struct SandboxCommandAssessment {
+    pub description: String,
+    pub risk_level: CommandRiskLevel,
+}
+
+impl SandboxCommandAssessment {
+    pub fn into_core(self) -> CoreSandboxCommandAssessment {
+        CoreSandboxCommandAssessment {
+            description: self.description,
+            risk_level: self.risk_level.to_core(),
+        }
+    }
+}
+
+impl From<CoreSandboxCommandAssessment> for SandboxCommandAssessment {
+    fn from(value: CoreSandboxCommandAssessment) -> Self {
+        Self {
+            description: value.description,
+            risk_level: CommandRiskLevel::from(value.risk_level),
+        }
+    }
+}
@@ -672,21 +551,10 @@ pub struct CancelLoginAccountParams {
     pub login_id: String,
 }

-#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, JsonSchema, TS)]
-#[serde(rename_all = "camelCase")]
-#[ts(rename_all = "camelCase")]
-#[ts(export_to = "v2/")]
-pub enum CancelLoginAccountStatus {
-    Canceled,
-    NotFound,
-}
-
-#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, JsonSchema, TS)]
+#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
 #[serde(rename_all = "camelCase")]
 #[ts(export_to = "v2/")]
-pub struct CancelLoginAccountResponse {
-    pub status: CancelLoginAccountStatus,
-}
+pub struct CancelLoginAccountResponse {}

 #[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
 #[serde(rename_all = "camelCase")]
@@ -789,26 +657,6 @@ pub struct ListMcpServersResponse {
     pub next_cursor: Option<String>,
 }

-#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
-#[serde(rename_all = "camelCase")]
-#[ts(export_to = "v2/")]
-pub struct McpServerOauthLoginParams {
-    pub name: String,
-    #[serde(default, skip_serializing_if = "Option::is_none")]
-    #[ts(optional)]
-    pub scopes: Option<Vec<String>>,
-    #[serde(default, skip_serializing_if = "Option::is_none")]
-    #[ts(optional)]
-    pub timeout_secs: Option<i64>,
-}
-
-#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
-#[serde(rename_all = "camelCase")]
-#[ts(export_to = "v2/")]
-pub struct McpServerOauthLoginResponse {
-    pub authorization_url: String,
-}

 #[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
 #[serde(rename_all = "camelCase")]
 #[ts(export_to = "v2/")]
@@ -961,83 +809,14 @@ pub struct ThreadListResponse {
 #[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
 #[serde(rename_all = "camelCase")]
 #[ts(export_to = "v2/")]
-pub struct SkillsListParams {
-    /// When empty, defaults to the current session working directory.
-    #[serde(default, skip_serializing_if = "Vec::is_empty")]
-    pub cwds: Vec<PathBuf>,
+pub struct ThreadCompactParams {
+    pub thread_id: String,
 }

 #[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
 #[serde(rename_all = "camelCase")]
 #[ts(export_to = "v2/")]
-pub struct SkillsListResponse {
-    pub data: Vec<SkillsListEntry>,
-}
-
-#[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq, Eq, JsonSchema, TS)]
-#[serde(rename_all = "snake_case")]
-#[ts(rename_all = "snake_case")]
-#[ts(export_to = "v2/")]
-pub enum SkillScope {
-    User,
-    Repo,
-}
-
-#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
-#[serde(rename_all = "camelCase")]
-#[ts(export_to = "v2/")]
-pub struct SkillMetadata {
-    pub name: String,
-    pub description: String,
-    pub path: PathBuf,
-    pub scope: SkillScope,
-}
-
-#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
-#[serde(rename_all = "camelCase")]
-#[ts(export_to = "v2/")]
-pub struct SkillErrorInfo {
-    pub path: PathBuf,
-    pub message: String,
-}
-
-#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
-#[serde(rename_all = "camelCase")]
-#[ts(export_to = "v2/")]
-pub struct SkillsListEntry {
-    pub cwd: PathBuf,
-    pub skills: Vec<SkillMetadata>,
-    pub errors: Vec<SkillErrorInfo>,
-}
-
-impl From<CoreSkillMetadata> for SkillMetadata {
-    fn from(value: CoreSkillMetadata) -> Self {
-        Self {
-            name: value.name,
-            description: value.description,
-            path: value.path,
-            scope: value.scope.into(),
-        }
-    }
-}
-
-impl From<CoreSkillScope> for SkillScope {
-    fn from(value: CoreSkillScope) -> Self {
-        match value {
-            CoreSkillScope::User => Self::User,
-            CoreSkillScope::Repo => Self::Repo,
-        }
-    }
-}
-
-impl From<CoreSkillErrorInfo> for SkillErrorInfo {
-    fn from(value: CoreSkillErrorInfo) -> Self {
-        Self {
-            path: value.path,
-            message: value.message,
-        }
-    }
-}
+pub struct ThreadCompactResponse {}

 #[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
 #[serde(rename_all = "camelCase")]
@@ -1159,9 +938,6 @@ pub struct TurnError {
 #[ts(export_to = "v2/")]
 pub struct ErrorNotification {
     pub error: TurnError,
-    // Set to true if the error is transient and the app-server process will automatically retry.
-    // If true, this will not interrupt a turn.
-    pub will_retry: bool,
     pub thread_id: String,
     pub turn_id: String,
 }
@@ -1361,9 +1137,6 @@ pub enum ThreadItem {
         arguments: JsonValue,
         result: Option<McpToolCallResult>,
         error: Option<McpToolCallError>,
-        /// The duration of the MCP tool call in milliseconds.
-        #[ts(type = "number | null")]
-        duration_ms: Option<i64>,
     },
     #[serde(rename_all = "camelCase")]
     #[ts(rename_all = "camelCase")]
@@ -1627,17 +1400,6 @@ pub struct ReasoningTextDeltaNotification {
     pub content_index: i64,
 }

-#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
-#[serde(rename_all = "camelCase")]
-#[ts(export_to = "v2/")]
-pub struct TerminalInteractionNotification {
-    pub thread_id: String,
-    pub turn_id: String,
-    pub item_id: String,
-    pub process_id: String,
-    pub stdin: String,
-}
-
 #[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
 #[serde(rename_all = "camelCase")]
 #[ts(export_to = "v2/")]
@@ -1668,17 +1430,6 @@ pub struct McpToolCallProgressNotification {
     pub message: String,
 }

-#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
-#[serde(rename_all = "camelCase")]
-#[ts(export_to = "v2/")]
-pub struct McpServerOauthLoginCompletedNotification {
-    pub name: String,
-    pub success: bool,
-    #[serde(default, skip_serializing_if = "Option::is_none")]
-    #[ts(optional)]
-    pub error: Option<String>,
-}
-
 #[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
 #[serde(rename_all = "camelCase")]
 #[ts(export_to = "v2/")]
@@ -1705,8 +1456,17 @@ pub struct CommandExecutionRequestApprovalParams {
     pub item_id: String,
     /// Optional explanatory reason (e.g. request for network access).
     pub reason: Option<String>,
-    /// Optional proposed execpolicy amendment to allow similar commands without prompting.
-    pub proposed_execpolicy_amendment: Option<ExecPolicyAmendment>,
+    /// Optional model-provided risk assessment describing the blocked command.
+    pub risk: Option<SandboxCommandAssessment>,
 }

+#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
+#[serde(rename_all = "camelCase")]
+#[ts(export_to = "v2/")]
+pub struct CommandExecutionRequestAcceptSettings {
+    /// If true, automatically approve this command for the duration of the session.
+    #[serde(default)]
+    pub for_session: bool,
+}
+
 #[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
@@ -1714,6 +1474,10 @@ pub struct CommandExecutionRequestApprovalParams {
 #[ts(export_to = "v2/")]
 pub struct CommandExecutionRequestApprovalResponse {
     pub decision: ApprovalDecision,
+    /// Optional approval settings for when the decision is `accept`.
+    /// Ignored if the decision is `decline` or `cancel`.
+    #[serde(default)]
+    pub accept_settings: Option<CommandExecutionRequestAcceptSettings>,
 }

 #[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
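For reference, a hedged sketch of the JSON wire shape these `camelCase` attributes imply; the mirror types below are redefined locally so the snippet stands alone (it assumes serde and serde_json as dependencies, and is not an import of the real crate):

use serde::Serialize;

#[derive(Serialize)]
#[serde(rename_all = "camelCase")]
enum ApprovalDecision {
    Accept,
}

#[derive(Serialize)]
#[serde(rename_all = "camelCase")]
struct CommandExecutionRequestAcceptSettings {
    for_session: bool,
}

#[derive(Serialize)]
#[serde(rename_all = "camelCase")]
struct CommandExecutionRequestApprovalResponse {
    decision: ApprovalDecision,
    accept_settings: Option<CommandExecutionRequestAcceptSettings>,
}

fn main() {
    let response = CommandExecutionRequestApprovalResponse {
        decision: ApprovalDecision::Accept,
        accept_settings: Some(CommandExecutionRequestAcceptSettings { for_session: true }),
    };
    // Prints: {"decision":"accept","acceptSettings":{"forSession":true}}
    println!("{}", serde_json::to_string(&response).unwrap());
}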
@@ -1750,7 +1514,6 @@ pub struct RateLimitSnapshot {
     pub primary: Option<RateLimitWindow>,
     pub secondary: Option<RateLimitWindow>,
     pub credits: Option<CreditsSnapshot>,
-    pub plan_type: Option<PlanType>,
 }

 impl From<CoreRateLimitSnapshot> for RateLimitSnapshot {
@@ -1759,7 +1522,6 @@ impl From<CoreRateLimitSnapshot> for RateLimitSnapshot {
             primary: value.primary.map(RateLimitWindow::from),
             secondary: value.secondary.map(RateLimitWindow::from),
             credits: value.credits.map(CreditsSnapshot::from),
-            plan_type: value.plan_type,
         }
     }
 }
@@ -21,6 +21,7 @@ use codex_app_server_protocol::ApprovalDecision;
 use codex_app_server_protocol::AskForApproval;
 use codex_app_server_protocol::ClientInfo;
 use codex_app_server_protocol::ClientRequest;
+use codex_app_server_protocol::CommandExecutionRequestAcceptSettings;
 use codex_app_server_protocol::CommandExecutionRequestApprovalParams;
 use codex_app_server_protocol::CommandExecutionRequestApprovalResponse;
 use codex_app_server_protocol::FileChangeRequestApprovalParams;
@@ -553,10 +554,6 @@ impl CodexClient {
                 print!("{}", delta.delta);
                 std::io::stdout().flush().ok();
             }
-            ServerNotification::TerminalInteraction(delta) => {
-                println!("[stdin sent: {}]", delta.stdin);
-                std::io::stdout().flush().ok();
-            }
             ServerNotification::ItemStarted(payload) => {
                 println!("\n< item started: {:?}", payload.item);
             }
@@ -756,7 +753,7 @@ impl CodexClient {
             turn_id,
             item_id,
             reason,
-            proposed_execpolicy_amendment,
+            risk,
         } = params;

         println!(
@@ -765,12 +762,13 @@
         if let Some(reason) = reason.as_deref() {
             println!("< reason: {reason}");
         }
-        if let Some(execpolicy_amendment) = proposed_execpolicy_amendment.as_ref() {
-            println!("< proposed execpolicy amendment: {execpolicy_amendment:?}");
+        if let Some(risk) = risk.as_ref() {
+            println!("< risk assessment: {risk:?}");
         }

         let response = CommandExecutionRequestApprovalResponse {
             decision: ApprovalDecision::Accept,
+            accept_settings: Some(CommandExecutionRequestAcceptSettings { for_session: false }),
         };
         self.send_server_request_response(request_id, &response)?;
         println!("< approved commandExecution request for item {item_id}");
@@ -26,11 +26,11 @@ codex-login = { workspace = true }
 codex-protocol = { workspace = true }
 codex-app-server-protocol = { workspace = true }
 codex-feedback = { workspace = true }
 codex-rmcp-client = { workspace = true }
 codex-utils-json-to-toml = { workspace = true }
 chrono = { workspace = true }
 serde = { workspace = true, features = ["derive"] }
 serde_json = { workspace = true }
 sha2 = { workspace = true }
 mcp-types = { workspace = true }
 tempfile = { workspace = true }
 toml = { workspace = true }
@@ -43,6 +43,7 @@ tokio = { workspace = true, features = [
 ] }
 tracing = { workspace = true, features = ["log"] }
 tracing-subscriber = { workspace = true, features = ["env-filter", "fmt"] }
+opentelemetry-appender-tracing = { workspace = true }
 uuid = { workspace = true, features = ["serde", "v7"] }

 [dev-dependencies]
@@ -65,9 +65,6 @@ Example (from OpenAI's official VSCode extension):
 - `review/start` — kick off Codex’s automated reviewer for a thread; responds like `turn/start` and emits `item/started`/`item/completed` notifications with `enteredReviewMode` and `exitedReviewMode` items, plus a final assistant `agentMessage` containing the review.
 - `command/exec` — run a single command under the server sandbox without starting a thread/turn (handy for utilities and validation).
 - `model/list` — list available models (with reasoning effort options).
-- `skills/list` — list skills for one or more `cwd` values.
-- `mcpServer/oauth/login` — start an OAuth login for a configured MCP server; returns an `authorization_url` and later emits `mcpServer/oauthLogin/completed` once the browser flow finishes.
 - `mcpServers/list` — enumerate configured MCP servers with their tools, resources, resource templates, and auth status; supports cursor+limit pagination.
 - `feedback/upload` — submit a feedback report (classification + optional reason/logs and conversation_id); returns the tracking thread id.
-- `command/exec` — run a single command under the server sandbox without starting a thread/turn (handy for utilities and validation).
 - `config/read` — fetch the effective config on disk after resolving config layering.
@@ -369,8 +366,6 @@ The JSON-RPC auth/account surface exposes request/response methods plus server-i
 - `account/logout` — sign out; triggers `account/updated`.
 - `account/updated` (notify) — emitted whenever auth mode changes (`authMode`: `apikey`, `chatgpt`, or `null`).
 - `account/rateLimits/read` — fetch ChatGPT rate limits; updates arrive via `account/rateLimits/updated` (notify).
-- `account/rateLimits/updated` (notify) — emitted whenever a user's ChatGPT rate limits change.
-- `mcpServer/oauthLogin/completed` (notify) — emitted after a `mcpServer/oauth/login` flow finishes for a server; payload includes `{ name, success, error? }`.

 ### 1) Check auth state
@@ -18,7 +18,6 @@ use codex_app_server_protocol::ContextCompactedNotification;
 use codex_app_server_protocol::ErrorNotification;
 use codex_app_server_protocol::ExecCommandApprovalParams;
 use codex_app_server_protocol::ExecCommandApprovalResponse;
-use codex_app_server_protocol::ExecPolicyAmendment as V2ExecPolicyAmendment;
 use codex_app_server_protocol::FileChangeOutputDeltaNotification;
 use codex_app_server_protocol::FileChangeRequestApprovalParams;
 use codex_app_server_protocol::FileChangeRequestApprovalResponse;
@@ -34,9 +33,9 @@ use codex_app_server_protocol::PatchChangeKind as V2PatchChangeKind;
 use codex_app_server_protocol::ReasoningSummaryPartAddedNotification;
 use codex_app_server_protocol::ReasoningSummaryTextDeltaNotification;
 use codex_app_server_protocol::ReasoningTextDeltaNotification;
+use codex_app_server_protocol::SandboxCommandAssessment as V2SandboxCommandAssessment;
 use codex_app_server_protocol::ServerNotification;
 use codex_app_server_protocol::ServerRequestPayload;
-use codex_app_server_protocol::TerminalInteractionNotification;
 use codex_app_server_protocol::ThreadItem;
 use codex_app_server_protocol::ThreadTokenUsage;
 use codex_app_server_protocol::ThreadTokenUsageUpdatedNotification;
@@ -179,7 +178,7 @@ pub(crate) async fn apply_bespoke_event_handling(
|
||||
command,
|
||||
cwd,
|
||||
reason,
|
||||
proposed_execpolicy_amendment,
|
||||
risk,
|
||||
parsed_cmd,
|
||||
}) => match api_version {
|
||||
ApiVersion::V1 => {
|
||||
@@ -189,6 +188,7 @@ pub(crate) async fn apply_bespoke_event_handling(
|
||||
command,
|
||||
cwd,
|
||||
reason,
|
||||
risk,
|
||||
parsed_cmd,
|
||||
};
|
||||
let rx = outgoing
|
||||
@@ -206,8 +206,6 @@ pub(crate) async fn apply_bespoke_event_handling(
|
||||
.map(V2ParsedCommand::from)
|
||||
.collect::<Vec<_>>();
|
||||
let command_string = shlex_join(&command);
|
||||
let proposed_execpolicy_amendment_v2 =
|
||||
proposed_execpolicy_amendment.map(V2ExecPolicyAmendment::from);
|
||||
|
||||
let params = CommandExecutionRequestApprovalParams {
|
||||
thread_id: conversation_id.to_string(),
|
||||
@@ -216,7 +214,7 @@ pub(crate) async fn apply_bespoke_event_handling(
|
||||
// and emit the corresponding EventMsg, we repurpose the call_id as the item_id.
|
||||
item_id: item_id.clone(),
|
||||
reason,
|
||||
proposed_execpolicy_amendment: proposed_execpolicy_amendment_v2,
|
||||
risk: risk.map(V2SandboxCommandAssessment::from),
|
||||
};
|
||||
let rx = outgoing
|
||||
.send_request(ServerRequestPayload::CommandExecutionRequestApproval(
|
||||
@@ -334,7 +332,6 @@ pub(crate) async fn apply_bespoke_event_handling(
|
||||
outgoing
|
||||
.send_server_notification(ServerNotification::Error(ErrorNotification {
|
||||
error: turn_error,
|
||||
will_retry: false,
|
||||
thread_id: conversation_id.to_string(),
|
||||
turn_id: event_turn_id.clone(),
|
||||
}))
|
||||
@@ -350,7 +347,6 @@ pub(crate) async fn apply_bespoke_event_handling(
|
||||
outgoing
|
||||
.send_server_notification(ServerNotification::Error(ErrorNotification {
|
||||
error: turn_error,
|
||||
will_retry: true,
|
||||
thread_id: conversation_id.to_string(),
|
||||
turn_id: event_turn_id.clone(),
|
||||
}))
|
||||
@@ -570,20 +566,6 @@ pub(crate) async fn apply_bespoke_event_handling(
|
||||
.await;
|
||||
}
|
||||
}
|
||||
EventMsg::TerminalInteraction(terminal_event) => {
|
||||
let item_id = terminal_event.call_id.clone();
|
||||
|
||||
let notification = TerminalInteractionNotification {
|
||||
thread_id: conversation_id.to_string(),
|
||||
turn_id: event_turn_id.clone(),
|
||||
item_id,
|
||||
process_id: terminal_event.process_id,
|
||||
stdin: terminal_event.stdin,
|
||||
};
|
||||
outgoing
|
||||
.send_server_notification(ServerNotification::TerminalInteraction(notification))
|
||||
.await;
|
||||
}
|
||||
EventMsg::ExecCommandEnd(exec_command_end_event) => {
|
||||
let ExecCommandEndEvent {
|
||||
call_id,
|
||||
@@ -1062,11 +1044,7 @@ async fn on_file_change_request_approval_response(
|
||||
});
|
||||
|
||||
let (decision, completion_status) = match response.decision {
|
||||
ApprovalDecision::Accept
|
||||
| ApprovalDecision::AcceptForSession
|
||||
| ApprovalDecision::AcceptWithExecpolicyAmendment { .. } => {
|
||||
(ReviewDecision::Approved, None)
|
||||
}
|
||||
ApprovalDecision::Accept => (ReviewDecision::Approved, None),
|
||||
ApprovalDecision::Decline => {
|
||||
(ReviewDecision::Denied, Some(PatchApplyStatus::Declined))
|
||||
}
|
||||
@@ -1128,27 +1106,25 @@ async fn on_command_execution_request_approval_response(
|
||||
error!("failed to deserialize CommandExecutionRequestApprovalResponse: {err}");
|
||||
CommandExecutionRequestApprovalResponse {
|
||||
decision: ApprovalDecision::Decline,
|
||||
accept_settings: None,
|
||||
}
|
||||
});
|
||||
|
||||
let decision = response.decision;
|
||||
let CommandExecutionRequestApprovalResponse {
|
||||
decision,
|
||||
accept_settings,
|
||||
} = response;
|
||||
|
||||
let (decision, completion_status) = match decision {
|
||||
ApprovalDecision::Accept => (ReviewDecision::Approved, None),
|
||||
ApprovalDecision::AcceptForSession => (ReviewDecision::ApprovedForSession, None),
|
||||
ApprovalDecision::AcceptWithExecpolicyAmendment {
|
||||
execpolicy_amendment,
|
||||
} => (
|
||||
ReviewDecision::ApprovedExecpolicyAmendment {
|
||||
proposed_execpolicy_amendment: execpolicy_amendment.into_core(),
|
||||
},
|
||||
None,
|
||||
),
|
||||
ApprovalDecision::Decline => (
|
||||
let (decision, completion_status) = match (decision, accept_settings) {
|
||||
(ApprovalDecision::Accept, Some(settings)) if settings.for_session => {
|
||||
(ReviewDecision::ApprovedForSession, None)
|
||||
}
|
||||
(ApprovalDecision::Accept, _) => (ReviewDecision::Approved, None),
|
||||
(ApprovalDecision::Decline, _) => (
|
||||
ReviewDecision::Denied,
|
||||
Some(CommandExecutionStatus::Declined),
|
||||
),
|
||||
ApprovalDecision::Cancel => (
|
||||
(ApprovalDecision::Cancel, _) => (
|
||||
ReviewDecision::Abort,
|
||||
Some(CommandExecutionStatus::Declined),
|
||||
),
|
||||
@@ -1201,7 +1177,6 @@ async fn construct_mcp_tool_call_notification(
|
||||
arguments: begin_event.invocation.arguments.unwrap_or(JsonValue::Null),
|
||||
result: None,
|
||||
error: None,
|
||||
duration_ms: None,
|
||||
};
|
||||
ItemStartedNotification {
|
||||
thread_id,
|
||||
@@ -1210,7 +1185,7 @@ async fn construct_mcp_tool_call_notification(
|
||||
}
|
||||
}
|
||||
|
||||
/// similar to handle_mcp_tool_call_end in exec
|
||||
/// simiilar to handle_mcp_tool_call_end in exec
|
||||
async fn construct_mcp_tool_call_end_notification(
|
||||
end_event: McpToolCallEndEvent,
|
||||
thread_id: String,
|
||||
@@ -1221,7 +1196,6 @@ async fn construct_mcp_tool_call_end_notification(
|
||||
} else {
|
||||
McpToolCallStatus::Failed
|
||||
};
|
||||
let duration_ms = i64::try_from(end_event.duration.as_millis()).ok();
|
||||
|
||||
let (result, error) = match &end_event.result {
|
||||
Ok(value) => (
|
||||
@@ -1247,7 +1221,6 @@ async fn construct_mcp_tool_call_end_notification(
|
||||
arguments: end_event.invocation.arguments.unwrap_or(JsonValue::Null),
|
||||
result,
|
||||
error,
|
||||
duration_ms,
|
||||
};
|
||||
ItemCompletedNotification {
|
||||
thread_id,
|
||||
@@ -1520,7 +1493,6 @@ mod tests {
|
||||
unlimited: false,
|
||||
balance: Some("5".to_string()),
|
||||
}),
|
||||
plan_type: None,
|
||||
};
|
||||
|
||||
handle_token_count_event(
|
||||
@@ -1625,7 +1597,6 @@ mod tests {
|
||||
arguments: serde_json::json!({"server": ""}),
|
||||
result: None,
|
||||
error: None,
|
||||
duration_ms: None,
|
||||
},
|
||||
};
|
||||
|
||||
@@ -1779,7 +1750,6 @@ mod tests {
|
||||
arguments: JsonValue::Null,
|
||||
result: None,
|
||||
error: None,
|
||||
duration_ms: None,
|
||||
},
|
||||
};
|
||||
|
||||
@@ -1833,7 +1803,6 @@ mod tests {
|
||||
structured_content: None,
|
||||
}),
|
||||
error: None,
|
||||
duration_ms: Some(0),
|
||||
},
|
||||
};
|
||||
|
||||
@@ -1875,7 +1844,6 @@ mod tests {
|
||||
error: Some(McpToolCallError {
|
||||
message: "boom".to_string(),
|
||||
}),
|
||||
duration_ms: Some(1),
|
||||
},
|
||||
};
|
||||
|
||||
|
||||
@@ -19,7 +19,6 @@ use codex_app_server_protocol::AuthMode;
use codex_app_server_protocol::AuthStatusChangeNotification;
use codex_app_server_protocol::CancelLoginAccountParams;
use codex_app_server_protocol::CancelLoginAccountResponse;
use codex_app_server_protocol::CancelLoginAccountStatus;
use codex_app_server_protocol::CancelLoginChatGptResponse;
use codex_app_server_protocol::ClientRequest;
use codex_app_server_protocol::CommandExecParams;
@@ -56,9 +55,6 @@ use codex_app_server_protocol::LoginChatGptResponse;
use codex_app_server_protocol::LogoutAccountResponse;
use codex_app_server_protocol::LogoutChatGptResponse;
use codex_app_server_protocol::McpServer;
use codex_app_server_protocol::McpServerOauthLoginCompletedNotification;
use codex_app_server_protocol::McpServerOauthLoginParams;
use codex_app_server_protocol::McpServerOauthLoginResponse;
use codex_app_server_protocol::ModelListParams;
use codex_app_server_protocol::ModelListResponse;
use codex_app_server_protocol::NewConversationParams;
@@ -81,8 +77,6 @@ use codex_app_server_protocol::ServerNotification;
use codex_app_server_protocol::SessionConfiguredNotification;
use codex_app_server_protocol::SetDefaultModelParams;
use codex_app_server_protocol::SetDefaultModelResponse;
use codex_app_server_protocol::SkillsListParams;
use codex_app_server_protocol::SkillsListResponse;
use codex_app_server_protocol::Thread;
use codex_app_server_protocol::ThreadArchiveParams;
use codex_app_server_protocol::ThreadArchiveResponse;
@@ -119,9 +113,9 @@ use codex_core::auth::CLIENT_ID;
use codex_core::auth::login_with_api_key;
use codex_core::config::Config;
use codex_core::config::ConfigOverrides;
use codex_core::config::ConfigService;
use codex_core::config::ConfigToml;
use codex_core::config::edit::ConfigEditsBuilder;
use codex_core::config::types::McpServerTransportConfig;
use codex_core::config_loader::load_config_as_toml;
use codex_core::default_client::get_codex_user_agent;
use codex_core::exec::ExecParams;
use codex_core::exec_env::create_env;
@@ -138,7 +132,6 @@ use codex_core::protocol::ReviewRequest;
use codex_core::protocol::ReviewTarget as CoreReviewTarget;
use codex_core::protocol::SessionConfiguredEvent;
use codex_core::read_head_for_summary;
use codex_core::sandboxing::SandboxPermissions;
use codex_feedback::CodexFeedback;
use codex_login::ServerOptions as LoginServerOptions;
use codex_login::ShutdownHandle;
@@ -154,7 +147,6 @@ use codex_protocol::protocol::RolloutItem;
use codex_protocol::protocol::SessionMetaLine;
use codex_protocol::protocol::USER_MESSAGE_BEGIN;
use codex_protocol::user_input::UserInput as CoreInputItem;
use codex_rmcp_client::perform_oauth_login_return_url;
use codex_utils_json_to_toml::json_to_toml;
use std::collections::HashMap;
use std::collections::HashSet;
@@ -169,7 +161,6 @@ use std::time::Duration;
use tokio::select;
use tokio::sync::Mutex;
use tokio::sync::oneshot;
use toml::Value as TomlValue;
use tracing::error;
use tracing::info;
use tracing::warn;
@@ -187,9 +178,6 @@ pub(crate) struct TurnSummary {

pub(crate) type TurnSummaryStore = Arc<Mutex<HashMap<ConversationId, TurnSummary>>>;

const THREAD_LIST_DEFAULT_LIMIT: usize = 25;
const THREAD_LIST_MAX_LIMIT: usize = 100;

// Duration before a ChatGPT login attempt is abandoned.
const LOGIN_CHATGPT_TIMEOUT: Duration = Duration::from_secs(10 * 60);
struct ActiveLogin {
@@ -197,11 +185,6 @@ struct ActiveLogin {
login_id: Uuid,
}

#[derive(Clone, Copy, Debug)]
enum CancelLoginError {
NotFound(Uuid),
}

impl Drop for ActiveLogin {
fn drop(&mut self) {
self.shutdown_handle.shutdown();
@@ -215,7 +198,6 @@ pub(crate) struct CodexMessageProcessor {
outgoing: Arc<OutgoingMessageSender>,
codex_linux_sandbox_exe: Option<PathBuf>,
config: Arc<Config>,
cli_overrides: Vec<(String, TomlValue)>,
conversation_listeners: HashMap<Uuid, oneshot::Sender<()>>,
active_login: Arc<Mutex<Option<ActiveLogin>>>,
// Queue of pending interrupt requests per conversation. We reply when TurnAborted arrives.
@@ -262,7 +244,6 @@ impl CodexMessageProcessor {
outgoing: Arc<OutgoingMessageSender>,
codex_linux_sandbox_exe: Option<PathBuf>,
config: Arc<Config>,
cli_overrides: Vec<(String, TomlValue)>,
feedback: CodexFeedback,
) -> Self {
Self {
@@ -271,7 +252,6 @@ impl CodexMessageProcessor {
outgoing,
codex_linux_sandbox_exe,
config,
cli_overrides,
conversation_listeners: HashMap::new(),
active_login: Arc::new(Mutex::new(None)),
pending_interrupts: Arc::new(Mutex::new(HashMap::new())),
@@ -281,16 +261,6 @@ impl CodexMessageProcessor {
}
}

async fn load_latest_config(&self) -> Result<Config, JSONRPCErrorError> {
Config::load_with_cli_overrides(self.cli_overrides.clone(), ConfigOverrides::default())
.await
.map_err(|err| JSONRPCErrorError {
code: INTERNAL_ERROR_CODE,
message: format!("failed to reload config: {err}"),
data: None,
})
}

fn review_request_from_target(
target: ApiReviewTarget,
) -> Result<(ReviewRequest, String), JSONRPCErrorError> {
@@ -368,8 +338,12 @@ impl CodexMessageProcessor {
ClientRequest::ThreadList { request_id, params } => {
self.thread_list(request_id, params).await;
}
ClientRequest::SkillsList { request_id, params } => {
self.skills_list(request_id, params).await;
ClientRequest::ThreadCompact {
request_id,
params: _,
} => {
self.send_unimplemented_error(request_id, "thread/compact")
.await;
}
ClientRequest::TurnStart { request_id, params } => {
self.turn_start(request_id, params).await;
@@ -395,9 +369,6 @@ impl CodexMessageProcessor {
ClientRequest::ModelList { request_id, params } => {
self.list_models(request_id, params).await;
}
ClientRequest::McpServerOauthLogin { request_id, params } => {
self.mcp_server_oauth_login(request_id, params).await;
}
ClientRequest::McpServersList { request_id, params } => {
self.list_mcp_servers(request_id, params).await;
}
@@ -508,6 +479,15 @@ impl CodexMessageProcessor {
}
}

async fn send_unimplemented_error(&self, request_id: RequestId, method: &str) {
let error = JSONRPCErrorError {
code: INTERNAL_ERROR_CODE,
message: format!("{method} is not implemented yet"),
data: None,
};
self.outgoing.send_error(request_id, error).await;
}

async fn login_v2(&mut self, request_id: RequestId, params: LoginAccountParams) {
match params {
LoginAccountParams::ApiKey { api_key } => {
@@ -822,7 +802,7 @@ impl CodexMessageProcessor {
async fn cancel_login_chatgpt_common(
&mut self,
login_id: Uuid,
) -> std::result::Result<(), CancelLoginError> {
) -> std::result::Result<(), JSONRPCErrorError> {
let mut guard = self.active_login.lock().await;
if guard.as_ref().map(|l| l.login_id) == Some(login_id) {
if let Some(active) = guard.take() {
@@ -830,7 +810,11 @@ impl CodexMessageProcessor {
}
Ok(())
} else {
Err(CancelLoginError::NotFound(login_id))
Err(JSONRPCErrorError {
code: INVALID_REQUEST_ERROR_CODE,
message: format!("login id not found: {login_id}"),
data: None,
})
}
}

@@ -841,12 +825,7 @@ impl CodexMessageProcessor {
.send_response(request_id, CancelLoginChatGptResponse {})
.await;
}
Err(CancelLoginError::NotFound(missing_login_id)) => {
let error = JSONRPCErrorError {
code: INVALID_REQUEST_ERROR_CODE,
message: format!("login id not found: {missing_login_id}"),
data: None,
};
Err(error) => {
self.outgoing.send_error(request_id, error).await;
}
}
@@ -855,14 +834,16 @@ impl CodexMessageProcessor {
async fn cancel_login_v2(&mut self, request_id: RequestId, params: CancelLoginAccountParams) {
let login_id = params.login_id;
match Uuid::parse_str(&login_id) {
Ok(uuid) => {
let status = match self.cancel_login_chatgpt_common(uuid).await {
Ok(()) => CancelLoginAccountStatus::Canceled,
Err(CancelLoginError::NotFound(_)) => CancelLoginAccountStatus::NotFound,
};
let response = CancelLoginAccountResponse { status };
self.outgoing.send_response(request_id, response).await;
}
Ok(uuid) => match self.cancel_login_chatgpt_common(uuid).await {
Ok(()) => {
self.outgoing
.send_response(request_id, CancelLoginAccountResponse {})
.await;
}
Err(error) => {
self.outgoing.send_error(request_id, error).await;
}
},
Err(_) => {
let error = JSONRPCErrorError {
code: INVALID_REQUEST_ERROR_CODE,
@@ -1096,13 +1077,12 @@ impl CodexMessageProcessor {
}

async fn get_user_saved_config(&self, request_id: RequestId) {
let service = ConfigService::new(self.config.codex_home.clone(), Vec::new());
let user_saved_config: UserSavedConfig = match service.load_user_saved_config().await {
Ok(config) => config,
let toml_value = match load_config_as_toml(&self.config.codex_home).await {
Ok(val) => val,
Err(err) => {
let error = JSONRPCErrorError {
code: INTERNAL_ERROR_CODE,
message: err.to_string(),
message: format!("failed to load config.toml: {err}"),
data: None,
};
self.outgoing.send_error(request_id, error).await;
@@ -1110,6 +1090,21 @@ impl CodexMessageProcessor {
}
};

let cfg: ConfigToml = match toml_value.try_into() {
Ok(cfg) => cfg,
Err(err) => {
let error = JSONRPCErrorError {
code: INTERNAL_ERROR_CODE,
message: format!("failed to parse config.toml: {err}"),
data: None,
};
self.outgoing.send_error(request_id, error).await;
return;
}
};

let user_saved_config: UserSavedConfig = cfg.into();

let response = GetUserSavedConfigResponse {
config: user_saved_config,
};
@@ -1174,7 +1169,7 @@ impl CodexMessageProcessor {
cwd,
expiration: timeout_ms.into(),
env,
sandbox_permissions: SandboxPermissions::UseDefault,
with_escalated_permissions: None,
justification: None,
arg0: None,
};
@@ -1254,7 +1249,7 @@ impl CodexMessageProcessor {
let mut cli_overrides = cli_overrides.unwrap_or_default();
if cfg!(windows) && self.config.features.enabled(Feature::WindowsSandbox) {
cli_overrides.insert(
"features.experimental_windows_sandbox".to_string(),
"features.enable_experimental_windows_sandbox".to_string(),
serde_json::json!(true),
);
}
@@ -1490,12 +1485,10 @@ impl CodexMessageProcessor {
model_providers,
} = params;

let requested_page_size = limit
.map(|value| value as usize)
.unwrap_or(THREAD_LIST_DEFAULT_LIMIT)
.clamp(1, THREAD_LIST_MAX_LIMIT);
let page_size = limit.unwrap_or(25).max(1) as usize;

let (summaries, next_cursor) = match self
.list_conversations_common(requested_page_size, cursor, model_providers)
.list_conversations_common(page_size, cursor, model_providers)
.await
{
Ok(r) => r,
@@ -1506,6 +1499,7 @@ impl CodexMessageProcessor {
};

let data = summaries.into_iter().map(summary_to_thread).collect();

let response = ThreadListResponse { data, next_cursor };
self.outgoing.send_response(request_id, response).await;
}
@@ -1783,12 +1777,10 @@ impl CodexMessageProcessor {
cursor,
model_providers,
} = params;
let requested_page_size = page_size
.unwrap_or(THREAD_LIST_DEFAULT_LIMIT)
.clamp(1, THREAD_LIST_MAX_LIMIT);
let page_size = page_size.unwrap_or(25).max(1);

match self
.list_conversations_common(requested_page_size, cursor, model_providers)
.list_conversations_common(page_size, cursor, model_providers)
.await
{
Ok((items, next_cursor)) => {
@@ -1803,15 +1795,12 @@ impl CodexMessageProcessor {

async fn list_conversations_common(
&self,
requested_page_size: usize,
page_size: usize,
cursor: Option<String>,
model_providers: Option<Vec<String>>,
) -> Result<(Vec<ConversationSummary>, Option<String>), JSONRPCErrorError> {
let mut cursor_obj: Option<RolloutCursor> = cursor.as_ref().and_then(|s| parse_cursor(s));
let mut last_cursor = cursor_obj.clone();
let mut remaining = requested_page_size;
let mut items = Vec::with_capacity(requested_page_size);
let mut next_cursor: Option<String> = None;
let cursor_obj: Option<RolloutCursor> = cursor.as_ref().and_then(|s| parse_cursor(s));
let cursor_ref = cursor_obj.as_ref();

let model_provider_filter = match model_providers {
Some(providers) => {
@@ -1825,76 +1814,56 @@ impl CodexMessageProcessor {
};
let fallback_provider = self.config.model_provider_id.clone();

while remaining > 0 {
let page_size = remaining.min(THREAD_LIST_MAX_LIMIT);
let page = RolloutRecorder::list_conversations(
&self.config.codex_home,
page_size,
cursor_obj.as_ref(),
INTERACTIVE_SESSION_SOURCES,
model_provider_filter.as_deref(),
fallback_provider.as_str(),
)
.await
.map_err(|err| JSONRPCErrorError {
code: INTERNAL_ERROR_CODE,
message: format!("failed to list conversations: {err}"),
data: None,
})?;

let mut filtered = page
.items
.into_iter()
.filter_map(|it| {
let session_meta_line = it.head.first().and_then(|first| {
serde_json::from_value::<SessionMetaLine>(first.clone()).ok()
})?;
extract_conversation_summary(
it.path,
&it.head,
&session_meta_line.meta,
session_meta_line.git.as_ref(),
fallback_provider.as_str(),
)
})
.collect::<Vec<_>>();
if filtered.len() > remaining {
filtered.truncate(remaining);
let page = match RolloutRecorder::list_conversations(
&self.config.codex_home,
page_size,
cursor_ref,
INTERACTIVE_SESSION_SOURCES,
model_provider_filter.as_deref(),
fallback_provider.as_str(),
)
.await
{
Ok(p) => p,
Err(err) => {
return Err(JSONRPCErrorError {
code: INTERNAL_ERROR_CODE,
message: format!("failed to list conversations: {err}"),
data: None,
});
}
items.extend(filtered);
remaining = requested_page_size.saturating_sub(items.len());
};

// Encode RolloutCursor into the JSON-RPC string form returned to clients.
let next_cursor_value = page.next_cursor.clone();
next_cursor = next_cursor_value
.as_ref()
.and_then(|cursor| serde_json::to_value(cursor).ok())
.and_then(|value| value.as_str().map(str::to_owned));
if remaining == 0 {
break;
}
let items = page
.items
.into_iter()
.filter_map(|it| {
let session_meta_line = it.head.first().and_then(|first| {
serde_json::from_value::<SessionMetaLine>(first.clone()).ok()
})?;
extract_conversation_summary(
it.path,
&it.head,
&session_meta_line.meta,
session_meta_line.git.as_ref(),
fallback_provider.as_str(),
)
})
.collect::<Vec<_>>();

match next_cursor_value {
Some(cursor_val) if remaining > 0 => {
// Break if our pagination would reuse the same cursor again; this avoids
// an infinite loop when filtering drops everything on the page.
if last_cursor.as_ref() == Some(&cursor_val) {
next_cursor = None;
break;
}
last_cursor = Some(cursor_val.clone());
cursor_obj = Some(cursor_val);
}
_ => break,
}
}
// Encode next_cursor as a plain string
let next_cursor = page
.next_cursor
.and_then(|cursor| serde_json::to_value(&cursor).ok())
.and_then(|value| value.as_str().map(str::to_owned));

Ok((items, next_cursor))
}

async fn list_models(&self, request_id: RequestId, params: ModelListParams) {
let ModelListParams { limit, cursor } = params;
let models = supported_models(self.conversation_manager.clone(), &self.config).await;
let auth_mode = self.auth_manager.auth().map(|auth| auth.mode);
let models = supported_models(auth_mode);
let total = models.len();

if total == 0 {
@@ -1948,124 +1917,13 @@ impl CodexMessageProcessor {
self.outgoing.send_response(request_id, response).await;
}

async fn mcp_server_oauth_login(
&self,
request_id: RequestId,
params: McpServerOauthLoginParams,
) {
let config = match self.load_latest_config().await {
Ok(config) => config,
Err(error) => {
self.outgoing.send_error(request_id, error).await;
return;
}
};

if !config.features.enabled(Feature::RmcpClient) {
let error = JSONRPCErrorError {
code: INVALID_REQUEST_ERROR_CODE,
message: "OAuth login is only supported when [features].rmcp_client is true in config.toml".to_string(),
data: None,
};
self.outgoing.send_error(request_id, error).await;
return;
}

let McpServerOauthLoginParams {
name,
scopes,
timeout_secs,
} = params;

let Some(server) = config.mcp_servers.get(&name) else {
let error = JSONRPCErrorError {
code: INVALID_REQUEST_ERROR_CODE,
message: format!("No MCP server named '{name}' found."),
data: None,
};
self.outgoing.send_error(request_id, error).await;
return;
};

let (url, http_headers, env_http_headers) = match &server.transport {
McpServerTransportConfig::StreamableHttp {
url,
http_headers,
env_http_headers,
..
} => (url.clone(), http_headers.clone(), env_http_headers.clone()),
_ => {
let error = JSONRPCErrorError {
code: INVALID_REQUEST_ERROR_CODE,
message: "OAuth login is only supported for streamable HTTP servers."
.to_string(),
data: None,
};
self.outgoing.send_error(request_id, error).await;
return;
}
};

match perform_oauth_login_return_url(
&name,
&url,
config.mcp_oauth_credentials_store_mode,
http_headers,
env_http_headers,
scopes.as_deref().unwrap_or_default(),
timeout_secs,
)
.await
{
Ok(handle) => {
let authorization_url = handle.authorization_url().to_string();
let notification_name = name.clone();
let outgoing = Arc::clone(&self.outgoing);

tokio::spawn(async move {
let (success, error) = match handle.wait().await {
Ok(()) => (true, None),
Err(err) => (false, Some(err.to_string())),
};

let notification = ServerNotification::McpServerOauthLoginCompleted(
McpServerOauthLoginCompletedNotification {
name: notification_name,
success,
error,
},
);
outgoing.send_server_notification(notification).await;
});

let response = McpServerOauthLoginResponse { authorization_url };
self.outgoing.send_response(request_id, response).await;
}
Err(err) => {
let error = JSONRPCErrorError {
code: INTERNAL_ERROR_CODE,
message: format!("failed to login to MCP server '{name}': {err}"),
data: None,
};
self.outgoing.send_error(request_id, error).await;
}
}
}

async fn list_mcp_servers(&self, request_id: RequestId, params: ListMcpServersParams) {
let config = match self.load_latest_config().await {
Ok(config) => config,
Err(error) => {
self.outgoing.send_error(request_id, error).await;
return;
}
};

let snapshot = collect_mcp_snapshot(&config).await;
let snapshot = collect_mcp_snapshot(self.config.as_ref()).await;

let tools_by_server = group_tools_by_server(&snapshot.tools);

let mut server_names: Vec<String> = config
let mut server_names: Vec<String> = self
.config
.mcp_servers
.keys()
.cloned()
@@ -2171,7 +2029,7 @@ impl CodexMessageProcessor {
let mut cli_overrides = cli_overrides.unwrap_or_default();
if cfg!(windows) && self.config.features.enabled(Feature::WindowsSandbox) {
cli_overrides.insert(
"features.experimental_windows_sandbox".to_string(),
"features.enable_experimental_windows_sandbox".to_string(),
serde_json::json!(true),
);
}
@@ -2604,42 +2462,6 @@ impl CodexMessageProcessor {
.await;
}

async fn skills_list(&self, request_id: RequestId, params: SkillsListParams) {
let SkillsListParams { cwds } = params;
let cwds = if cwds.is_empty() {
vec![self.config.cwd.clone()]
} else {
cwds
};

let data = if self.config.features.enabled(Feature::Skills) {
let skills_manager = self.conversation_manager.skills_manager();
cwds.into_iter()
.map(|cwd| {
let outcome = skills_manager.skills_for_cwd(&cwd);
let errors = errors_to_info(&outcome.errors);
let skills = skills_to_info(&outcome.skills);
codex_app_server_protocol::SkillsListEntry {
cwd,
skills,
errors,
}
})
.collect()
} else {
cwds.into_iter()
.map(|cwd| codex_app_server_protocol::SkillsListEntry {
cwd,
skills: Vec::new(),
errors: Vec::new(),
})
.collect()
};
self.outgoing
.send_response(request_id, SkillsListResponse { data })
.await;
}

async fn interrupt_conversation(
&mut self,
request_id: RequestId,
@@ -2848,7 +2670,7 @@ impl CodexMessageProcessor {
})?;

let mut config = self.config.as_ref().clone();
config.model = Some(self.config.review_model.clone());
config.model = self.config.review_model.clone();

let NewConversation {
conversation_id,
@@ -3285,32 +3107,6 @@ impl CodexMessageProcessor {
}
}

fn skills_to_info(
skills: &[codex_core::skills::SkillMetadata],
) -> Vec<codex_app_server_protocol::SkillMetadata> {
skills
.iter()
.map(|skill| codex_app_server_protocol::SkillMetadata {
name: skill.name.clone(),
description: skill.description.clone(),
path: skill.path.clone(),
scope: skill.scope.into(),
})
.collect()
}

fn errors_to_info(
errors: &[codex_core::skills::SkillError],
) -> Vec<codex_app_server_protocol::SkillErrorInfo> {
errors
.iter()
.map(|err| codex_app_server_protocol::SkillErrorInfo {
path: err.path.clone(),
message: err.message.clone(),
})
.collect()
}

async fn derive_config_from_params(
overrides: ConfigOverrides,
cli_overrides: Option<std::collections::HashMap<String, serde_json::Value>>,

@@ -1,27 +1,65 @@
use crate::error_code::INTERNAL_ERROR_CODE;
use crate::error_code::INVALID_REQUEST_ERROR_CODE;
use anyhow::anyhow;
use codex_app_server_protocol::ConfigBatchWriteParams;
use codex_app_server_protocol::ConfigLayer;
use codex_app_server_protocol::ConfigLayerMetadata;
use codex_app_server_protocol::ConfigLayerName;
use codex_app_server_protocol::ConfigReadParams;
use codex_app_server_protocol::ConfigReadResponse;
use codex_app_server_protocol::ConfigValueWriteParams;
use codex_app_server_protocol::ConfigWriteErrorCode;
use codex_app_server_protocol::ConfigWriteResponse;
use codex_app_server_protocol::JSONRPCErrorError;
use codex_core::config::ConfigService;
use codex_core::config::ConfigServiceError;
use codex_app_server_protocol::MergeStrategy;
use codex_app_server_protocol::OverriddenMetadata;
use codex_app_server_protocol::WriteStatus;
use codex_core::config::ConfigToml;
use codex_core::config_loader::LoadedConfigLayers;
use codex_core::config_loader::LoaderOverrides;
use codex_core::config_loader::load_config_layers_with_overrides;
use codex_core::config_loader::merge_toml_values;
use serde_json::Value as JsonValue;
use serde_json::json;
use sha2::Digest;
use sha2::Sha256;
use std::collections::HashMap;
use std::path::Path;
use std::path::PathBuf;
use tempfile::NamedTempFile;
use tokio::task;
use toml::Value as TomlValue;

const SESSION_FLAGS_SOURCE: &str = "--config";
const MDM_SOURCE: &str = "com.openai.codex/config_toml_base64";
const CONFIG_FILE_NAME: &str = "config.toml";

#[derive(Clone)]
pub(crate) struct ConfigApi {
service: ConfigService,
codex_home: PathBuf,
cli_overrides: Vec<(String, TomlValue)>,
loader_overrides: LoaderOverrides,
}

impl ConfigApi {
pub(crate) fn new(codex_home: PathBuf, cli_overrides: Vec<(String, TomlValue)>) -> Self {
Self {
service: ConfigService::new(codex_home, cli_overrides),
codex_home,
cli_overrides,
loader_overrides: LoaderOverrides::default(),
}
}

#[cfg(test)]
fn with_overrides(
codex_home: PathBuf,
cli_overrides: Vec<(String, TomlValue)>,
loader_overrides: LoaderOverrides,
) -> Self {
Self {
codex_home,
cli_overrides,
loader_overrides,
}
}

@@ -29,32 +67,646 @@ impl ConfigApi {
&self,
params: ConfigReadParams,
) -> Result<ConfigReadResponse, JSONRPCErrorError> {
self.service.read(params).await.map_err(map_error)
let layers = self
.load_layers_state()
.await
.map_err(|err| internal_error("failed to read configuration layers", err))?;

let effective = layers.effective_config();
validate_config(&effective).map_err(|err| internal_error("invalid configuration", err))?;

let response = ConfigReadResponse {
config: to_json_value(&effective),
origins: layers.origins(),
layers: params.include_layers.then(|| layers.layers_high_to_low()),
};

Ok(response)
}

pub(crate) async fn write_value(
&self,
params: ConfigValueWriteParams,
) -> Result<ConfigWriteResponse, JSONRPCErrorError> {
self.service.write_value(params).await.map_err(map_error)
let edits = vec![(params.key_path, params.value, params.merge_strategy)];
self.apply_edits(params.file_path, params.expected_version, edits)
.await
}

pub(crate) async fn batch_write(
&self,
params: ConfigBatchWriteParams,
) -> Result<ConfigWriteResponse, JSONRPCErrorError> {
self.service.batch_write(params).await.map_err(map_error)
let edits = params
.edits
.into_iter()
.map(|edit| (edit.key_path, edit.value, edit.merge_strategy))
.collect();

self.apply_edits(params.file_path, params.expected_version, edits)
.await
}

async fn apply_edits(
&self,
file_path: String,
expected_version: Option<String>,
edits: Vec<(String, JsonValue, MergeStrategy)>,
) -> Result<ConfigWriteResponse, JSONRPCErrorError> {
let allowed_path = self.codex_home.join(CONFIG_FILE_NAME);
if !paths_match(&allowed_path, &file_path) {
return Err(config_write_error(
ConfigWriteErrorCode::ConfigLayerReadonly,
"Only writes to the user config are allowed",
));
}

let layers = self
.load_layers_state()
.await
.map_err(|err| internal_error("failed to load configuration", err))?;

if let Some(expected) = expected_version.as_deref()
&& expected != layers.user.version
{
return Err(config_write_error(
ConfigWriteErrorCode::ConfigVersionConflict,
"Configuration was modified since last read. Fetch latest version and retry.",
));
}

let mut user_config = layers.user.config.clone();
let mut mutated = false;
let mut parsed_segments = Vec::new();

for (key_path, value, strategy) in edits.into_iter() {
let segments = parse_key_path(&key_path).map_err(|message| {
config_write_error(ConfigWriteErrorCode::ConfigValidationError, message)
})?;
let parsed_value = parse_value(value).map_err(|message| {
config_write_error(ConfigWriteErrorCode::ConfigValidationError, message)
})?;

let changed = apply_merge(&mut user_config, &segments, parsed_value.as_ref(), strategy)
.map_err(|err| match err {
MergeError::PathNotFound => config_write_error(
ConfigWriteErrorCode::ConfigPathNotFound,
"Path not found",
),
MergeError::Validation(message) => {
config_write_error(ConfigWriteErrorCode::ConfigValidationError, message)
}
})?;

mutated |= changed;
parsed_segments.push(segments);
}

validate_config(&user_config).map_err(|err| {
config_write_error(
ConfigWriteErrorCode::ConfigValidationError,
format!("Invalid configuration: {err}"),
)
})?;

let updated_layers = layers.with_user_config(user_config.clone());
let effective = updated_layers.effective_config();
validate_config(&effective).map_err(|err| {
config_write_error(
ConfigWriteErrorCode::ConfigValidationError,
format!("Invalid configuration: {err}"),
)
})?;

if mutated {
self.persist_user_config(&user_config)
.await
.map_err(|err| internal_error("failed to persist config.toml", err))?;
}

let overridden = first_overridden_edit(&updated_layers, &effective, &parsed_segments);
let status = overridden
.as_ref()
.map(|_| WriteStatus::OkOverridden)
.unwrap_or(WriteStatus::Ok);

Ok(ConfigWriteResponse {
status,
version: updated_layers.user.version.clone(),
overridden_metadata: overridden,
})
}

async fn load_layers_state(&self) -> std::io::Result<LayersState> {
let LoadedConfigLayers {
base,
managed_config,
managed_preferences,
} = load_config_layers_with_overrides(&self.codex_home, self.loader_overrides.clone())
.await?;

let user = LayerState::new(
ConfigLayerName::User,
self.codex_home.join(CONFIG_FILE_NAME),
base,
);

let session_flags = LayerState::new(
ConfigLayerName::SessionFlags,
PathBuf::from(SESSION_FLAGS_SOURCE),
{
let mut root = TomlValue::Table(toml::map::Map::new());
for (path, value) in self.cli_overrides.iter() {
apply_override(&mut root, path, value.clone());
}
root
},
);

let system = managed_config.map(|cfg| {
LayerState::new(
ConfigLayerName::System,
system_config_path(&self.codex_home),
cfg,
)
});

let mdm = managed_preferences
.map(|cfg| LayerState::new(ConfigLayerName::Mdm, PathBuf::from(MDM_SOURCE), cfg));

Ok(LayersState {
user,
session_flags,
system,
mdm,
})
}

async fn persist_user_config(&self, user_config: &TomlValue) -> anyhow::Result<()> {
let codex_home = self.codex_home.clone();
let serialized = toml::to_string_pretty(user_config)?;

task::spawn_blocking(move || -> anyhow::Result<()> {
std::fs::create_dir_all(&codex_home)?;

let target = codex_home.join(CONFIG_FILE_NAME);
let tmp = NamedTempFile::new_in(&codex_home)?;
std::fs::write(tmp.path(), serialized.as_bytes())?;
tmp.persist(&target)?;
Ok(())
})
.await
.map_err(|err| anyhow!("config persistence task panicked: {err}"))??;

Ok(())
}
}

fn map_error(err: ConfigServiceError) -> JSONRPCErrorError {
if let Some(code) = err.write_error_code() {
return config_write_error(code, err.to_string());
fn parse_value(value: JsonValue) -> Result<Option<TomlValue>, String> {
if value.is_null() {
return Ok(None);
}

serde_json::from_value::<TomlValue>(value)
.map(Some)
.map_err(|err| format!("invalid value: {err}"))
}

fn parse_key_path(path: &str) -> Result<Vec<String>, String> {
if path.trim().is_empty() {
return Err("keyPath must not be empty".to_string());
}
Ok(path
.split('.')
.map(std::string::ToString::to_string)
.collect())
}

fn apply_override(target: &mut TomlValue, path: &str, value: TomlValue) {
use toml::value::Table;

let segments: Vec<&str> = path.split('.').collect();
let mut current = target;

for (idx, segment) in segments.iter().enumerate() {
let is_last = idx == segments.len() - 1;

if is_last {
match current {
TomlValue::Table(table) => {
table.insert(segment.to_string(), value);
}
_ => {
let mut table = Table::new();
table.insert(segment.to_string(), value);
*current = TomlValue::Table(table);
}
}
return;
}

match current {
TomlValue::Table(table) => {
current = table
.entry((*segment).to_string())
.or_insert_with(|| TomlValue::Table(Table::new()));
}
_ => {
*current = TomlValue::Table(Table::new());
if let TomlValue::Table(tbl) = current {
current = tbl
.entry((*segment).to_string())
.or_insert_with(|| TomlValue::Table(Table::new()));
}
}
}
}
}

#[derive(Debug)]
enum MergeError {
PathNotFound,
Validation(String),
}

fn apply_merge(
root: &mut TomlValue,
segments: &[String],
value: Option<&TomlValue>,
strategy: MergeStrategy,
) -> Result<bool, MergeError> {
let Some(value) = value else {
return clear_path(root, segments);
};

let Some((last, parents)) = segments.split_last() else {
return Err(MergeError::Validation(
"keyPath must not be empty".to_string(),
));
};

let mut current = root;

for segment in parents {
match current {
TomlValue::Table(table) => {
current = table
.entry(segment.clone())
.or_insert_with(|| TomlValue::Table(toml::map::Map::new()));
}
_ => {
*current = TomlValue::Table(toml::map::Map::new());
if let TomlValue::Table(table) = current {
current = table
.entry(segment.clone())
.or_insert_with(|| TomlValue::Table(toml::map::Map::new()));
}
}
}
}

let table = current.as_table_mut().ok_or_else(|| {
MergeError::Validation("cannot set value on non-table parent".to_string())
})?;

if matches!(strategy, MergeStrategy::Upsert)
&& let Some(existing) = table.get_mut(last)
&& matches!(existing, TomlValue::Table(_))
&& matches!(value, TomlValue::Table(_))
{
merge_toml_values(existing, value);
return Ok(true);
}

let changed = table
.get(last)
.map(|existing| Some(existing) != Some(value))
.unwrap_or(true);
table.insert(last.clone(), value.clone());
Ok(changed)
}

fn clear_path(root: &mut TomlValue, segments: &[String]) -> Result<bool, MergeError> {
let Some((last, parents)) = segments.split_last() else {
return Err(MergeError::Validation(
"keyPath must not be empty".to_string(),
));
};

let mut current = root;
for segment in parents {
match current {
TomlValue::Table(table) => {
current = table.get_mut(segment).ok_or(MergeError::PathNotFound)?;
}
_ => return Err(MergeError::PathNotFound),
}
}

let Some(parent) = current.as_table_mut() else {
return Err(MergeError::PathNotFound);
};

Ok(parent.remove(last).is_some())
}

#[derive(Clone)]
struct LayerState {
name: ConfigLayerName,
source: PathBuf,
config: TomlValue,
version: String,
}

impl LayerState {
fn new(name: ConfigLayerName, source: PathBuf, config: TomlValue) -> Self {
let version = version_for_toml(&config);
Self {
name,
source,
config,
version,
}
}

fn metadata(&self) -> ConfigLayerMetadata {
ConfigLayerMetadata {
name: self.name.clone(),
source: self.source.display().to_string(),
version: self.version.clone(),
}
}

fn as_layer(&self) -> ConfigLayer {
ConfigLayer {
name: self.name.clone(),
source: self.source.display().to_string(),
version: self.version.clone(),
config: to_json_value(&self.config),
}
}
}

#[derive(Clone)]
struct LayersState {
user: LayerState,
session_flags: LayerState,
system: Option<LayerState>,
mdm: Option<LayerState>,
}

impl LayersState {
fn with_user_config(self, user_config: TomlValue) -> Self {
Self {
user: LayerState::new(self.user.name, self.user.source, user_config),
session_flags: self.session_flags,
system: self.system,
mdm: self.mdm,
}
}

fn effective_config(&self) -> TomlValue {
let mut merged = self.user.config.clone();
merge_toml_values(&mut merged, &self.session_flags.config);
if let Some(system) = &self.system {
merge_toml_values(&mut merged, &system.config);
}
if let Some(mdm) = &self.mdm {
merge_toml_values(&mut merged, &mdm.config);
}
merged
}

fn origins(&self) -> HashMap<String, ConfigLayerMetadata> {
let mut origins = HashMap::new();
let mut path = Vec::new();

record_origins(
&self.user.config,
&self.user.metadata(),
&mut path,
&mut origins,
);
record_origins(
&self.session_flags.config,
&self.session_flags.metadata(),
&mut path,
&mut origins,
);
if let Some(system) = &self.system {
record_origins(&system.config, &system.metadata(), &mut path, &mut origins);
}
if let Some(mdm) = &self.mdm {
record_origins(&mdm.config, &mdm.metadata(), &mut path, &mut origins);
}

origins
}

fn layers_high_to_low(&self) -> Vec<ConfigLayer> {
let mut layers = Vec::new();
if let Some(mdm) = &self.mdm {
layers.push(mdm.as_layer());
}
if let Some(system) = &self.system {
layers.push(system.as_layer());
}
layers.push(self.session_flags.as_layer());
layers.push(self.user.as_layer());
layers
}
}

fn record_origins(
value: &TomlValue,
meta: &ConfigLayerMetadata,
path: &mut Vec<String>,
origins: &mut HashMap<String, ConfigLayerMetadata>,
) {
match value {
TomlValue::Table(table) => {
for (key, val) in table {
path.push(key.clone());
record_origins(val, meta, path, origins);
path.pop();
}
}
TomlValue::Array(items) => {
for (idx, item) in items.iter().enumerate() {
path.push(idx.to_string());
record_origins(item, meta, path, origins);
path.pop();
}
}
_ => {
if !path.is_empty() {
origins.insert(path.join("."), meta.clone());
}
}
}
}

fn to_json_value(value: &TomlValue) -> JsonValue {
serde_json::to_value(value).unwrap_or(JsonValue::Null)
}

fn validate_config(value: &TomlValue) -> Result<(), toml::de::Error> {
let _: ConfigToml = value.clone().try_into()?;
Ok(())
}

fn version_for_toml(value: &TomlValue) -> String {
let json = to_json_value(value);
let canonical = canonical_json(&json);
let serialized = serde_json::to_vec(&canonical).unwrap_or_default();
let mut hasher = Sha256::new();
hasher.update(serialized);
let hash = hasher.finalize();
let hex = hash
.iter()
.map(|byte| format!("{byte:02x}"))
.collect::<String>();
format!("sha256:{hex}")
}

fn canonical_json(value: &JsonValue) -> JsonValue {
match value {
JsonValue::Object(map) => {
let mut sorted = serde_json::Map::new();
let mut keys = map.keys().cloned().collect::<Vec<_>>();
keys.sort();
for key in keys {
if let Some(val) = map.get(&key) {
sorted.insert(key, canonical_json(val));
}
}
JsonValue::Object(sorted)
}
JsonValue::Array(items) => JsonValue::Array(items.iter().map(canonical_json).collect()),
other => other.clone(),
}
}

fn paths_match(expected: &Path, provided: &str) -> bool {
let provided_path = PathBuf::from(provided);
if let (Ok(expanded_expected), Ok(expanded_provided)) =
(expected.canonicalize(), provided_path.canonicalize())
{
return expanded_expected == expanded_provided;
}

expected == provided_path
}

fn value_at_path<'a>(root: &'a TomlValue, segments: &[String]) -> Option<&'a TomlValue> {
let mut current = root;
for segment in segments {
match current {
TomlValue::Table(table) => {
current = table.get(segment)?;
}
TomlValue::Array(items) => {
let idx: usize = segment.parse().ok()?;
current = items.get(idx)?;
}
_ => return None,
}
}
Some(current)
}

fn override_message(layer: &ConfigLayerName) -> String {
match layer {
ConfigLayerName::Mdm => "Overridden by managed policy (mdm)".to_string(),
ConfigLayerName::System => "Overridden by managed config (system)".to_string(),
ConfigLayerName::SessionFlags => "Overridden by session flags".to_string(),
ConfigLayerName::User => "Overridden by user config".to_string(),
}
}

fn compute_override_metadata(
layers: &LayersState,
effective: &TomlValue,
segments: &[String],
) -> Option<OverriddenMetadata> {
let user_value = value_at_path(&layers.user.config, segments);
let effective_value = value_at_path(effective, segments);

if user_value.is_some() && user_value == effective_value {
return None;
}

if user_value.is_none() && effective_value.is_none() {
return None;
}

let effective_layer = find_effective_layer(layers, segments);
let overriding_layer = effective_layer.unwrap_or_else(|| layers.user.metadata());
let message = override_message(&overriding_layer.name);

Some(OverriddenMetadata {
message,
overriding_layer,
effective_value: effective_value
.map(to_json_value)
.unwrap_or(JsonValue::Null),
})
}

fn first_overridden_edit(
layers: &LayersState,
effective: &TomlValue,
edits: &[Vec<String>],
) -> Option<OverriddenMetadata> {
for segments in edits {
if let Some(meta) = compute_override_metadata(layers, effective, segments) {
return Some(meta);
}
}
None
}

fn find_effective_layer(layers: &LayersState, segments: &[String]) -> Option<ConfigLayerMetadata> {
let check =
|state: &LayerState| value_at_path(&state.config, segments).map(|_| state.metadata());

if let Some(mdm) = &layers.mdm
&& let Some(meta) = check(mdm)
{
return Some(meta);
}
if let Some(system) = &layers.system
&& let Some(meta) = check(system)
{
return Some(meta);
}
if let Some(meta) = check(&layers.session_flags) {
return Some(meta);
}
check(&layers.user)
}

fn system_config_path(codex_home: &Path) -> PathBuf {
if let Ok(path) = std::env::var("CODEX_MANAGED_CONFIG_PATH") {
return PathBuf::from(path);
}

#[cfg(unix)]
{
let _ = codex_home;
PathBuf::from("/etc/codex/managed_config.toml")
}

#[cfg(not(unix))]
{
codex_home.join("managed_config.toml")
}
}

fn internal_error<E: std::fmt::Display>(context: &str, err: E) -> JSONRPCErrorError {
JSONRPCErrorError {
code: INTERNAL_ERROR_CODE,
message: err.to_string(),
message: format!("{context}: {err}"),
data: None,
}
}
@@ -68,3 +720,255 @@ fn config_write_error(code: ConfigWriteErrorCode, message: impl Into<String>) ->
})),
}
}

#[cfg(test)]
mod tests {
use super::*;
use pretty_assertions::assert_eq;
use tempfile::tempdir;

#[tokio::test]
async fn read_includes_origins_and_layers() {
let tmp = tempdir().expect("tempdir");
std::fs::write(tmp.path().join(CONFIG_FILE_NAME), "model = \"user\"").unwrap();

let managed_path = tmp.path().join("managed_config.toml");
std::fs::write(&managed_path, "approval_policy = \"never\"").unwrap();

let api = ConfigApi::with_overrides(
tmp.path().to_path_buf(),
vec![],
LoaderOverrides {
managed_config_path: Some(managed_path),
#[cfg(target_os = "macos")]
managed_preferences_base64: None,
},
);

let response = api
.read(ConfigReadParams {
include_layers: true,
})
.await
.expect("response");

assert_eq!(
response.config.get("approval_policy"),
Some(&json!("never"))
);

assert_eq!(
response
.origins
.get("approval_policy")
.expect("origin")
.name,
ConfigLayerName::System
);
let layers = response.layers.expect("layers present");
assert_eq!(layers.first().unwrap().name, ConfigLayerName::System);
assert_eq!(layers.get(1).unwrap().name, ConfigLayerName::SessionFlags);
assert_eq!(layers.last().unwrap().name, ConfigLayerName::User);
}

#[tokio::test]
async fn write_value_reports_override() {
let tmp = tempdir().expect("tempdir");
std::fs::write(
tmp.path().join(CONFIG_FILE_NAME),
"approval_policy = \"on-request\"",
)
.unwrap();

let managed_path = tmp.path().join("managed_config.toml");
std::fs::write(&managed_path, "approval_policy = \"never\"").unwrap();

let api = ConfigApi::with_overrides(
tmp.path().to_path_buf(),
vec![],
LoaderOverrides {
managed_config_path: Some(managed_path),
#[cfg(target_os = "macos")]
managed_preferences_base64: None,
},
);

let result = api
.write_value(ConfigValueWriteParams {
file_path: tmp.path().join(CONFIG_FILE_NAME).display().to_string(),
key_path: "approval_policy".to_string(),
value: json!("never"),
merge_strategy: MergeStrategy::Replace,
expected_version: None,
})
.await
.expect("result");

let read_after = api
.read(ConfigReadParams {
include_layers: true,
})
.await
.expect("read");
let config_object = read_after.config.as_object().expect("object");
assert_eq!(config_object.get("approval_policy"), Some(&json!("never")));
assert_eq!(
read_after
.origins
.get("approval_policy")
.expect("origin")
.name,
ConfigLayerName::System
);
assert_eq!(result.status, WriteStatus::Ok);
assert!(result.overridden_metadata.is_none());
}

#[tokio::test]
async fn version_conflict_rejected() {
let tmp = tempdir().expect("tempdir");
std::fs::write(tmp.path().join(CONFIG_FILE_NAME), "model = \"user\"").unwrap();

let api = ConfigApi::new(tmp.path().to_path_buf(), vec![]);
let error = api
.write_value(ConfigValueWriteParams {
file_path: tmp.path().join(CONFIG_FILE_NAME).display().to_string(),
key_path: "model".to_string(),
value: json!("gpt-5"),
merge_strategy: MergeStrategy::Replace,
expected_version: Some("sha256:bogus".to_string()),
})
.await
.expect_err("should fail");

assert_eq!(error.code, INVALID_REQUEST_ERROR_CODE);
assert_eq!(
error
.data
.as_ref()
.and_then(|d| d.get("config_write_error_code"))
.and_then(serde_json::Value::as_str),
Some("configVersionConflict")
);
}

#[tokio::test]
async fn invalid_user_value_rejected_even_if_overridden_by_managed() {
let tmp = tempdir().expect("tempdir");
std::fs::write(tmp.path().join(CONFIG_FILE_NAME), "model = \"user\"").unwrap();

let managed_path = tmp.path().join("managed_config.toml");
std::fs::write(&managed_path, "approval_policy = \"never\"").unwrap();

let api = ConfigApi::with_overrides(
tmp.path().to_path_buf(),
vec![],
LoaderOverrides {
managed_config_path: Some(managed_path),
#[cfg(target_os = "macos")]
managed_preferences_base64: None,
},
);

let error = api
.write_value(ConfigValueWriteParams {
file_path: tmp.path().join(CONFIG_FILE_NAME).display().to_string(),
key_path: "approval_policy".to_string(),
|
||||
value: json!("bogus"),
|
||||
merge_strategy: MergeStrategy::Replace,
|
||||
expected_version: None,
|
||||
})
|
||||
.await
|
||||
.expect_err("should fail validation");
|
||||
|
||||
assert_eq!(error.code, INVALID_REQUEST_ERROR_CODE);
|
||||
assert_eq!(
|
||||
error
|
||||
.data
|
||||
.as_ref()
|
||||
.and_then(|d| d.get("config_write_error_code"))
|
||||
.and_then(serde_json::Value::as_str),
|
||||
Some("configValidationError")
|
||||
);
|
||||
|
||||
let contents =
|
||||
std::fs::read_to_string(tmp.path().join(CONFIG_FILE_NAME)).expect("read config");
|
||||
assert_eq!(contents.trim(), "model = \"user\"");
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn read_reports_managed_overrides_user_and_session_flags() {
|
||||
let tmp = tempdir().expect("tempdir");
|
||||
std::fs::write(tmp.path().join(CONFIG_FILE_NAME), "model = \"user\"").unwrap();
|
||||
|
||||
let managed_path = tmp.path().join("managed_config.toml");
|
||||
std::fs::write(&managed_path, "model = \"system\"").unwrap();
|
||||
|
||||
let cli_overrides = vec![(
|
||||
"model".to_string(),
|
||||
TomlValue::String("session".to_string()),
|
||||
)];
|
||||
|
||||
let api = ConfigApi::with_overrides(
|
||||
tmp.path().to_path_buf(),
|
||||
cli_overrides,
|
||||
LoaderOverrides {
|
||||
managed_config_path: Some(managed_path),
|
||||
#[cfg(target_os = "macos")]
|
||||
managed_preferences_base64: None,
|
||||
},
|
||||
);
|
||||
|
||||
let response = api
|
||||
.read(ConfigReadParams {
|
||||
include_layers: true,
|
||||
})
|
||||
.await
|
||||
.expect("response");
|
||||
|
||||
assert_eq!(response.config.get("model"), Some(&json!("system")));
|
||||
assert_eq!(
|
||||
response.origins.get("model").expect("origin").name,
|
||||
ConfigLayerName::System
|
||||
);
|
||||
let layers = response.layers.expect("layers");
|
||||
assert_eq!(layers.first().unwrap().name, ConfigLayerName::System);
|
||||
assert_eq!(layers.get(1).unwrap().name, ConfigLayerName::SessionFlags);
|
||||
assert_eq!(layers.get(2).unwrap().name, ConfigLayerName::User);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn write_value_reports_managed_override() {
|
||||
let tmp = tempdir().expect("tempdir");
|
||||
std::fs::write(tmp.path().join(CONFIG_FILE_NAME), "").unwrap();
|
||||
|
||||
let managed_path = tmp.path().join("managed_config.toml");
|
||||
std::fs::write(&managed_path, "approval_policy = \"never\"").unwrap();
|
||||
|
||||
let api = ConfigApi::with_overrides(
|
||||
tmp.path().to_path_buf(),
|
||||
vec![],
|
||||
LoaderOverrides {
|
||||
managed_config_path: Some(managed_path),
|
||||
#[cfg(target_os = "macos")]
|
||||
managed_preferences_base64: None,
|
||||
},
|
||||
);
|
||||
|
||||
let result = api
|
||||
.write_value(ConfigValueWriteParams {
|
||||
file_path: tmp.path().join(CONFIG_FILE_NAME).display().to_string(),
|
||||
key_path: "approval_policy".to_string(),
|
||||
value: json!("on-request"),
|
||||
merge_strategy: MergeStrategy::Replace,
|
||||
expected_version: None,
|
||||
})
|
||||
.await
|
||||
.expect("result");
|
||||
|
||||
assert_eq!(result.status, WriteStatus::OkOverridden);
|
||||
let overridden = result.overridden_metadata.expect("overridden metadata");
|
||||
assert_eq!(overridden.overriding_layer.name, ConfigLayerName::System);
|
||||
assert_eq!(overridden.effective_value, json!("never"));
|
||||
}
|
||||
}

@@ -3,6 +3,7 @@
use codex_common::CliConfigOverrides;
use codex_core::config::Config;
use codex_core::config::ConfigOverrides;
use opentelemetry_appender_tracing::layer::OpenTelemetryTracingBridge;
use std::io::ErrorKind;
use std::io::Result as IoResult;
use std::path::PathBuf;
@@ -102,7 +103,6 @@ pub async fn run_main(
    // control the log level with `RUST_LOG`.
    let stderr_fmt = tracing_subscriber::fmt::layer()
        .with_writer(std::io::stderr)
        .with_span_events(tracing_subscriber::fmt::format::FmtSpan::FULL)
        .with_filter(EnvFilter::from_default_env());

    let feedback_layer = tracing_subscriber::fmt::layer()
@@ -111,15 +111,14 @@ pub async fn run_main(
        .with_target(false)
        .with_filter(Targets::new().with_default(Level::TRACE));

    let otel_logger_layer = otel.as_ref().and_then(|o| o.logger_layer());

    let otel_tracing_layer = otel.as_ref().and_then(|o| o.tracing_layer());

    let _ = tracing_subscriber::registry()
        .with(stderr_fmt)
        .with(feedback_layer)
        .with(otel_logger_layer)
        .with(otel_tracing_layer)
        .with(otel.as_ref().map(|provider| {
            OpenTelemetryTracingBridge::new(&provider.logger).with_filter(
                tracing_subscriber::filter::filter_fn(codex_core::otel_init::codex_export_filter),
            )
        }))
        .try_init();

    // Task: process incoming messages.

@@ -59,7 +59,6 @@ impl MessageProcessor {
            outgoing.clone(),
            codex_linux_sandbox_exe,
            Arc::clone(&config),
            cli_overrides.clone(),
            feedback,
        );
        let config_api = ConfigApi::new(config.codex_home.clone(), cli_overrides);

@@ -1,19 +1,12 @@
use std::sync::Arc;

use codex_app_server_protocol::AuthMode;
use codex_app_server_protocol::Model;
use codex_app_server_protocol::ReasoningEffortOption;
use codex_core::ConversationManager;
use codex_core::config::Config;
use codex_core::openai_models::model_presets::builtin_model_presets;
use codex_protocol::openai_models::ModelPreset;
use codex_protocol::openai_models::ReasoningEffortPreset;

pub async fn supported_models(
    conversation_manager: Arc<ConversationManager>,
    config: &Config,
) -> Vec<Model> {
    conversation_manager
        .list_models(config)
        .await
pub fn supported_models(auth_mode: Option<AuthMode>) -> Vec<Model> {
    builtin_model_presets(auth_mode)
        .into_iter()
        .map(model_from_preset)
        .collect()

@@ -16,9 +16,6 @@ use tracing::warn;

use crate::error_code::INTERNAL_ERROR_CODE;

#[cfg(test)]
use codex_protocol::account::PlanType;

/// Sends messages to the client and manages request callbacks.
pub(crate) struct OutgoingMessageSender {
    next_request_id: AtomicI64,
@@ -233,7 +230,6 @@ mod tests {
            }),
            secondary: None,
            credits: None,
            plan_type: Some(PlanType::Plus),
        },
    });

@@ -249,8 +245,7 @@ mod tests {
                "resetsAt": 123
            },
            "secondary": null,
            "credits": null,
            "planType": "plus"
            "credits": null
        }
    },
}),

@@ -13,7 +13,7 @@ assert_cmd = { workspace = true }
base64 = { workspace = true }
chrono = { workspace = true }
codex-app-server-protocol = { workspace = true }
codex-core = { workspace = true, features = ["test-support"] }
codex-core = { workspace = true }
codex-protocol = { workspace = true }
serde = { workspace = true }
serde_json = { workspace = true }

@@ -1,7 +1,6 @@
mod auth_fixtures;
mod mcp_process;
mod mock_model_server;
mod models_cache;
mod responses;
mod rollout;

@@ -12,16 +11,9 @@ pub use auth_fixtures::write_chatgpt_auth;
use codex_app_server_protocol::JSONRPCResponse;
pub use core_test_support::format_with_current_shell;
pub use core_test_support::format_with_current_shell_display;
pub use core_test_support::format_with_current_shell_display_non_login;
pub use core_test_support::format_with_current_shell_non_login;
pub use core_test_support::test_path_buf_with_windows;
pub use core_test_support::test_tmp_path;
pub use core_test_support::test_tmp_path_buf;
pub use mcp_process::McpProcess;
pub use mock_model_server::create_mock_chat_completions_server;
pub use mock_model_server::create_mock_chat_completions_server_unchecked;
pub use models_cache::write_models_cache;
pub use models_cache::write_models_cache_with_models;
pub use responses::create_apply_patch_sse_response;
pub use responses::create_exec_command_sse_response;
pub use responses::create_final_assistant_message_sse_response;

@@ -1,85 +0,0 @@
use chrono::DateTime;
use chrono::Utc;
use codex_core::openai_models::model_presets::all_model_presets;
use codex_protocol::openai_models::ClientVersion;
use codex_protocol::openai_models::ConfigShellToolType;
use codex_protocol::openai_models::ModelInfo;
use codex_protocol::openai_models::ModelPreset;
use codex_protocol::openai_models::ModelVisibility;
use codex_protocol::openai_models::ReasoningSummaryFormat;
use codex_protocol::openai_models::TruncationPolicyConfig;
use serde_json::json;
use std::path::Path;

/// Convert a ModelPreset to ModelInfo for cache storage.
fn preset_to_info(preset: &ModelPreset, priority: i32) -> ModelInfo {
    ModelInfo {
        slug: preset.id.clone(),
        display_name: preset.display_name.clone(),
        description: Some(preset.description.clone()),
        default_reasoning_level: preset.default_reasoning_effort,
        supported_reasoning_levels: preset.supported_reasoning_efforts.clone(),
        shell_type: ConfigShellToolType::ShellCommand,
        visibility: if preset.show_in_picker {
            ModelVisibility::List
        } else {
            ModelVisibility::Hide
        },
        minimal_client_version: ClientVersion(0, 1, 0),
        supported_in_api: true,
        priority,
        upgrade: preset.upgrade.as_ref().map(|u| u.id.clone()),
        base_instructions: None,
        supports_reasoning_summaries: false,
        support_verbosity: false,
        default_verbosity: None,
        apply_patch_tool_type: None,
        truncation_policy: TruncationPolicyConfig::bytes(10_000),
        supports_parallel_tool_calls: false,
        context_window: None,
        reasoning_summary_format: ReasoningSummaryFormat::None,
        experimental_supported_tools: Vec::new(),
    }
}

/// Write a models_cache.json file to the codex home directory.
/// This prevents ModelsManager from making network requests to refresh models.
/// The cache will be treated as fresh (within TTL) and used instead of fetching from the network.
/// Uses the built-in model presets from ModelsManager, converted to ModelInfo format.
pub fn write_models_cache(codex_home: &Path) -> std::io::Result<()> {
    // Get all presets and filter for show_in_picker (same as builtin_model_presets does)
    let presets: Vec<&ModelPreset> = all_model_presets()
        .iter()
        .filter(|preset| preset.show_in_picker)
        .collect();
    // Convert presets to ModelInfo, assigning priorities (higher = earlier in list)
    // Priority is used for sorting, so first model gets highest priority
    let models: Vec<ModelInfo> = presets
        .iter()
        .enumerate()
        .map(|(idx, preset)| {
            // Higher priority = earlier in list, so reverse the index
            let priority = (presets.len() - idx) as i32;
            preset_to_info(preset, priority)
        })
        .collect();

    write_models_cache_with_models(codex_home, models)
}

/// Write a models_cache.json file with specific models.
/// Useful when tests need specific models to be available.
pub fn write_models_cache_with_models(
    codex_home: &Path,
    models: Vec<ModelInfo>,
) -> std::io::Result<()> {
    let cache_path = codex_home.join("models_cache.json");
    // DateTime<Utc> serializes to RFC3339 format by default with serde
    let fetched_at: DateTime<Utc> = Utc::now();
    let cache = json!({
        "fetched_at": fetched_at,
        "etag": null,
        "models": models
    });
    std::fs::write(cache_path, serde_json::to_string_pretty(&cache)?)
}
@@ -271,6 +271,7 @@ async fn test_send_user_turn_changes_approval_policy_behavior() -> Result<()> {
            command: format_with_current_shell("python3 -c 'print(42)'"),
            cwd: working_directory.clone(),
            reason: None,
            risk: None,
            parsed_cmd: vec![ParsedCommand::Unknown {
                cmd: "python3 -c 'print(42)'".to_string()
            }],
@@ -410,7 +411,7 @@ async fn test_send_user_turn_updates_sandbox_and_cwd_between_turns() -> Result<(
        cwd: first_cwd.clone(),
        approval_policy: AskForApproval::Never,
        sandbox_policy: SandboxPolicy::WorkspaceWrite {
            writable_roots: vec![first_cwd.try_into()?],
            writable_roots: vec![first_cwd.clone()],
            network_access: false,
            exclude_tmpdir_env_var: false,
            exclude_slash_tmp: false,

@@ -1,6 +1,5 @@
use anyhow::Result;
use app_test_support::McpProcess;
use app_test_support::test_tmp_path;
use app_test_support::to_response;
use codex_app_server_protocol::GetUserSavedConfigResponse;
use codex_app_server_protocol::JSONRPCResponse;
@@ -24,12 +23,10 @@ use tokio::time::timeout;
const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10);

fn create_config_toml(codex_home: &Path) -> std::io::Result<()> {
    let writable_root = test_tmp_path();
    let config_toml = codex_home.join("config.toml");
    std::fs::write(
        config_toml,
        format!(
            r#"
        r#"
model = "gpt-5.1-codex-max"
approval_policy = "on-request"
sandbox_mode = "workspace-write"
@@ -41,7 +38,7 @@ forced_chatgpt_workspace_id = "12345678-0000-0000-0000-000000000000"
forced_login_method = "chatgpt"

[sandbox_workspace_write]
writable_roots = [{}]
writable_roots = ["/tmp"]
network_access = true
exclude_tmpdir_env_var = true
exclude_slash_tmp = true
@@ -59,8 +56,6 @@ model_verbosity = "medium"
model_provider = "openai"
chatgpt_base_url = "https://api.chatgpt.com"
"#,
            serde_json::json!(writable_root)
        ),
    )
}

@@ -80,13 +75,12 @@ async fn get_config_toml_parses_all_fields() -> Result<()> {
    .await??;

    let config: GetUserSavedConfigResponse = to_response(resp)?;
    let writable_root = test_tmp_path();
    let expected = GetUserSavedConfigResponse {
        config: UserSavedConfig {
            approval_policy: Some(AskForApproval::OnRequest),
            sandbox_mode: Some(SandboxMode::WorkspaceWrite),
            sandbox_settings: Some(SandboxSettings {
                writable_roots: vec![writable_root],
                writable_roots: vec!["/tmp".into()],
                network_access: Some(true),
                exclude_tmpdir_env_var: Some(true),
                exclude_slash_tmp: Some(true),

@@ -358,81 +358,3 @@ async fn test_list_and_resume_conversations() -> Result<()> {

    Ok(())
}

#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn list_conversations_fetches_through_filtered_pages() -> Result<()> {
    let codex_home = TempDir::new()?;

    // Only the last 3 conversations match the provider filter; request 3 and
    // ensure pagination keeps fetching past non-matching pages.
    let cases = [
        ("2025-03-04T12-00-00", "2025-03-04T12:00:00Z", "skip_provider"),
        ("2025-03-03T12-00-00", "2025-03-03T12:00:00Z", "skip_provider"),
        ("2025-03-02T12-00-00", "2025-03-02T12:00:00Z", "target_provider"),
        ("2025-03-01T12-00-00", "2025-03-01T12:00:00Z", "target_provider"),
        ("2025-02-28T12-00-00", "2025-02-28T12:00:00Z", "target_provider"),
    ];

    for (ts_file, ts_rfc, provider) in cases {
        create_fake_rollout(
            codex_home.path(),
            ts_file,
            ts_rfc,
            "Hello",
            Some(provider),
            None,
        )?;
    }

    let mut mcp = McpProcess::new(codex_home.path()).await?;
    timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;

    let req_id = mcp
        .send_list_conversations_request(ListConversationsParams {
            page_size: Some(3),
            cursor: None,
            model_providers: Some(vec!["target_provider".to_string()]),
        })
        .await?;
    let resp: JSONRPCResponse = timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_response_message(RequestId::Integer(req_id)),
    )
    .await??;
    let ListConversationsResponse { items, next_cursor } =
        to_response::<ListConversationsResponse>(resp)?;

    assert_eq!(
        items.len(),
        3,
        "should fetch across pages to satisfy the limit"
    );
    assert!(
        items
            .iter()
            .all(|item| item.model_provider == "target_provider")
    );
    assert_eq!(next_cursor, None);

    Ok(())
}
@@ -1,6 +1,8 @@
use anyhow::Result;
use app_test_support::McpProcess;
use app_test_support::to_response;
use codex_app_server_protocol::CancelLoginChatGptParams;
use codex_app_server_protocol::CancelLoginChatGptResponse;
use codex_app_server_protocol::GetAuthStatusParams;
use codex_app_server_protocol::GetAuthStatusResponse;
use codex_app_server_protocol::JSONRPCError;
@@ -12,6 +14,7 @@ use codex_core::auth::AuthCredentialsStoreMode;
use codex_login::login_with_api_key;
use serial_test::serial;
use std::path::Path;
use std::time::Duration;
use tempfile::TempDir;
use tokio::time::timeout;

@@ -84,6 +87,48 @@ async fn logout_chatgpt_removes_auth() -> Result<()> {
    Ok(())
}

#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
// Serialize tests that launch the login server since it binds to a fixed port.
#[serial(login_port)]
async fn login_and_cancel_chatgpt() -> Result<()> {
    let codex_home = TempDir::new()?;
    create_config_toml(codex_home.path())?;

    let mut mcp = McpProcess::new(codex_home.path()).await?;
    timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;

    let login_id = mcp.send_login_chat_gpt_request().await?;
    let login_resp: JSONRPCResponse = timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_response_message(RequestId::Integer(login_id)),
    )
    .await??;
    let login: LoginChatGptResponse = to_response(login_resp)?;

    let cancel_id = mcp
        .send_cancel_login_chat_gpt_request(CancelLoginChatGptParams {
            login_id: login.login_id,
        })
        .await?;
    let cancel_resp: JSONRPCResponse = timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_response_message(RequestId::Integer(cancel_id)),
    )
    .await??;
    let _ok: CancelLoginChatGptResponse = to_response(cancel_resp)?;

    // Optionally observe the completion notification; do not fail if it races.
    let maybe_note = timeout(
        Duration::from_secs(2),
        mcp.read_stream_until_notification_message("codex/event/login_chat_gpt_complete"),
    )
    .await;
    if maybe_note.is_err() {
        eprintln!("warning: did not observe login_chat_gpt_complete notification after cancel");
    }
    Ok(())
}

fn create_config_toml_forced_login(codex_home: &Path, forced_method: &str) -> std::io::Result<()> {
    let config_toml = codex_home.join("config.toml");
    let contents = format!(

@@ -241,7 +241,7 @@ async fn login_account_chatgpt_rejected_when_forced_api() -> Result<()> {
#[tokio::test]
// Serialize tests that launch the login server since it binds to a fixed port.
#[serial(login_port)]
async fn login_account_chatgpt_start_can_be_cancelled() -> Result<()> {
async fn login_account_chatgpt_start() -> Result<()> {
    let codex_home = TempDir::new()?;
    create_config_toml(codex_home.path(), CreateConfigTomlParams::default())?;

@@ -1,9 +1,6 @@
use anyhow::Result;
use app_test_support::McpProcess;
use app_test_support::test_path_buf_with_windows;
use app_test_support::test_tmp_path_buf;
use app_test_support::to_response;
use codex_app_server_protocol::AskForApproval;
use codex_app_server_protocol::ConfigBatchWriteParams;
use codex_app_server_protocol::ConfigEdit;
use codex_app_server_protocol::ConfigLayerName;
@@ -15,8 +12,6 @@ use codex_app_server_protocol::JSONRPCError;
use codex_app_server_protocol::JSONRPCResponse;
use codex_app_server_protocol::MergeStrategy;
use codex_app_server_protocol::RequestId;
use codex_app_server_protocol::SandboxMode;
use codex_app_server_protocol::ToolsV2;
use codex_app_server_protocol::WriteStatus;
use pretty_assertions::assert_eq;
use serde_json::json;
@@ -62,7 +57,7 @@ sandbox_mode = "workspace-write"
        layers,
    } = to_response(resp)?;

    assert_eq!(config.model.as_deref(), Some("gpt-user"));
    assert_eq!(config.get("model"), Some(&json!("gpt-user")));
    assert_eq!(
        origins.get("model").expect("origin").name,
        ConfigLayerName::User
@@ -76,97 +71,31 @@ sandbox_mode = "workspace-write"
}

#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn config_read_includes_tools() -> Result<()> {
async fn config_read_includes_system_layer_and_overrides() -> Result<()> {
    let codex_home = TempDir::new()?;
    write_config(
        &codex_home,
        r#"
model = "gpt-user"

[tools]
web_search = true
view_image = false
"#,
    )?;

    let mut mcp = McpProcess::new(codex_home.path()).await?;
    timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;

    let request_id = mcp
        .send_config_read_request(ConfigReadParams {
            include_layers: true,
        })
        .await?;
    let resp: JSONRPCResponse = timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_response_message(RequestId::Integer(request_id)),
    )
    .await??;
    let ConfigReadResponse {
        config,
        origins,
        layers,
    } = to_response(resp)?;

    let tools = config.tools.expect("tools present");
    assert_eq!(
        tools,
        ToolsV2 {
            web_search: Some(true),
            view_image: Some(false),
        }
    );
    assert_eq!(
        origins.get("tools.web_search").expect("origin").name,
        ConfigLayerName::User
    );
    assert_eq!(
        origins.get("tools.view_image").expect("origin").name,
        ConfigLayerName::User
    );

    let layers = layers.expect("layers present");
    assert_eq!(layers.len(), 2);
    assert_eq!(layers[0].name, ConfigLayerName::SessionFlags);
    assert_eq!(layers[1].name, ConfigLayerName::User);

    Ok(())
}

#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn config_read_includes_system_layer_and_overrides() -> Result<()> {
    let codex_home = TempDir::new()?;
    let user_dir = test_path_buf_with_windows("/user", Some(r"C:\Users\user"));
    let system_dir = test_path_buf_with_windows("/system", Some(r"C:\System"));
    write_config(
        &codex_home,
        &format!(
            r#"
model = "gpt-user"
approval_policy = "on-request"
sandbox_mode = "workspace-write"

[sandbox_workspace_write]
writable_roots = [{}]
writable_roots = ["/user"]
network_access = true
"#,
            serde_json::json!(user_dir)
        ),
    )?;

    let managed_path = codex_home.path().join("managed_config.toml");
    std::fs::write(
        &managed_path,
        format!(
            r#"
        r#"
model = "gpt-system"
approval_policy = "never"

[sandbox_workspace_write]
writable_roots = [{}]
writable_roots = ["/system"]
"#,
            serde_json::json!(system_dir.clone())
        ),
    )?;

    let managed_path_str = managed_path.display().to_string();
@@ -194,29 +123,30 @@ writable_roots = [{}]
        layers,
    } = to_response(resp)?;

    assert_eq!(config.model.as_deref(), Some("gpt-system"));
    assert_eq!(config.get("model"), Some(&json!("gpt-system")));
    assert_eq!(
        origins.get("model").expect("origin").name,
        ConfigLayerName::System
    );

    assert_eq!(config.approval_policy, Some(AskForApproval::Never));
    assert_eq!(config.get("approval_policy"), Some(&json!("never")));
    assert_eq!(
        origins.get("approval_policy").expect("origin").name,
        ConfigLayerName::System
    );

    assert_eq!(config.sandbox_mode, Some(SandboxMode::WorkspaceWrite));
    assert_eq!(config.get("sandbox_mode"), Some(&json!("workspace-write")));
    assert_eq!(
        origins.get("sandbox_mode").expect("origin").name,
        ConfigLayerName::User
    );

    let sandbox = config
        .sandbox_workspace_write
        .as_ref()
        .expect("sandbox workspace write");
    assert_eq!(sandbox.writable_roots, vec![system_dir]);
    assert_eq!(
        config
            .get("sandbox_workspace_write")
            .and_then(|v| v.get("writable_roots")),
        Some(&json!(["/system"]))
    );
    assert_eq!(
        origins
            .get("sandbox_workspace_write.writable_roots.0")
@@ -225,7 +155,12 @@ writable_roots = [{}]
        ConfigLayerName::System
    );

    assert!(sandbox.network_access);
    assert_eq!(
        config
            .get("sandbox_workspace_write")
            .and_then(|v| v.get("network_access")),
        Some(&json!(true))
    );
    assert_eq!(
        origins
            .get("sandbox_workspace_write.network_access")
@@ -271,7 +206,7 @@ model = "gpt-old"

    let write_id = mcp
        .send_config_value_write_request(ConfigValueWriteParams {
            file_path: None,
            file_path: codex_home.path().join("config.toml").display().to_string(),
            key_path: "model".to_string(),
            value: json!("gpt-new"),
            merge_strategy: MergeStrategy::Replace,
@@ -284,16 +219,8 @@ model = "gpt-old"
    )
    .await??;
    let write: ConfigWriteResponse = to_response(write_resp)?;
    let expected_file_path = codex_home
        .path()
        .join("config.toml")
        .canonicalize()
        .unwrap()
        .display()
        .to_string();

    assert_eq!(write.status, WriteStatus::Ok);
    assert_eq!(write.file_path, expected_file_path);
    assert!(write.overridden_metadata.is_none());

    let verify_id = mcp
@@ -307,7 +234,7 @@ model = "gpt-old"
    )
    .await??;
    let verify: ConfigReadResponse = to_response(verify_resp)?;
    assert_eq!(verify.config.model.as_deref(), Some("gpt-new"));
    assert_eq!(verify.config.get("model"), Some(&json!("gpt-new")));

    Ok(())
}
@@ -327,7 +254,7 @@ model = "gpt-old"

    let write_id = mcp
        .send_config_value_write_request(ConfigValueWriteParams {
            file_path: Some(codex_home.path().join("config.toml").display().to_string()),
            file_path: codex_home.path().join("config.toml").display().to_string(),
            key_path: "model".to_string(),
            value: json!("gpt-new"),
            merge_strategy: MergeStrategy::Replace,
@@ -359,10 +286,9 @@ async fn config_batch_write_applies_multiple_edits() -> Result<()> {
    let mut mcp = McpProcess::new(codex_home.path()).await?;
    timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;

    let writable_root = test_tmp_path_buf();
    let batch_id = mcp
        .send_config_batch_write_request(ConfigBatchWriteParams {
            file_path: Some(codex_home.path().join("config.toml").display().to_string()),
            file_path: codex_home.path().join("config.toml").display().to_string(),
            edits: vec![
                ConfigEdit {
                    key_path: "sandbox_mode".to_string(),
@@ -372,7 +298,7 @@ async fn config_batch_write_applies_multiple_edits() -> Result<()> {
                ConfigEdit {
                    key_path: "sandbox_workspace_write".to_string(),
                    value: json!({
                        "writable_roots": [writable_root.clone()],
                        "writable_roots": ["/tmp"],
                        "network_access": false
                    }),
                    merge_strategy: MergeStrategy::Replace,
@@ -388,14 +314,6 @@ async fn config_batch_write_applies_multiple_edits() -> Result<()> {
    .await??;
    let batch_write: ConfigWriteResponse = to_response(batch_resp)?;
    assert_eq!(batch_write.status, WriteStatus::Ok);
    let expected_file_path = codex_home
        .path()
        .join("config.toml")
        .canonicalize()
        .unwrap()
        .display()
        .to_string();
    assert_eq!(batch_write.file_path, expected_file_path);

    let read_id = mcp
        .send_config_read_request(ConfigReadParams {
@@ -408,14 +326,22 @@ async fn config_batch_write_applies_multiple_edits() -> Result<()> {
    )
    .await??;
    let read: ConfigReadResponse = to_response(read_resp)?;
    assert_eq!(read.config.sandbox_mode, Some(SandboxMode::WorkspaceWrite));
    let sandbox = read
        .config
        .sandbox_workspace_write
        .as_ref()
        .expect("sandbox workspace write");
    assert_eq!(sandbox.writable_roots, vec![writable_root]);
    assert!(!sandbox.network_access);
    assert_eq!(
        read.config.get("sandbox_mode"),
        Some(&json!("workspace-write"))
    );
    assert_eq!(
        read.config
            .get("sandbox_workspace_write")
            .and_then(|v| v.get("writable_roots")),
        Some(&json!(["/tmp"]))
    );
    assert_eq!(
        read.config
            .get("sandbox_workspace_write")
            .and_then(|v| v.get("network_access")),
        Some(&json!(false))
    );

    Ok(())
}

@@ -4,7 +4,6 @@ use anyhow::Result;
use anyhow::anyhow;
use app_test_support::McpProcess;
use app_test_support::to_response;
use app_test_support::write_models_cache;
use codex_app_server_protocol::JSONRPCError;
use codex_app_server_protocol::JSONRPCResponse;
use codex_app_server_protocol::Model;
@@ -23,7 +22,6 @@ const INVALID_REQUEST_ERROR_CODE: i64 = -32600;
#[tokio::test]
async fn list_models_returns_all_models_with_large_limit() -> Result<()> {
    let codex_home = TempDir::new()?;
    write_models_cache(codex_home.path())?;
    let mut mcp = McpProcess::new(codex_home.path()).await?;

    timeout(DEFAULT_TIMEOUT, mcp.initialize()).await??;
@@ -64,7 +62,7 @@ async fn list_models_returns_all_models_with_large_limit() -> Result<()> {
            },
            ReasoningEffortOption {
                reasoning_effort: ReasoningEffort::High,
                description: "Greater reasoning depth for complex problems".to_string(),
                description: "Maximizes reasoning depth for complex problems".to_string(),
            },
            ReasoningEffortOption {
                reasoning_effort: ReasoningEffort::XHigh,
@@ -116,39 +114,6 @@ async fn list_models_returns_all_models_with_large_limit() -> Result<()> {
            default_reasoning_effort: ReasoningEffort::Medium,
            is_default: false,
        },
        Model {
            id: "gpt-5.2".to_string(),
            model: "gpt-5.2".to_string(),
            display_name: "gpt-5.2".to_string(),
            description:
                "Latest frontier model with improvements across knowledge, reasoning and coding"
                    .to_string(),
            supported_reasoning_efforts: vec![
                ReasoningEffortOption {
                    reasoning_effort: ReasoningEffort::Low,
                    description: "Balances speed with some reasoning; useful for straightforward \
                                  queries and short explanations"
                        .to_string(),
                },
                ReasoningEffortOption {
                    reasoning_effort: ReasoningEffort::Medium,
                    description: "Provides a solid balance of reasoning depth and latency for \
                                  general-purpose tasks"
                        .to_string(),
                },
                ReasoningEffortOption {
                    reasoning_effort: ReasoningEffort::High,
                    description: "Greater reasoning depth for complex or ambiguous problems"
                        .to_string(),
                },
                ReasoningEffortOption {
                    reasoning_effort: ReasoningEffort::XHigh,
                    description: "Extra high reasoning for complex problems".to_string(),
                },
            ],
            default_reasoning_effort: ReasoningEffort::Medium,
            is_default: false,
        },
        Model {
            id: "gpt-5.1".to_string(),
            model: "gpt-5.1".to_string(),
@@ -186,7 +151,6 @@ async fn list_models_returns_all_models_with_large_limit() -> Result<()> {
#[tokio::test]
async fn list_models_pagination_works() -> Result<()> {
    let codex_home = TempDir::new()?;
    write_models_cache(codex_home.path())?;
    let mut mcp = McpProcess::new(codex_home.path()).await?;

    timeout(DEFAULT_TIMEOUT, mcp.initialize()).await??;
@@ -276,37 +240,14 @@ async fn list_models_pagination_works() -> Result<()> {
    } = to_response::<ModelListResponse>(fourth_response)?;

    assert_eq!(fourth_items.len(), 1);
    assert_eq!(fourth_items[0].id, "gpt-5.2");
    let fifth_cursor = fourth_cursor.ok_or_else(|| anyhow!("cursor for fifth page"))?;

    let fifth_request = mcp
        .send_list_models_request(ModelListParams {
            limit: Some(1),
            cursor: Some(fifth_cursor.clone()),
        })
        .await?;

    let fifth_response: JSONRPCResponse = timeout(
        DEFAULT_TIMEOUT,
        mcp.read_stream_until_response_message(RequestId::Integer(fifth_request)),
    )
    .await??;

    let ModelListResponse {
        data: fifth_items,
        next_cursor: fifth_cursor,
    } = to_response::<ModelListResponse>(fifth_response)?;

    assert_eq!(fifth_items.len(), 1);
    assert_eq!(fifth_items[0].id, "gpt-5.1");
    assert!(fifth_cursor.is_none());
    assert_eq!(fourth_items[0].id, "gpt-5.1");
    assert!(fourth_cursor.is_none());
    Ok(())
}

#[tokio::test]
async fn list_models_rejects_invalid_cursor() -> Result<()> {
    let codex_home = TempDir::new()?;
    write_models_cache(codex_home.path())?;
    let mut mcp = McpProcess::new(codex_home.path()).await?;

    timeout(DEFAULT_TIMEOUT, mcp.initialize()).await??;

@@ -11,7 +11,6 @@ use codex_app_server_protocol::RateLimitSnapshot;
use codex_app_server_protocol::RateLimitWindow;
use codex_app_server_protocol::RequestId;
use codex_core::auth::AuthCredentialsStoreMode;
use codex_protocol::account::PlanType as AccountPlanType;
use pretty_assertions::assert_eq;
use serde_json::json;
use std::path::Path;
@@ -154,7 +153,6 @@ async fn get_account_rate_limits_returns_snapshot() -> Result<()> {
            resets_at: Some(secondary_reset_timestamp),
        }),
        credits: None,
        plan_type: Some(AccountPlanType::Pro),
    },
};
assert_eq!(received, expected);

@@ -6,96 +6,37 @@ use codex_app_server_protocol::GitInfo as ApiGitInfo;
use codex_app_server_protocol::JSONRPCResponse;
use codex_app_server_protocol::RequestId;
use codex_app_server_protocol::SessionSource;
use codex_app_server_protocol::ThreadListParams;
use codex_app_server_protocol::ThreadListResponse;
use codex_protocol::protocol::GitInfo as CoreGitInfo;
use std::path::Path;
use std::path::PathBuf;
use tempfile::TempDir;
use tokio::time::timeout;

const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10);

async fn init_mcp(codex_home: &Path) -> Result<McpProcess> {
    let mut mcp = McpProcess::new(codex_home).await?;
    timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
    Ok(mcp)
}

async fn list_threads(
    mcp: &mut McpProcess,
    cursor: Option<String>,
    limit: Option<u32>,
    providers: Option<Vec<String>>,
) -> Result<ThreadListResponse> {
    let request_id = mcp
        .send_thread_list_request(codex_app_server_protocol::ThreadListParams {
            cursor,
            limit,
            model_providers: providers,
        })
        .await?;
    let resp: JSONRPCResponse = timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_response_message(RequestId::Integer(request_id)),
    )
    .await??;
    to_response::<ThreadListResponse>(resp)
}

fn create_fake_rollouts<F, G>(
    codex_home: &Path,
    count: usize,
    provider_for_index: F,
    timestamp_for_index: G,
    preview: &str,
) -> Result<Vec<String>>
where
    F: Fn(usize) -> &'static str,
    G: Fn(usize) -> (String, String),
{
    let mut ids = Vec::with_capacity(count);
    for i in 0..count {
        let (ts_file, ts_rfc) = timestamp_for_index(i);
        ids.push(create_fake_rollout(
            codex_home,
            &ts_file,
            &ts_rfc,
            preview,
            Some(provider_for_index(i)),
            None,
        )?);
    }
    Ok(ids)
}
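
/// Builds the (filename, RFC 3339) timestamp pair used by the rollout fixtures,
/// e.g. timestamp_at(2025, 3, 4, 12, 0, 0) == ("2025-03-04T12-00-00", "2025-03-04T12:00:00Z").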
fn timestamp_at(
    year: i32,
    month: u32,
    day: u32,
    hour: u32,
    minute: u32,
    second: u32,
) -> (String, String) {
    (
        format!("{year:04}-{month:02}-{day:02}T{hour:02}-{minute:02}-{second:02}"),
        format!("{year:04}-{month:02}-{day:02}T{hour:02}:{minute:02}:{second:02}Z"),
    )
}

#[tokio::test]
async fn thread_list_basic_empty() -> Result<()> {
    let codex_home = TempDir::new()?;
    create_minimal_config(codex_home.path())?;

    let mut mcp = init_mcp(codex_home.path()).await?;
    let mut mcp = McpProcess::new(codex_home.path()).await?;
    timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;

    let ThreadListResponse { data, next_cursor } = list_threads(
        &mut mcp,
        None,
        Some(10),
        Some(vec!["mock_provider".to_string()]),
    // List threads in an empty CODEX_HOME; should return an empty page with nextCursor: null.
    let list_id = mcp
        .send_thread_list_request(ThreadListParams {
            cursor: None,
            limit: Some(10),
            model_providers: Some(vec!["mock_provider".to_string()]),
        })
        .await?;
    let list_resp: JSONRPCResponse = timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_response_message(RequestId::Integer(list_id)),
    )
    .await?;
    .await??;
    let ThreadListResponse { data, next_cursor } = to_response::<ThreadListResponse>(list_resp)?;
    assert!(data.is_empty());
    assert_eq!(next_cursor, None);

@@ -145,19 +86,26 @@ async fn thread_list_pagination_next_cursor_none_on_last_page() -> Result<()> {
        None,
    )?;

    let mut mcp = init_mcp(codex_home.path()).await?;
    let mut mcp = McpProcess::new(codex_home.path()).await?;
    timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;

    // Page 1: limit 2 → expect next_cursor Some.
    let page1_id = mcp
        .send_thread_list_request(ThreadListParams {
            cursor: None,
            limit: Some(2),
            model_providers: Some(vec!["mock_provider".to_string()]),
        })
        .await?;
    let page1_resp: JSONRPCResponse = timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_response_message(RequestId::Integer(page1_id)),
    )
    .await??;
    let ThreadListResponse {
        data: data1,
        next_cursor: cursor1,
    } = list_threads(
        &mut mcp,
        None,
        Some(2),
        Some(vec!["mock_provider".to_string()]),
    )
    .await?;
    } = to_response::<ThreadListResponse>(page1_resp)?;
    assert_eq!(data1.len(), 2);
    for thread in &data1 {
        assert_eq!(thread.preview, "Hello");
@@ -171,16 +119,22 @@ async fn thread_list_pagination_next_cursor_none_on_last_page() -> Result<()> {
    let cursor1 = cursor1.expect("expected nextCursor on first page");

    // Page 2: with cursor → expect next_cursor None when no more results.
    let page2_id = mcp
        .send_thread_list_request(ThreadListParams {
            cursor: Some(cursor1),
            limit: Some(2),
            model_providers: Some(vec!["mock_provider".to_string()]),
        })
        .await?;
    let page2_resp: JSONRPCResponse = timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_response_message(RequestId::Integer(page2_id)),
    )
    .await??;
    let ThreadListResponse {
        data: data2,
        next_cursor: cursor2,
    } = list_threads(
        &mut mcp,
        Some(cursor1),
        Some(2),
        Some(vec!["mock_provider".to_string()]),
    )
    .await?;
    } = to_response::<ThreadListResponse>(page2_resp)?;
    assert!(data2.len() <= 2);
    for thread in &data2 {
        assert_eq!(thread.preview, "Hello");
@@ -219,16 +173,23 @@ async fn thread_list_respects_provider_filter() -> Result<()> {
        None,
    )?;

    let mut mcp = init_mcp(codex_home.path()).await?;
    let mut mcp = McpProcess::new(codex_home.path()).await?;
    timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;

    // Filter to only other_provider; expect 1 item, nextCursor None.
    let ThreadListResponse { data, next_cursor } = list_threads(
        &mut mcp,
        None,
        Some(10),
        Some(vec!["other_provider".to_string()]),
    let list_id = mcp
        .send_thread_list_request(ThreadListParams {
            cursor: None,
            limit: Some(10),
            model_providers: Some(vec!["other_provider".to_string()]),
        })
        .await?;
    let resp: JSONRPCResponse = timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_response_message(RequestId::Integer(list_id)),
    )
    .await?;
    .await??;
    let ThreadListResponse { data, next_cursor } = to_response::<ThreadListResponse>(resp)?;
    assert_eq!(data.len(), 1);
    assert_eq!(next_cursor, None);
    let thread = &data[0];
@@ -244,146 +205,6 @@ async fn thread_list_respects_provider_filter() -> Result<()> {
    Ok(())
}

#[tokio::test]
async fn thread_list_fetches_until_limit_or_exhausted() -> Result<()> {
    let codex_home = TempDir::new()?;
    create_minimal_config(codex_home.path())?;

    // Newest 16 conversations belong to a different provider; the older 8 are the
    // only ones that match the filter. We request 8 so the server must keep
    // paging past the first two pages to reach the desired count.
    create_fake_rollouts(
        codex_home.path(),
        24,
        |i| {
            if i < 16 {
                "skip_provider"
            } else {
                "target_provider"
            }
        },
        |i| timestamp_at(2025, 3, 30 - i as u32, 12, 0, 0),
        "Hello",
    )?;

    let mut mcp = init_mcp(codex_home.path()).await?;

    // Request 8 threads for the target provider; the matches only start on the
    // third page so we rely on pagination to reach the limit.
    let ThreadListResponse { data, next_cursor } = list_threads(
        &mut mcp,
        None,
        Some(8),
        Some(vec!["target_provider".to_string()]),
    )
    .await?;
    assert_eq!(
        data.len(),
        8,
        "should keep paging until the requested count is filled"
    );
    assert!(
        data.iter()
            .all(|thread| thread.model_provider == "target_provider"),
        "all returned threads must match the requested provider"
    );
    assert_eq!(
        next_cursor, None,
        "once the requested count is satisfied on the final page, nextCursor should be None"
    );

    Ok(())
}

#[tokio::test]
async fn thread_list_enforces_max_limit() -> Result<()> {
    let codex_home = TempDir::new()?;
    create_minimal_config(codex_home.path())?;

    create_fake_rollouts(
        codex_home.path(),
        105,
        |_| "mock_provider",
        |i| {
            let month = 5 + (i / 28);
            let day = (i % 28) + 1;
            timestamp_at(2025, month as u32, day as u32, 0, 0, 0)
        },
        "Hello",
    )?;

    let mut mcp = init_mcp(codex_home.path()).await?;

    let ThreadListResponse { data, next_cursor } = list_threads(
        &mut mcp,
        None,
        Some(200),
        Some(vec!["mock_provider".to_string()]),
    )
    .await?;
    assert_eq!(
        data.len(),
        100,
        "limit should be clamped to the maximum page size"
    );
    assert!(
        next_cursor.is_some(),
        "when more than the maximum exist, nextCursor should continue pagination"
    );

    Ok(())
}

#[tokio::test]
async fn thread_list_stops_when_not_enough_filtered_results_exist() -> Result<()> {
    let codex_home = TempDir::new()?;
    create_minimal_config(codex_home.path())?;

    // Only the last 7 conversations match the provider filter; we ask for 10 to
    // ensure the server exhausts pagination without looping forever.
    create_fake_rollouts(
        codex_home.path(),
        22,
        |i| {
            if i < 15 {
                "skip_provider"
            } else {
                "target_provider"
            }
        },
        |i| timestamp_at(2025, 4, 28 - i as u32, 8, 0, 0),
        "Hello",
    )?;

    let mut mcp = init_mcp(codex_home.path()).await?;

    // Request more threads than exist after filtering; expect all matches to be
    // returned with nextCursor None.
    let ThreadListResponse { data, next_cursor } = list_threads(
        &mut mcp,
        None,
        Some(10),
        Some(vec!["target_provider".to_string()]),
    )
    .await?;
    assert_eq!(
        data.len(),
        7,
        "all available filtered threads should be returned"
    );
    assert!(
        data.iter()
            .all(|thread| thread.model_provider == "target_provider"),
        "results should still respect the provider filter"
    );
    assert_eq!(
        next_cursor, None,
        "when results are exhausted before reaching the limit, nextCursor should be None"
    );

    Ok(())
}

#[tokio::test]
async fn thread_list_includes_git_info() -> Result<()> {
    let codex_home = TempDir::new()?;
@@ -403,15 +224,22 @@ async fn thread_list_includes_git_info() -> Result<()> {
        Some(git_info),
    )?;

    let mut mcp = init_mcp(codex_home.path()).await?;
    let mut mcp = McpProcess::new(codex_home.path()).await?;
    timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;

    let ThreadListResponse { data, .. } = list_threads(
        &mut mcp,
        None,
        Some(10),
        Some(vec!["mock_provider".to_string()]),
    let list_id = mcp
        .send_thread_list_request(ThreadListParams {
            cursor: None,
            limit: Some(10),
            model_providers: Some(vec!["mock_provider".to_string()]),
        })
        .await?;
    let resp: JSONRPCResponse = timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_response_message(RequestId::Integer(list_id)),
    )
    .await?;
    .await??;
    let ThreadListResponse { data, .. } = to_response::<ThreadListResponse>(resp)?;
    let thread = data
        .iter()
        .find(|t| t.id == conversation_id)

@@ -427,6 +427,7 @@ async fn turn_start_exec_approval_decline_v2() -> Result<()> {
        request_id,
        serde_json::to_value(CommandExecutionRequestApprovalResponse {
            decision: ApprovalDecision::Decline,
            accept_settings: None,
        })?,
    )
    .await?;
@@ -532,7 +533,7 @@ async fn turn_start_updates_sandbox_and_cwd_between_turns_v2() -> Result<()> {
        cwd: Some(first_cwd.clone()),
        approval_policy: Some(codex_app_server_protocol::AskForApproval::Never),
        sandbox_policy: Some(codex_app_server_protocol::SandboxPolicy::WorkspaceWrite {
            writable_roots: vec![first_cwd.try_into()?],
            writable_roots: vec![first_cwd.clone()],
            network_access: false,
            exclude_tmpdir_env_var: false,
            exclude_slash_tmp: false,

@@ -112,7 +112,7 @@ fn classify_shell_name(shell: &str) -> Option<String> {

fn classify_shell(shell: &str, flag: &str) -> Option<ApplyPatchShell> {
    classify_shell_name(shell).and_then(|name| match name.as_str() {
        "bash" | "zsh" | "sh" if matches!(flag, "-lc" | "-c") => Some(ApplyPatchShell::Unix),
        "bash" | "zsh" | "sh" if flag == "-lc" => Some(ApplyPatchShell::Unix),
        "pwsh" | "powershell" if flag.eq_ignore_ascii_case("-command") => {
            Some(ApplyPatchShell::PowerShell)
        }
@@ -1049,13 +1049,6 @@ mod tests {
        assert_match(&heredoc_script(""), None);
    }

    #[test]
    fn test_heredoc_non_login_shell() {
        let script = heredoc_script("");
        let args = strs_to_strings(&["bash", "-c", &script]);
        assert_match_args(args, None);
    }

    #[test]
    fn test_heredoc_applypatch() {
        let args = strs_to_strings(&[
@@ -1 +0,0 @@
** text eol=lf
@@ -1 +0,0 @@
This is a new file
@@ -1,4 +0,0 @@
*** Begin Patch
*** Add File: bar.md
+This is a new file
*** End Patch
@@ -1,2 +0,0 @@
line1
changed
@@ -1 +0,0 @@
created
@@ -1 +0,0 @@
obsolete
@@ -1,2 +0,0 @@
line1
line2
@@ -1,9 +0,0 @@
*** Begin Patch
*** Add File: nested/new.txt
+created
*** Delete File: delete.txt
*** Update File: modify.txt
@@
-line2
+changed
*** End Patch
@@ -1,4 +0,0 @@
line1
changed2
line3
changed4
@@ -1,4 +0,0 @@
line1
line2
line3
line4
@@ -1,9 +0,0 @@
*** Begin Patch
*** Update File: multi.txt
@@
-line2
+changed2
@@
-line4
+changed4
*** End Patch
@@ -1 +0,0 @@
unrelated file
@@ -1 +0,0 @@
new content
@@ -1 +0,0 @@
old content
@@ -1 +0,0 @@
unrelated file
@@ -1,7 +0,0 @@
*** Begin Patch
*** Update File: old/name.txt
*** Move to: renamed/dir/name.txt
@@
-old content
+new content
*** End Patch
@@ -1,2 +0,0 @@
*** Begin Patch
*** End Patch
@@ -1,2 +0,0 @@
line1
line2
@@ -1,2 +0,0 @@
line1
line2
@@ -1,6 +0,0 @@
*** Begin Patch
*** Update File: modify.txt
@@
-missing
+changed
*** End Patch
@@ -1,3 +0,0 @@
*** Begin Patch
*** Delete File: missing.txt
*** End Patch
@@ -1,3 +0,0 @@
*** Begin Patch
*** Update File: foo.txt
*** End Patch
@@ -1,6 +0,0 @@
*** Begin Patch
*** Update File: missing.txt
@@
-old
+new
*** End Patch
@@ -1 +0,0 @@
unrelated file
@@ -1 +0,0 @@
new
@@ -1 +0,0 @@
from
@@ -1 +0,0 @@
unrelated file
@@ -1 +0,0 @@
existing
@@ -1,7 +0,0 @@
*** Begin Patch
*** Update File: old/name.txt
*** Move to: renamed/dir/name.txt
@@
-from
+new
*** End Patch
@@ -1 +0,0 @@
new content
@@ -1 +0,0 @@
old content
@@ -1,4 +0,0 @@
*** Begin Patch
*** Add File: duplicate.txt
+new content
*** End Patch
@@ -1,3 +0,0 @@
*** Begin Patch
*** Delete File: dir
*** End Patch
@@ -1,3 +0,0 @@
*** Begin Patch
*** Frobnicate File: foo
*** End Patch
@@ -1,2 +0,0 @@
first line
second line
@@ -1 +0,0 @@
no newline at end
@@ -1,7 +0,0 @@
*** Begin Patch
*** Update File: no_newline.txt
@@
-no newline at end
+first line
+second line
*** End Patch
@@ -1 +0,0 @@
hello
@@ -1,8 +0,0 @@
*** Begin Patch
*** Add File: created.txt
+hello
*** Update File: missing.txt
@@
-old
+new
*** End Patch
@@ -1,4 +0,0 @@
line1
line2
added line 1
added line 2
@@ -1,2 +0,0 @@
line1
line2
@@ -1,6 +0,0 @@
*** Begin Patch
*** Update File: input.txt
@@
+added line 1
+added line 2
*** End Patch
@@ -1 +0,0 @@
new
@@ -1 +0,0 @@
old
@@ -1,6 +0,0 @@
*** Begin Patch
*** Update File: foo.txt
@@
-old
+new
*** End Patch
@@ -1 +0,0 @@
two
@@ -1 +0,0 @@
one
@@ -1,6 +0,0 @@
*** Begin Patch
*** Update File: file.txt
@@
-one
+two
*** End Patch
@@ -1,18 +0,0 @@
# Overview
This directory is a collection of end-to-end tests for the apply-patch specification, meant to be easily portable to other languages or platforms.

# Specification
Each test case is one directory, composed of input state (input/), the patch operation (patch.txt), and the expected final state (expected/). This structure is designed to keep tests simple (i.e. test exactly one patch at a time) while still providing enough flexibility to test any given operation across files.

Here's what this would look like for a simple apply-patch test case that creates a new file:

```
001_add/
  input/
    foo.md
  expected/
    foo.md
    bar.md
  patch.txt
```
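
For concreteness, the patch.txt for this 001_add case is the same envelope that appears among the deleted fixtures earlier in this diff (file name and body taken verbatim from the bar.md fixture above):

```
*** Begin Patch
*** Add File: bar.md
+This is a new file
*** End Patch
```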

@@ -1,4 +1,3 @@
mod cli;
mod scenarios;
#[cfg(not(target_os = "windows"))]
mod tool;

@@ -1,114 +0,0 @@
use assert_cmd::prelude::*;
use pretty_assertions::assert_eq;
use std::collections::BTreeMap;
use std::fs;
use std::path::Path;
use std::path::PathBuf;
use std::process::Command;
use tempfile::tempdir;

#[test]
fn test_apply_patch_scenarios() -> anyhow::Result<()> {
    for scenario in fs::read_dir("tests/fixtures/scenarios")? {
        let scenario = scenario?;
        let path = scenario.path();
        if path.is_dir() {
            run_apply_patch_scenario(&path)?;
        }
    }
    Ok(())
}

/// Reads a scenario directory, copies the input files to a temporary directory, runs apply-patch,
/// and asserts that the final state matches the expected state exactly.
fn run_apply_patch_scenario(dir: &Path) -> anyhow::Result<()> {
    let tmp = tempdir()?;

    // Copy the input files to the temporary directory
    let input_dir = dir.join("input");
    if input_dir.is_dir() {
        copy_dir_recursive(&input_dir, tmp.path())?;
    }

    // Read the patch.txt file
    let patch = fs::read_to_string(dir.join("patch.txt"))?;

    // Run apply_patch in the temporary directory. We intentionally do not assert
    // on the exit status here; the scenarios are specified purely in terms of
    // final filesystem state, which we compare below.
    Command::cargo_bin("apply_patch")?
        .arg(patch)
        .current_dir(tmp.path())
        .output()?;

    // Assert that the final state matches the expected state exactly
    let expected_dir = dir.join("expected");
    let expected_snapshot = snapshot_dir(&expected_dir)?;
    let actual_snapshot = snapshot_dir(tmp.path())?;

    assert_eq!(
        actual_snapshot,
        expected_snapshot,
        "Scenario {} did not match expected final state",
        dir.display()
    );

    Ok(())
}

#[derive(Debug, Clone, PartialEq, Eq)]
enum Entry {
    File(Vec<u8>),
    Dir,
}

fn snapshot_dir(root: &Path) -> anyhow::Result<BTreeMap<PathBuf, Entry>> {
    let mut entries = BTreeMap::new();
    if root.is_dir() {
        snapshot_dir_recursive(root, root, &mut entries)?;
    }
    Ok(entries)
}

fn snapshot_dir_recursive(
    base: &Path,
    dir: &Path,
    entries: &mut BTreeMap<PathBuf, Entry>,
) -> anyhow::Result<()> {
    for entry in fs::read_dir(dir)? {
        let entry = entry?;
        let path = entry.path();
        let Some(stripped) = path.strip_prefix(base).ok() else {
            continue;
        };
        let rel = stripped.to_path_buf();
        let file_type = entry.file_type()?;
        if file_type.is_dir() {
            entries.insert(rel.clone(), Entry::Dir);
            snapshot_dir_recursive(base, &path, entries)?;
        } else if file_type.is_file() {
            let contents = fs::read(&path)?;
            entries.insert(rel, Entry::File(contents));
        }
    }
    Ok(())
}

fn copy_dir_recursive(src: &Path, dst: &Path) -> anyhow::Result<()> {
    for entry in fs::read_dir(src)? {
        let entry = entry?;
        let path = entry.path();
        let file_type = entry.file_type()?;
        let dest_path = dst.join(entry.file_name());
        if file_type.is_dir() {
            fs::create_dir_all(&dest_path)?;
            copy_dir_recursive(&path, &dest_path)?;
        } else if file_type.is_file() {
            if let Some(parent) = dest_path.parent() {
                fs::create_dir_all(parent)?;
            }
            fs::copy(&path, &dest_path)?;
        }
    }
    Ok(())
}
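
To make the comparison concrete, here is a hedged sketch (not part of the diff) of the `BTreeMap<PathBuf, Entry>` snapshot the harness above would build for the `001_add/expected` tree from the README; `Entry` mirrors the enum defined in the deleted file, and the `foo.md` contents are a placeholder:

```rust
use std::collections::BTreeMap;
use std::path::PathBuf;

// Mirrors the `Entry` enum from the deleted harness above.
#[derive(Debug, Clone, PartialEq, Eq)]
enum Entry {
    File(Vec<u8>),
    Dir,
}

// Sketch of what `snapshot_dir("001_add/expected")` would produce:
// one `Entry::File` per file, keyed by its path relative to the root.
fn expected_001_add_snapshot() -> BTreeMap<PathBuf, Entry> {
    let mut entries = BTreeMap::new();
    entries.insert(
        PathBuf::from("foo.md"),
        Entry::File(b"(unchanged input contents)".to_vec()), // placeholder
    );
    entries.insert(
        PathBuf::from("bar.md"),
        Entry::File(b"This is a new file\n".to_vec()), // created by patch.txt
    );
    entries
}
```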

@@ -7,7 +7,6 @@ use crate::types::TurnAttemptsSiblingTurnsResponse;
use anyhow::Result;
use codex_core::auth::CodexAuth;
use codex_core::default_client::get_codex_user_agent;
use codex_protocol::account::PlanType as AccountPlanType;
use codex_protocol::protocol::CreditsSnapshot;
use codex_protocol::protocol::RateLimitSnapshot;
use codex_protocol::protocol::RateLimitWindow;

@@ -292,7 +291,6 @@ impl Client {
            primary,
            secondary,
            credits: Self::map_credits(payload.credits),
            plan_type: Some(Self::map_plan_type(payload.plan_type)),
        }
    }

@@ -327,23 +325,6 @@ impl Client {
        })
    }

    fn map_plan_type(plan_type: crate::types::PlanType) -> AccountPlanType {
        match plan_type {
            crate::types::PlanType::Free => AccountPlanType::Free,
            crate::types::PlanType::Plus => AccountPlanType::Plus,
            crate::types::PlanType::Pro => AccountPlanType::Pro,
            crate::types::PlanType::Team => AccountPlanType::Team,
            crate::types::PlanType::Business => AccountPlanType::Business,
            crate::types::PlanType::Enterprise => AccountPlanType::Enterprise,
            crate::types::PlanType::Edu | crate::types::PlanType::Education => AccountPlanType::Edu,
            crate::types::PlanType::Guest
                | crate::types::PlanType::Go
                | crate::types::PlanType::FreeWorkspace
                | crate::types::PlanType::Quorum
                | crate::types::PlanType::K12 => AccountPlanType::Unknown,
        }
    }

    fn window_minutes_from_seconds(seconds: i32) -> Option<i64> {
        if seconds <= 0 {
            return None;
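
The tail of `window_minutes_from_seconds` is truncated in this diff; judging by the name and the guard shown, a hedged sketch of the complete shape (the rounding choice is an assumption, not taken from the source):

```rust
fn window_minutes_from_seconds(seconds: i32) -> Option<i64> {
    if seconds <= 0 {
        return None;
    }
    // Assumption: round up so sub-minute windows still report one minute.
    Some((i64::from(seconds) + 59) / 60)
}
```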
@@ -36,7 +36,6 @@ codex-responses-api-proxy = { workspace = true }
codex-rmcp-client = { workspace = true }
codex-stdio-to-uds = { workspace = true }
codex-tui = { workspace = true }
codex-tui2 = { workspace = true }
ctor = { workspace = true }
libc = { workspace = true }
owo-colors = { workspace = true }

@@ -136,9 +136,7 @@ async fn run_command_under_sandbox(
    if let SandboxType::Windows = sandbox_type {
        #[cfg(target_os = "windows")]
        {
            use codex_core::features::Feature;
            use codex_windows_sandbox::run_windows_sandbox_capture;
            use codex_windows_sandbox::run_windows_sandbox_capture_elevated;

            let policy_str = serde_json::to_string(&config.sandbox_policy)?;

@@ -147,32 +145,18 @@ async fn run_command_under_sandbox(
            let env_map = env.clone();
            let command_vec = command.clone();
            let base_dir = config.codex_home.clone();
            let use_elevated = config.features.enabled(Feature::WindowsSandbox)
                && config.features.enabled(Feature::WindowsSandboxElevated);

            // Preflight audit is invoked elsewhere at the appropriate times.
            let res = tokio::task::spawn_blocking(move || {
                if use_elevated {
                    run_windows_sandbox_capture_elevated(
                        policy_str.as_str(),
                        &sandbox_cwd,
                        base_dir.as_path(),
                        command_vec,
                        &cwd_clone,
                        env_map,
                        None,
                    )
                } else {
                    run_windows_sandbox_capture(
                        policy_str.as_str(),
                        &sandbox_cwd,
                        base_dir.as_path(),
                        command_vec,
                        &cwd_clone,
                        env_map,
                        None,
                    )
                }
                run_windows_sandbox_capture(
                    policy_str.as_str(),
                    &sandbox_cwd,
                    base_dir.as_path(),
                    command_vec,
                    &cwd_clone,
                    env_map,
                    None,
                )
            })
            .await;

@@ -25,7 +25,6 @@ use codex_responses_api_proxy::Args as ResponsesApiProxyArgs;
use codex_tui::AppExitInfo;
use codex_tui::Cli as TuiCli;
use codex_tui::update_action::UpdateAction;
use codex_tui2 as tui2;
use owo_colors::OwoColorize;
use std::path::PathBuf;
use supports_color::Stream;

@@ -38,11 +37,6 @@ use crate::mcp_cmd::McpCli;

use codex_core::config::Config;
use codex_core::config::ConfigOverrides;
use codex_core::config::find_codex_home;
use codex_core::config::load_config_as_toml_with_cli_overrides;
use codex_core::features::Feature;
use codex_core::features::FeatureOverrides;
use codex_core::features::Features;
use codex_core::features::is_known_feature_key;

/// Codex CLI

@@ -450,7 +444,7 @@ async fn cli_main(codex_linux_sandbox_exe: Option<PathBuf>) -> anyhow::Result<()
            &mut interactive.config_overrides,
            root_config_overrides.clone(),
        );
        let exit_info = run_interactive_tui(interactive, codex_linux_sandbox_exe).await?;
        let exit_info = codex_tui::run_main(interactive, codex_linux_sandbox_exe).await?;
        handle_app_exit(exit_info)?;
    }
    Some(Subcommand::Exec(mut exec_cli)) => {

@@ -505,7 +499,7 @@ async fn cli_main(codex_linux_sandbox_exe: Option<PathBuf>) -> anyhow::Result<()
            all,
            config_overrides,
        );
        let exit_info = run_interactive_tui(interactive, codex_linux_sandbox_exe).await?;
        let exit_info = codex_tui::run_main(interactive, codex_linux_sandbox_exe).await?;
        handle_app_exit(exit_info)?;
    }
    Some(Subcommand::Login(mut login_cli)) => {

@@ -656,40 +650,6 @@ fn prepend_config_flags(
        .splice(0..0, cli_config_overrides.raw_overrides);
}

/// Run the interactive Codex TUI, dispatching to either the legacy implementation or the
/// experimental TUI v2 shim based on feature flags resolved from config.
async fn run_interactive_tui(
    interactive: TuiCli,
    codex_linux_sandbox_exe: Option<PathBuf>,
) -> std::io::Result<AppExitInfo> {
    if is_tui2_enabled(&interactive).await? {
        let result = tui2::run_main(interactive.into(), codex_linux_sandbox_exe).await?;
        Ok(result.into())
    } else {
        codex_tui::run_main(interactive, codex_linux_sandbox_exe).await
    }
}

/// Returns `Ok(true)` when the resolved configuration enables the `tui2` feature flag.
///
/// This performs a lightweight config load (honoring the same precedence as the lower-level TUI
/// bootstrap: `$CODEX_HOME`, config.toml, profile, and CLI `-c` overrides) solely to decide which
/// TUI frontend to launch. The full configuration is still loaded later by the interactive TUI.
async fn is_tui2_enabled(cli: &TuiCli) -> std::io::Result<bool> {
    let raw_overrides = cli.config_overrides.raw_overrides.clone();
    let overrides_cli = codex_common::CliConfigOverrides { raw_overrides };
    let cli_kv_overrides = overrides_cli
        .parse_overrides()
        .map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidInput, e))?;

    let codex_home = find_codex_home()?;
    let config_toml = load_config_as_toml_with_cli_overrides(&codex_home, cli_kv_overrides).await?;
    let config_profile = config_toml.get_config_profile(cli.config_profile.clone())?;
    let overrides = FeatureOverrides::default();
    let features = Features::from_config(&config_toml, &config_profile, overrides);
    Ok(features.enabled(Feature::Tui2))
}

/// Build the final `TuiCli` for a `codex resume` invocation.
fn finalize_resume_interactive(
    mut interactive: TuiCli,
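
Since the removed dispatch above hinges on the `tui2` feature flag, a hedged config sketch may help; the `[features]` table name is inferred from the `[features].rmcp_client` example quoted later in this diff, and the key follows `Feature::Tui2` above:

```toml
# config.toml — sketch only; key name inferred from Feature::Tui2
[features]
tui2 = true
```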
@@ -14,7 +14,6 @@ use codex_core::config::find_codex_home;
use codex_core::config::load_global_mcp_servers;
use codex_core::config::types::McpServerConfig;
use codex_core::config::types::McpServerTransportConfig;
use codex_core::features::Feature;
use codex_core::mcp::auth::compute_auth_statuses;
use codex_core::protocol::McpAuthStatus;
use codex_rmcp_client::delete_oauth_tokens;

@@ -53,11 +52,9 @@ pub enum McpSubcommand {
    Remove(RemoveArgs),

    /// [experimental] Authenticate with a configured MCP server via OAuth.
    /// Requires features.rmcp_client = true in config.toml.
    Login(LoginArgs),

    /// [experimental] Remove stored OAuth credentials for a server.
    /// Requires features.rmcp_client = true in config.toml.
    Logout(LogoutArgs),
}

@@ -283,24 +280,17 @@ async fn run_add(config_overrides: &CliConfigOverrides, add_args: AddArgs) -> Re
    {
        match supports_oauth_login(&url).await {
            Ok(true) => {
                if !config.features.enabled(Feature::RmcpClient) {
                    println!(
                        "MCP server supports login. Add `features.rmcp_client = true` \
                         to your config.toml and run `codex mcp login {name}` to login."
                    );
                } else {
                    println!("Detected OAuth support. Starting OAuth flow…");
                    perform_oauth_login(
                        &name,
                        &url,
                        config.mcp_oauth_credentials_store_mode,
                        http_headers.clone(),
                        env_http_headers.clone(),
                        &Vec::new(),
                    )
                    .await?;
                    println!("Successfully logged in.");
                }
                println!("Detected OAuth support. Starting OAuth flow…");
                perform_oauth_login(
                    &name,
                    &url,
                    config.mcp_oauth_credentials_store_mode,
                    http_headers.clone(),
                    env_http_headers.clone(),
                    &Vec::new(),
                )
                .await?;
                println!("Successfully logged in.");
            }
            Ok(false) => {}
            Err(_) => println!(

@@ -353,12 +343,6 @@ async fn run_login(config_overrides: &CliConfigOverrides, login_args: LoginArgs)
        .await
        .context("failed to load configuration")?;

    if !config.features.enabled(Feature::RmcpClient) {
        bail!(
            "OAuth login is only supported when [features].rmcp_client is true in config.toml. See https://github.com/openai/codex/blob/main/docs/config.md#feature-flags for details."
        );
    }

    let LoginArgs { name, scopes } = login_args;

    let Some(server) = config.mcp_servers.get(&name) else {
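
The removed `bail!` message above spells out the requirement; as a sketch, the corresponding config.toml entry it asks for would be:

```toml
# config.toml
[features]
rmcp_client = true
```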
@@ -1,7 +1,24 @@
use std::ffi::OsStr;

/// Returns true if the current process is running under WSL.
pub use codex_core::env::is_wsl;

/// WSL-specific path helpers used by the updater logic.
///
/// See https://github.com/openai/codex/issues/6086.
pub fn is_wsl() -> bool {
    #[cfg(target_os = "linux")]
    {
        if std::env::var_os("WSL_DISTRO_NAME").is_some() {
            return true;
        }
        match std::fs::read_to_string("/proc/version") {
            Ok(version) => version.to_lowercase().contains("microsoft"),
            Err(_) => false,
        }
    }
    #[cfg(not(target_os = "linux"))]
    {
        false
    }
}

/// Convert a Windows absolute path (`C:\foo\bar` or `C:/foo/bar`) to a WSL mount path (`/mnt/c/foo/bar`).
/// Returns `None` if the input does not look like a Windows drive path.
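
The body of that conversion helper is cut off in this diff, so the following is a hedged sketch only: the function name is invented here, and the behavior is inferred from the doc comment above (a drive-letter prefix becomes a lowercase `/mnt/<drive>` mount, and backslashes become forward slashes):

```rust
// Hypothetical illustration; not the actual helper from the diff.
fn windows_path_to_wsl_mount(path: &str) -> Option<String> {
    let mut chars = path.chars();
    // A Windows drive path starts with an ASCII letter followed by a colon.
    let drive = chars.next()?;
    if !drive.is_ascii_alphabetic() || chars.next()? != ':' {
        return None;
    }
    let rest: String = chars.collect();
    if !(rest.is_empty() || rest.starts_with('\\') || rest.starts_with('/')) {
        return None;
    }
    // `C:\foo\bar` and `C:/foo/bar` both map to `/mnt/c/foo/bar`.
    Some(format!(
        "/mnt/{}{}",
        drive.to_ascii_lowercase(),
        rest.replace('\\', "/")
    ))
}
```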
@@ -8,12 +8,7 @@ use tempfile::TempDir;
#[test]
fn execpolicy_check_matches_expected_json() -> Result<(), Box<dyn std::error::Error>> {
    let codex_home = TempDir::new()?;
    let policy_path = codex_home.path().join("rules").join("policy.rules");
    fs::create_dir_all(
        policy_path
            .parent()
            .expect("policy path should have a parent"),
    )?;
    let policy_path = codex_home.path().join("policy.codexpolicy");
    fs::write(
        &policy_path,
        r#"

@@ -29,7 +24,7 @@ prefix_rule(
        .args([
            "execpolicy",
            "check",
            "--rules",
            "--policy",
            policy_path
                .to_str()
                .expect("policy path should be valid UTF-8"),

@@ -45,15 +40,17 @@ prefix_rule(
    assert_eq!(
        result,
        json!({
            "decision": "forbidden",
            "matchedRules": [
                {
                    "prefixRuleMatch": {
                        "matchedPrefix": ["git", "push"],
                        "decision": "forbidden"
            "match": {
                "decision": "forbidden",
                "matchedRules": [
                    {
                        "prefixRuleMatch": {
                            "matchedPrefix": ["git", "push"],
                            "decision": "forbidden"
                        }
                    }
                }
                ]
            ]
        }
        })
    );

@@ -127,7 +127,6 @@ impl Default for TaskText {
#[async_trait::async_trait]
pub trait CloudBackend: Send + Sync {
    async fn list_tasks(&self, env: Option<&str>) -> Result<Vec<TaskSummary>>;
    async fn get_task_summary(&self, id: TaskId) -> Result<TaskSummary>;
    async fn get_task_diff(&self, id: TaskId) -> Result<Option<String>>;
    /// Return assistant output messages (no diff) when available.
    async fn get_task_messages(&self, id: TaskId) -> Result<Vec<String>>;

Some files were not shown because too many files have changed in this diff.