Mirror of https://github.com/openai/codex.git, synced 2026-02-02 06:57:03 +00:00
Compare commits: pr/network...dev/cc/tmp (144 commits)
Commit SHA1s:
50437ca302, e591cde2d5, c1162dedca, 303ea308bc, 720fa67816, fabb797097, 807f8a43c2, 1d8e2b4da8, bba5e5e0d4, 8f10d3bf05,
468ee8a75c, 0b53aed2d0, 649badd102, a8e0fe8bb9, e139ef3e67, db1423ae8b, 1d678c8187, 181ff89cbd, 5678213058, 279283fe02,
0c1658d0ec, 19525efb22, 90f37e8549, ee9d441777, 1b5095b5d1, c673e7adb6, 6846bc1115, efd2d76484, 82fcc087b5, 3cfa4bc8be,
ab753387cc, 2de731490e, 7078a0b676, 79ce79a62e, 66b7c673e9, 13c42a077c, a48904de72, 4313e0a710, ce3ff29932, 810ebe0d2b,
bf732600ea, 38de0a1de4, e61bae12e3, 96a65ff0ed, 40de81e7af, 972b5853a0, fb24c47bea, f2b740c95d, 0130a2fa40, 53eb2e9f27,
2828549323, cbc5fb9acf, 310f2114ae, e27d9bd88f, 414fbe0da9, 277babba79, 14dbd0610a, f6275a5142, 7d0c5c7bd5, 4673090f73,
8e900c210c, 6b2ef216f1, d65fe38b2c, 7809e36a92, 0237459f71, 314937fb11, 8ff16a7714, 96fdbdd434, 33e1d0844a, b24b7884c7,
8d5ab97f2b, 6c8470953f, 334dbe51c6, 5a0b5d1bd1, 45727b9ed3, 372de6d2c5, 7a8407bbb6, 4e6d6cd798, 3c353a3aca, 99cbba8ea5,
aa83d7da24, d281bcfcd4, fab1ded484, 987dd7fde3, 63942b883c, a6974087e5, f0dc6fd3c7, 797a68b9f2, dc61fc5f50, ec3738b47e,
1d4463ba81, e3d3445748, 0a7021de72, 7e5c343ef5, 014235f533, b15b5082c6, 37071e7e5c, eeda6a5004, 6f94a90797, 339b052d68,
f4371d2f6c, 8120c8765b, d35337227a, ba835c3c36, dcc01198e2, 6c76d17713, 3429de21b3, 3d4ced3ff5, 2d9826098e, 46baedd7cb,
358a5baba0, 1cd1cf17c6, 53f53173a8, 9fb9ed6cea, 8f0b383621, d7ae342ff4, 2f048f2063, 4fb0b547d6, 87abf06e78, 6395430220,
df46ea48a2, e9023d5662, ad41182ee8, 2e5d52cb14, be274cbe62, 7157421daa, b903285746, 425c8dc372, aea47b6553, f084e5264b,
4c9d589f14, deafead169, 374d591311, 9bf41e9262, 1cfacbf56d, cea76b85af, 5c8d22138a, e1deeefa0f, 580c59aa9a, 50dafbc31b,
da3869eeb6, 6f102e18c4, a8797019a1, 774bd9e432
@@ -1,2 +1,3 @@
 iTerm
 iTerm2
+psuedo
@@ -3,4 +3,4 @@
 skip = .git*,vendor,*-lock.yaml,*.lock,.codespellrc,*test.ts,*.jsonl,frame*.txt
 check-hidden = true
 ignore-regex = ^\s*"image/\S+": ".*|\b(afterAll)\b
-ignore-words-list = ratatui,ser
+ignore-words-list = ratatui,ser,iTerm,iterm2,iterm
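For context, this config can be exercised locally. A minimal sketch, assuming `codespell` is installed and picks up `.codespellrc` from the working directory (its default behavior):

```shell
# With the iTerm spellings added to ignore-words-list, codespell should no
# longer flag them; "psuedo" is covered by the word-list change above.
pip install codespell
codespell
```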
.github/actions/macos-code-sign/action.yml (vendored, 76 lines changed)
@@ -4,6 +4,14 @@ inputs:
   target:
     description: Rust compilation target triple (e.g. aarch64-apple-darwin).
     required: true
+  sign-binaries:
+    description: Whether to sign and notarize the macOS binaries.
+    required: false
+    default: "true"
+  sign-dmg:
+    description: Whether to sign and notarize the macOS dmg.
+    required: false
+    default: "true"
   apple-certificate:
     description: Base64-encoded Apple signing certificate (P12).
     required: true
@@ -107,6 +115,7 @@ runs:
        echo "::add-mask::$APPLE_CODESIGN_IDENTITY"

  - name: Sign macOS binaries
+   if: ${{ inputs.sign-binaries == 'true' }}
    shell: bash
    run: |
      set -euo pipefail
@@ -127,6 +136,7 @@ runs:
      done

  - name: Notarize macOS binaries
+   if: ${{ inputs.sign-binaries == 'true' }}
    shell: bash
    env:
      APPLE_NOTARIZATION_KEY_P8: ${{ inputs.apple-notarization-key-p8 }}
@@ -149,6 +159,8 @@ runs:
      }
      trap cleanup_notary EXIT

+     source "$GITHUB_ACTION_PATH/notary_helpers.sh"
+
      notarize_binary() {
        local binary="$1"
        local source_path="codex-rs/target/${{ inputs.target }}/release/${binary}"
@@ -162,32 +174,54 @@ runs:
        rm -f "$archive_path"
        ditto -c -k --keepParent "$source_path" "$archive_path"

-       submission_json=$(xcrun notarytool submit "$archive_path" \
-         --key "$notary_key_path" \
-         --key-id "$APPLE_NOTARIZATION_KEY_ID" \
-         --issuer "$APPLE_NOTARIZATION_ISSUER_ID" \
-         --output-format json \
-         --wait)
-
-       status=$(printf '%s\n' "$submission_json" | jq -r '.status // "Unknown"')
-       submission_id=$(printf '%s\n' "$submission_json" | jq -r '.id // ""')
-
-       if [[ -z "$submission_id" ]]; then
-         echo "Failed to retrieve submission ID for $binary"
-         exit 1
-       fi
-
-       echo "::notice title=Notarization::$binary submission ${submission_id} completed with status ${status}"
-
-       if [[ "$status" != "Accepted" ]]; then
-         echo "Notarization failed for ${binary} (submission ${submission_id}, status ${status})"
-         exit 1
-       fi
+       notarize_submission "$binary" "$archive_path" "$notary_key_path"
      }

      notarize_binary "codex"
      notarize_binary "codex-responses-api-proxy"

+ - name: Sign and notarize macOS dmg
+   if: ${{ inputs.sign-dmg == 'true' }}
+   shell: bash
+   env:
+     APPLE_NOTARIZATION_KEY_P8: ${{ inputs.apple-notarization-key-p8 }}
+     APPLE_NOTARIZATION_KEY_ID: ${{ inputs.apple-notarization-key-id }}
+     APPLE_NOTARIZATION_ISSUER_ID: ${{ inputs.apple-notarization-issuer-id }}
+   run: |
+     set -euo pipefail
+
+     for var in APPLE_CODESIGN_IDENTITY APPLE_NOTARIZATION_KEY_P8 APPLE_NOTARIZATION_KEY_ID APPLE_NOTARIZATION_ISSUER_ID; do
+       if [[ -z "${!var:-}" ]]; then
+         echo "$var is required"
+         exit 1
+       fi
+     done
+
+     notary_key_path="${RUNNER_TEMP}/notarytool.key.p8"
+     echo "$APPLE_NOTARIZATION_KEY_P8" | base64 -d > "$notary_key_path"
+     cleanup_notary() {
+       rm -f "$notary_key_path"
+     }
+     trap cleanup_notary EXIT
+
+     source "$GITHUB_ACTION_PATH/notary_helpers.sh"
+
+     dmg_path="codex-rs/target/${{ inputs.target }}/release/codex-${{ inputs.target }}.dmg"
+
+     if [[ ! -f "$dmg_path" ]]; then
+       echo "dmg $dmg_path not found"
+       exit 1
+     fi
+
+     keychain_args=()
+     if [[ -n "${APPLE_CODESIGN_KEYCHAIN:-}" && -f "${APPLE_CODESIGN_KEYCHAIN}" ]]; then
+       keychain_args+=(--keychain "${APPLE_CODESIGN_KEYCHAIN}")
+     fi
+
+     codesign --force --timestamp --sign "$APPLE_CODESIGN_IDENTITY" "${keychain_args[@]}" "$dmg_path"
+     notarize_submission "codex-${{ inputs.target }}.dmg" "$dmg_path" "$notary_key_path"
+     xcrun stapler staple "$dmg_path"
+
  - name: Remove signing keychain
    if: ${{ always() }}
    shell: bash
.github/actions/macos-code-sign/notary_helpers.sh (vendored, new file, 46 lines)
@@ -0,0 +1,46 @@
+#!/usr/bin/env bash
+
+notarize_submission() {
+  local label="$1"
+  local path="$2"
+  local notary_key_path="$3"
+
+  if [[ -z "${APPLE_NOTARIZATION_KEY_ID:-}" || -z "${APPLE_NOTARIZATION_ISSUER_ID:-}" ]]; then
+    echo "APPLE_NOTARIZATION_KEY_ID and APPLE_NOTARIZATION_ISSUER_ID are required for notarization"
+    exit 1
+  fi
+
+  if [[ -z "$notary_key_path" || ! -f "$notary_key_path" ]]; then
+    echo "Notary key file $notary_key_path not found"
+    exit 1
+  fi
+
+  if [[ ! -f "$path" ]]; then
+    echo "Notarization payload $path not found"
+    exit 1
+  fi
+
+  local submission_json
+  submission_json=$(xcrun notarytool submit "$path" \
+    --key "$notary_key_path" \
+    --key-id "$APPLE_NOTARIZATION_KEY_ID" \
+    --issuer "$APPLE_NOTARIZATION_ISSUER_ID" \
+    --output-format json \
+    --wait)
+
+  local status submission_id
+  status=$(printf '%s\n' "$submission_json" | jq -r '.status // "Unknown"')
+  submission_id=$(printf '%s\n' "$submission_json" | jq -r '.id // ""')
+
+  if [[ -z "$submission_id" ]]; then
+    echo "Failed to retrieve submission ID for $label"
+    exit 1
+  fi
+
+  echo "::notice title=Notarization::$label submission ${submission_id} completed with status ${status}"
+
+  if [[ "$status" != "Accepted" ]]; then
+    echo "Notarization failed for ${label} (submission ${submission_id}, status ${status})"
+    exit 1
+  fi
+}
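When the helper fails on a status other than `Accepted`, Apple's detailed log can be pulled with `notarytool log` using the submission ID from the `::notice` line. A sketch, reusing the same credentials the helper expects:

```shell
# Fetch the notarization log for a failed submission; $submission_id and
# $notary_key_path are the values the helper above worked with.
xcrun notarytool log "$submission_id" \
  --key "$notary_key_path" \
  --key-id "$APPLE_NOTARIZATION_KEY_ID" \
  --issuer "$APPLE_NOTARIZATION_ISSUER_ID" \
  notarization-log.json
jq '.issues' notarization-log.json  # the log's issues array explains rejections
```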
BIN .github/codex-cli-login.png (vendored): binary file not shown. Before: 2.9 MiB.
BIN .github/codex-cli-permissions.png (vendored): binary file not shown. Before: 408 KiB.
BIN .github/codex-cli-splash.png (vendored): binary file not shown. Before: 3.1 MiB, after: 818 KiB.
BIN .github/demo.gif (vendored): binary file not shown. Before: 19 MiB.
.github/workflows/cargo-deny.yml (vendored, 2 lines changed)
@@ -20,7 +20,7 @@ jobs:
         uses: dtolnay/rust-toolchain@stable

       - name: Run cargo-deny
-        uses: EmbarkStudios/cargo-deny-action@v1
+        uses: EmbarkStudios/cargo-deny-action@v2
         with:
           rust-version: stable
           manifest-path: ./codex-rs/Cargo.toml
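The bumped action wraps the cargo-deny CLI, so the same check can be reproduced locally. A sketch, assuming cargo-deny is installed:

```shell
# Mirror the CI job: cargo-deny resolves the workspace via the given manifest.
cargo install cargo-deny --locked
cargo deny --manifest-path ./codex-rs/Cargo.toml check
```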
.github/workflows/ci.yml (vendored, 2 lines changed)
@@ -37,7 +37,7 @@ jobs:
        run: |
          set -euo pipefail
          # Use a rust-release version that includes all native binaries.
-         CODEX_VERSION=0.74.0-alpha.3
+         CODEX_VERSION=0.74.0
          OUTPUT_DIR="${RUNNER_TEMP}"
          python3 ./scripts/stage_npm_packages.py \
            --release-version "$CODEX_VERSION" \
@@ -12,6 +12,8 @@ permissions:

 jobs:
   close-stale-contributor-prs:
+    # Prevent scheduled runs on forks
+    if: github.repository == 'openai/codex'
     runs-on: ubuntu-latest
     steps:
       - name: Close inactive PRs from contributors
.github/workflows/issue-deduplicator.yml (vendored, 3 lines changed)
@@ -9,7 +9,8 @@ on:
 jobs:
   gather-duplicates:
     name: Identify potential duplicates
-    if: ${{ github.event.action == 'opened' || (github.event.action == 'labeled' && github.event.label.name == 'codex-deduplicate') }}
+    # Prevent runs on forks (requires OpenAI API key, wastes Actions minutes)
+    if: github.repository == 'openai/codex' && (github.event.action == 'opened' || (github.event.action == 'labeled' && github.event.label.name == 'codex-deduplicate'))
     runs-on: ubuntu-latest
     permissions:
       contents: read
.github/workflows/issue-labeler.yml (vendored, 3 lines changed)
@@ -9,7 +9,8 @@ on:
 jobs:
   gather-labels:
     name: Generate label suggestions
-    if: ${{ github.event.action == 'opened' || (github.event.action == 'labeled' && github.event.label.name == 'codex-label') }}
+    # Prevent runs on forks (requires OpenAI API key, wastes Actions minutes)
+    if: github.repository == 'openai/codex' && (github.event.action == 'opened' || (github.event.action == 'labeled' && github.event.label.name == 'codex-label'))
     runs-on: ubuntu-latest
     permissions:
       contents: read
.github/workflows/rust-release-prepare.yml (vendored, 4 lines changed)
@@ -14,6 +14,8 @@ permissions:

 jobs:
   prepare:
+    # Prevent scheduled runs on forks (no secrets, wastes Actions minutes)
+    if: github.repository == 'openai/codex'
     runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v6
@@ -41,7 +43,7 @@ jobs:
           curl --http1.1 --fail --show-error --location "${headers[@]}" "${url}" | jq '.' > codex-rs/core/models.json

       - name: Open pull request (if changed)
-        uses: peter-evans/create-pull-request@v7
+        uses: peter-evans/create-pull-request@v8
         with:
           commit-message: "Update models.json"
           title: "Update models.json"
.github/workflows/rust-release.yml (vendored, 71 lines changed)
@@ -128,11 +128,72 @@ jobs:
           account-name: ${{ secrets.AZURE_TRUSTED_SIGNING_ACCOUNT_NAME }}
           certificate-profile-name: ${{ secrets.AZURE_TRUSTED_SIGNING_CERTIFICATE_PROFILE_NAME }}

-      - if: ${{ matrix.runner == 'macos-15-xlarge' }}
-        name: MacOS code signing
+      - if: ${{ runner.os == 'macOS' }}
+        name: MacOS code signing (binaries)
         uses: ./.github/actions/macos-code-sign
         with:
           target: ${{ matrix.target }}
+          sign-binaries: "true"
+          sign-dmg: "false"
           apple-certificate: ${{ secrets.APPLE_CERTIFICATE_P12 }}
           apple-certificate-password: ${{ secrets.APPLE_CERTIFICATE_PASSWORD }}
           apple-notarization-key-p8: ${{ secrets.APPLE_NOTARIZATION_KEY_P8 }}
           apple-notarization-key-id: ${{ secrets.APPLE_NOTARIZATION_KEY_ID }}
           apple-notarization-issuer-id: ${{ secrets.APPLE_NOTARIZATION_ISSUER_ID }}
+
+      - if: ${{ runner.os == 'macOS' }}
+        name: Build macOS dmg
+        shell: bash
+        run: |
+          set -euo pipefail
+
+          target="${{ matrix.target }}"
+          release_dir="target/${target}/release"
+          dmg_root="${RUNNER_TEMP}/codex-dmg-root"
+          volname="Codex (${target})"
+          dmg_path="${release_dir}/codex-${target}.dmg"
+
+          # The previous "MacOS code signing (binaries)" step signs + notarizes the
+          # built artifacts in `${release_dir}`. This step packages *those same*
+          # signed binaries into a dmg.
+          codex_binary_path="${release_dir}/codex"
+          proxy_binary_path="${release_dir}/codex-responses-api-proxy"
+
+          rm -rf "$dmg_root"
+          mkdir -p "$dmg_root"
+
+          if [[ ! -f "$codex_binary_path" ]]; then
+            echo "Binary $codex_binary_path not found"
+            exit 1
+          fi
+          if [[ ! -f "$proxy_binary_path" ]]; then
+            echo "Binary $proxy_binary_path not found"
+            exit 1
+          fi
+
+          ditto "$codex_binary_path" "${dmg_root}/codex"
+          ditto "$proxy_binary_path" "${dmg_root}/codex-responses-api-proxy"
+
+          rm -f "$dmg_path"
+          hdiutil create \
+            -volname "$volname" \
+            -srcfolder "$dmg_root" \
+            -format UDZO \
+            -ov \
+            "$dmg_path"
+
+          if [[ ! -f "$dmg_path" ]]; then
+            echo "dmg $dmg_path not found after build"
+            exit 1
+          fi
+
+      - if: ${{ runner.os == 'macOS' }}
+        name: MacOS code signing (dmg)
+        uses: ./.github/actions/macos-code-sign
+        with:
+          target: ${{ matrix.target }}
+          sign-binaries: "false"
+          sign-dmg: "true"
+          apple-certificate: ${{ secrets.APPLE_CERTIFICATE_P12 }}
+          apple-certificate-password: ${{ secrets.APPLE_CERTIFICATE_PASSWORD }}
+          apple-notarization-key-p8: ${{ secrets.APPLE_NOTARIZATION_KEY_P8 }}
@@ -160,6 +221,10 @@ jobs:
             cp target/${{ matrix.target }}/release/codex-responses-api-proxy.sigstore "$dest/codex-responses-api-proxy-${{ matrix.target }}.sigstore"
           fi

+          if [[ "${{ matrix.target }}" == *apple-darwin ]]; then
+            cp target/${{ matrix.target }}/release/codex-${{ matrix.target }}.dmg "$dest/codex-${{ matrix.target }}.dmg"
+          fi
+
       - if: ${{ matrix.runner == 'windows-11-arm' }}
         name: Install zstd
         shell: powershell
@@ -194,7 +259,7 @@ jobs:
             base="$(basename "$f")"
             # Skip files that are already archives (shouldn't happen, but be
             # safe).
-            if [[ "$base" == *.tar.gz || "$base" == *.zip ]]; then
+            if [[ "$base" == *.tar.gz || "$base" == *.zip || "$base" == *.dmg ]]; then
              continue
            fi
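The resulting artifact can be spot-checked on any Mac before release. A sketch; the path assumes the aarch64-apple-darwin matrix entry:

```shell
# Verify the dmg's code signature and the stapled notarization ticket.
dmg="target/aarch64-apple-darwin/release/codex-aarch64-apple-darwin.dmg"
codesign --verify --verbose=2 "$dmg"
xcrun stapler validate "$dmg"
```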
.gitignore (vendored, 5 lines changed)
@@ -85,3 +85,8 @@ CHANGELOG.ignore.md
 # nix related
 .direnv
 .envrc
+
+# Python bytecode files
+__pycache__/
+*.pyc
+
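A quick way to confirm the new rules take effect (a sketch; the reported line number depends on the final file):

```shell
# git prints the matching .gitignore source line for an ignored path.
git check-ignore -v __pycache__/example.cpython-312.pyc
git check-ignore -v build/module.pyc
```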
@@ -77,6 +77,12 @@ If you don’t have the tool:
 - Prefer deep equals comparisons whenever possible. Perform `assert_eq!()` on entire objects, rather than individual fields.
 - Avoid mutating process environment in tests; prefer passing environment-derived flags or dependencies from above.

+### Spawning workspace binaries in tests (Cargo vs Buck2)
+
+- Prefer `codex_utils_cargo_bin::cargo_bin("...")` over `assert_cmd::Command::cargo_bin(...)` or `escargot` when tests need to spawn first-party binaries.
+- Under Buck2, `CARGO_BIN_EXE_*` may be project-relative (e.g. `buck-out/...`), which breaks if a test changes its working directory. `codex_utils_cargo_bin::cargo_bin` resolves to an absolute path first.
+- When locating fixture files under Buck2, avoid `env!("CARGO_MANIFEST_DIR")` (Buck codegen sets it to `"."`). Prefer deriving paths from `codex_utils_cargo_bin::buck_project_root()` when needed.
+
 ### Integration tests (core)

 - Prefer the utilities in `core_test_support::responses` when writing end-to-end Codex tests.
README.md (77 lines changed)
@@ -1,13 +1,11 @@
 <p align="center"><code>npm i -g @openai/codex</code><br />or <code>brew install --cask codex</code></p>

 <p align="center"><strong>Codex CLI</strong> is a coding agent from OpenAI that runs locally on your computer.
-</br>
-</br>If you want Codex in your code editor (VS Code, Cursor, Windsurf), <a href="https://developers.openai.com/codex/ide">install in your IDE</a>
-</br>If you are looking for the <em>cloud-based agent</em> from OpenAI, <strong>Codex Web</strong>, go to <a href="https://chatgpt.com/codex">chatgpt.com/codex</a></p>

 <p align="center">
   <img src="./.github/codex-cli-splash.png" alt="Codex CLI splash" width="80%" />
 </p>
+</p>
+</br>
+If you want Codex in your code editor (VS Code, Cursor, Windsurf), <a href="https://developers.openai.com/codex/ide">install in your IDE.</a>
+</br>If you are looking for the <em>cloud-based agent</em> from OpenAI, <strong>Codex Web</strong>, go to <a href="https://chatgpt.com/codex">chatgpt.com/codex</a>.</p>

 ---

@@ -15,25 +13,19 @@

 ### Installing and running Codex CLI

-Install globally with your preferred package manager. If you use npm:
+Install globally with your preferred package manager:

 ```shell
+# Install using npm
 npm install -g @openai/codex
 ```

-Alternatively, if you use Homebrew:
-
 ```shell
+# Install using Homebrew
 brew install --cask codex
 ```

-Then simply run `codex` to get started:
-
-```shell
-codex
-```
-
-If you're running into upgrade issues with Homebrew, see the [FAQ entry on brew upgrade codex](./docs/faq.md#brew-upgrade-codex-isnt-upgrading-me).
+Then simply run `codex` to get started.

 <details>
 <summary>You can also go to the <a href="https://github.com/openai/codex/releases/latest">latest GitHub Release</a> and download the appropriate binary for your platform.</summary>
@@ -53,60 +45,15 @@ Each archive contains a single entry with the platform baked into the name (e.g.

 ### Using Codex with your ChatGPT plan

-<p align="center">
-  <img src="./.github/codex-cli-login.png" alt="Codex CLI login" width="80%" />
-</p>
-
 Run `codex` and select **Sign in with ChatGPT**. We recommend signing into your ChatGPT account to use Codex as part of your Plus, Pro, Team, Edu, or Enterprise plan. [Learn more about what's included in your ChatGPT plan](https://help.openai.com/en/articles/11369540-codex-in-chatgpt).

-You can also use Codex with an API key, but this requires [additional setup](./docs/authentication.md#usage-based-billing-alternative-use-an-openai-api-key). If you previously used an API key for usage-based billing, see the [migration steps](./docs/authentication.md#migrating-from-usage-based-billing-api-key). If you're having trouble with login, please comment on [this issue](https://github.com/openai/codex/issues/1243).
+You can also use Codex with an API key, but this requires [additional setup](https://developers.openai.com/codex/auth#sign-in-with-an-api-key).

-### Model Context Protocol (MCP)
+## Docs

-Codex can access MCP servers. To configure them, refer to the [config docs](./docs/config.md#mcp_servers).
-
-### Configuration
-
-Codex CLI supports a rich set of configuration options, with preferences stored in `~/.codex/config.toml`. For full configuration options, see [Configuration](./docs/config.md).
-
-### Execpolicy
-
-See the [Execpolicy quickstart](./docs/execpolicy.md) to set up rules that govern what commands Codex can execute.
-
-### Docs & FAQ
-
-- [**Getting started**](./docs/getting-started.md)
-  - [CLI usage](./docs/getting-started.md#cli-usage)
-  - [Slash Commands](./docs/slash_commands.md)
-  - [Running with a prompt as input](./docs/getting-started.md#running-with-a-prompt-as-input)
-  - [Example prompts](./docs/getting-started.md#example-prompts)
-  - [Custom prompts](./docs/prompts.md)
-  - [Memory with AGENTS.md](./docs/getting-started.md#memory-with-agentsmd)
-- [**Configuration**](./docs/config.md)
-  - [Example config](./docs/example-config.md)
-- [**Sandbox & approvals**](./docs/sandbox.md)
-- [**Execpolicy quickstart**](./docs/execpolicy.md)
-- [**Authentication**](./docs/authentication.md)
-  - [Auth methods](./docs/authentication.md#forcing-a-specific-auth-method-advanced)
-  - [Login on a "Headless" machine](./docs/authentication.md#connecting-on-a-headless-machine)
-- **Automating Codex**
-  - [GitHub Action](https://github.com/openai/codex-action)
-  - [TypeScript SDK](./sdk/typescript/README.md)
-  - [Non-interactive mode (`codex exec`)](./docs/exec.md)
-- [**Advanced**](./docs/advanced.md)
-  - [Tracing / verbose logging](./docs/advanced.md#tracing--verbose-logging)
-  - [Model Context Protocol (MCP)](./docs/advanced.md#model-context-protocol-mcp)
-- [**Zero data retention (ZDR)**](./docs/zdr.md)
+- [**Codex Documentation**](https://developers.openai.com/codex)
 - [**Contributing**](./docs/contributing.md)
-- [**Install & build**](./docs/install.md)
-  - [System Requirements](./docs/install.md#system-requirements)
-  - [DotSlash](./docs/install.md#dotslash)
-  - [Build from source](./docs/install.md#build-from-source)
-- [**FAQ**](./docs/faq.md)
+- [**Installing & building**](./docs/install.md)
 - [**Open source fund**](./docs/open-source-fund.md)

 ---

 ## License

 This repository is licensed under the [Apache-2.0 License](LICENSE).
@@ -2,6 +2,7 @@
 """Install Codex native binaries (Rust CLI plus ripgrep helpers)."""

 import argparse
+from contextlib import contextmanager
 import json
 import os
 import shutil
@@ -12,6 +13,7 @@ import zipfile
 from dataclasses import dataclass
 from concurrent.futures import ThreadPoolExecutor, as_completed
 from pathlib import Path
 import sys
 from typing import Iterable, Sequence
 from urllib.parse import urlparse
 from urllib.request import urlopen
@@ -77,6 +79,45 @@ RG_TARGET_PLATFORM_PAIRS: list[tuple[str, str]] = [
 RG_TARGET_TO_PLATFORM = {target: platform for target, platform in RG_TARGET_PLATFORM_PAIRS}
 DEFAULT_RG_TARGETS = [target for target, _ in RG_TARGET_PLATFORM_PAIRS]

+# urllib.request.urlopen() defaults to no timeout (can hang indefinitely), which is painful in CI.
+DOWNLOAD_TIMEOUT_SECS = 60
+
+
+def _gha_enabled() -> bool:
+    # GitHub Actions supports "workflow commands" (e.g. ::group:: / ::error::) that make logs
+    # much easier to scan: groups collapse noisy sections and error annotations surface the
+    # failure in the UI without changing the actual exception/traceback output.
+    return os.environ.get("GITHUB_ACTIONS") == "true"
+
+
+def _gha_escape(value: str) -> str:
+    # Workflow commands require percent/newline escaping.
+    return value.replace("%", "%25").replace("\r", "%0D").replace("\n", "%0A")
+
+
+def _gha_error(*, title: str, message: str) -> None:
+    # Emit a GitHub Actions error annotation. This does not replace stdout/stderr logs; it just
+    # adds a prominent summary line to the job UI so the root cause is easier to spot.
+    if not _gha_enabled():
+        return
+    print(
+        f"::error title={_gha_escape(title)}::{_gha_escape(message)}",
+        flush=True,
+    )
+
+
+@contextmanager
+def _gha_group(title: str):
+    # Wrap a block in a collapsible log group on GitHub Actions. Outside of GHA this is a no-op
+    # so local output remains unchanged.
+    if _gha_enabled():
+        print(f"::group::{_gha_escape(title)}", flush=True)
+    try:
+        yield
+    finally:
+        if _gha_enabled():
+            print("::endgroup::", flush=True)
+
+
 def parse_args() -> argparse.Namespace:
     parser = argparse.ArgumentParser(description="Install native Codex binaries.")
@@ -131,18 +172,20 @@ def main() -> int:
     workflow_id = workflow_url.rstrip("/").split("/")[-1]
-    print(f"Downloading native artifacts from workflow {workflow_id}...")

-    with tempfile.TemporaryDirectory(prefix="codex-native-artifacts-") as artifacts_dir_str:
-        artifacts_dir = Path(artifacts_dir_str)
-        _download_artifacts(workflow_id, artifacts_dir)
-        install_binary_components(
-            artifacts_dir,
-            vendor_dir,
-            [BINARY_COMPONENTS[name] for name in components if name in BINARY_COMPONENTS],
-        )
+    with _gha_group(f"Download native artifacts from workflow {workflow_id}"):
+        with tempfile.TemporaryDirectory(prefix="codex-native-artifacts-") as artifacts_dir_str:
+            artifacts_dir = Path(artifacts_dir_str)
+            _download_artifacts(workflow_id, artifacts_dir)
+            install_binary_components(
+                artifacts_dir,
+                vendor_dir,
+                [BINARY_COMPONENTS[name] for name in components if name in BINARY_COMPONENTS],
+            )

     if "rg" in components:
-        print("Fetching ripgrep binaries...")
-        fetch_rg(vendor_dir, DEFAULT_RG_TARGETS, manifest_path=RG_MANIFEST)
+        with _gha_group("Fetch ripgrep binaries"):
+            print("Fetching ripgrep binaries...")
+            fetch_rg(vendor_dir, DEFAULT_RG_TARGETS, manifest_path=RG_MANIFEST)

     print(f"Installed native dependencies into {vendor_dir}")
     return 0
@@ -203,7 +246,14 @@ def fetch_rg(

     for future in as_completed(future_map):
         target = future_map[future]
-        results[target] = future.result()
+        try:
+            results[target] = future.result()
+        except Exception as exc:
+            _gha_error(
+                title="ripgrep install failed",
+                message=f"target={target} error={exc!r}",
+            )
+            raise RuntimeError(f"Failed to install ripgrep for target {target}.") from exc
         print(f"  installed ripgrep for {target}")

     return [results[target] for target in targets]
@@ -301,6 +351,8 @@ def _fetch_single_rg(
     url = providers[0]["url"]
     archive_format = platform_info.get("format", "zst")
     archive_member = platform_info.get("path")
+    digest = platform_info.get("digest")
+    expected_size = platform_info.get("size")

     dest_dir = vendor_dir / target / "path"
     dest_dir.mkdir(parents=True, exist_ok=True)
@@ -313,10 +365,32 @@ def _fetch_single_rg(
         tmp_dir = Path(tmp_dir_str)
         archive_filename = os.path.basename(urlparse(url).path)
         download_path = tmp_dir / archive_filename
-        _download_file(url, download_path)
+        print(
+            f"  downloading ripgrep for {target} ({platform_key}) from {url}",
+            flush=True,
+        )
+        try:
+            _download_file(url, download_path)
+        except Exception as exc:
+            _gha_error(
+                title="ripgrep download failed",
+                message=f"target={target} platform={platform_key} url={url} error={exc!r}",
+            )
+            raise RuntimeError(
+                "Failed to download ripgrep "
+                f"(target={target}, platform={platform_key}, format={archive_format}, "
+                f"expected_size={expected_size!r}, digest={digest!r}, url={url}, dest={download_path})."
+            ) from exc

         dest.unlink(missing_ok=True)
-        extract_archive(download_path, archive_format, archive_member, dest)
+        try:
+            extract_archive(download_path, archive_format, archive_member, dest)
+        except Exception as exc:
+            raise RuntimeError(
+                "Failed to extract ripgrep "
+                f"(target={target}, platform={platform_key}, format={archive_format}, "
+                f"member={archive_member!r}, url={url}, archive={download_path})."
+            ) from exc

         if not is_windows:
             dest.chmod(0o755)
@@ -326,7 +400,9 @@ def _fetch_single_rg(

 def _download_file(url: str, dest: Path) -> None:
     dest.parent.mkdir(parents=True, exist_ok=True)
-    with urlopen(url) as response, open(dest, "wb") as out:
+    dest.unlink(missing_ok=True)
+
+    with urlopen(url, timeout=DOWNLOAD_TIMEOUT_SECS) as response, open(dest, "wb") as out:
         shutil.copyfileobj(response, out)
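The `_gha_*` helpers emit GitHub Actions workflow commands, which are ordinary stdout lines that the runner interprets. The shell equivalent of what the script now prints (values illustrative):

```shell
echo "::group::Fetch ripgrep binaries"   # start a collapsible log group
echo "fetching..."                       # regular log lines land inside the group
echo "::endgroup::"                      # close the group
# Error annotation surfaced in the job UI; %, CR, and LF must be escaped first.
echo "::error title=ripgrep download failed::target=aarch64-apple-darwin error=URLError(...)"
```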
codex-rs/Cargo.lock (generated, 475 lines changed): file diff suppressed because it is too large.
@@ -36,6 +36,7 @@ members = [
     "tui",
     "tui2",
     "utils/absolute-path",
+    "utils/cargo-bin",
     "utils/git",
     "utils/cache",
     "utils/image",
@@ -93,6 +94,7 @@ codex-tui = { path = "tui" }
 codex-tui2 = { path = "tui2" }
 codex-utils-absolute-path = { path = "utils/absolute-path" }
 codex-utils-cache = { path = "utils/cache" }
+codex-utils-cargo-bin = { path = "utils/cargo-bin" }
 codex-utils-image = { path = "utils/image" }
 codex-utils-json-to-toml = { path = "utils/json-to-toml" }
 codex-utils-pty = { path = "utils/pty" }
@@ -141,11 +143,12 @@ icu_locale_core = "2.1"
 icu_provider = { version = "2.1", features = ["sync"] }
 ignore = "0.4.23"
 image = { version = "^0.25.9", default-features = false }
+include_dir = "0.7.4"
 indexmap = "2.12.0"
-insta = "1.44.3"
+insta = "1.46.0"
 itertools = "0.14.0"
 keyring = { version = "3.6", default-features = false }
-landlock = "0.4.1"
+landlock = "0.4.4"
 lazy_static = "1"
 libc = "0.2.177"
 log = "0.4"
@@ -157,12 +160,12 @@ notify = "8.2.0"
 nucleo-matcher = "0.3.1"
 once_cell = "1.20.2"
 openssl-sys = "*"
-opentelemetry = "0.30.0"
-opentelemetry-appender-tracing = "0.30.0"
-opentelemetry-otlp = "0.30.0"
-opentelemetry-semantic-conventions = "0.30.0"
-opentelemetry_sdk = "0.30.0"
-tracing-opentelemetry = "0.31.0"
+opentelemetry = "0.31.0"
+opentelemetry-appender-tracing = "0.31.0"
+opentelemetry-otlp = "0.31.0"
+opentelemetry-semantic-conventions = "0.31.0"
+opentelemetry_sdk = "0.31.0"
+tracing-opentelemetry = "0.32.0"
 os_info = "3.12.0"
 owo-colors = "4.2.0"
 path-absolutize = "3.1.1"
@@ -173,11 +176,12 @@ pretty_assertions = "1.4.1"
 pulldown-cmark = "0.10"
 rand = "0.9"
 ratatui = "0.29.0"
+ratatui-core = "0.1.0"
 ratatui-macros = "0.6.0"
 regex = "1.12.2"
-regex-lite = "0.1.7"
+regex-lite = "0.1.8"
 reqwest = "0.12"
-rmcp = { version = "0.10.0", default-features = false }
+rmcp = { version = "0.12.0", default-features = false }
 schemars = "0.8.22"
 seccompiler = "0.5.0"
 sentry = "0.46.0"
@@ -197,26 +201,26 @@ strum_macros = "0.27.2"
 supports-color = "3.0.2"
 sys-locale = "0.3.2"
 tempfile = "3.23.0"
-test-log = "0.2.18"
+test-log = "0.2.19"
 textwrap = "0.16.2"
 thiserror = "2.0.17"
 time = "0.3"
 tiny_http = "0.12"
 tokio = "1"
-tokio-stream = "0.1.17"
+tokio-stream = "0.1.18"
 tokio-test = "0.4"
 tokio-util = "0.7.16"
 toml = "0.9.5"
-toml_edit = "0.23.5"
-tonic = "0.13.1"
+toml_edit = "0.24.0"
 tracing = "0.1.43"
 tracing-appender = "0.2.3"
-tracing-subscriber = "0.3.20"
+tracing-subscriber = "0.3.22"
 tracing-test = "0.2.5"
 tree-sitter = "0.25.10"
 tree-sitter-bash = "0.25"
 tree-sitter-highlight = "0.25.10"
 ts-rs = "11"
+tui-scrollbar = "0.2.1"
 uds_windows = "1.1.0"
 unicode-segmentation = "1.12.0"
 unicode-width = "0.2"
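After workspace version bumps like these, the resolved versions can be confirmed from the lockfile. A sketch using cargo tree's invert mode:

```shell
cd codex-rs
# Show which workspace crates pull in the bumped dependencies, and at what version.
cargo tree -i opentelemetry
cargo tree -i rmcp
```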
@@ -15,8 +15,8 @@ You can also install via Homebrew (`brew install --cask codex`) or download a pl

 ## Documentation quickstart

-- First run with Codex? Follow the walkthrough in [`docs/getting-started.md`](../docs/getting-started.md) for prompts, keyboard shortcuts, and session management.
-- Already shipping with Codex and want deeper control? Jump to [`docs/advanced.md`](../docs/advanced.md) and the configuration reference at [`docs/config.md`](../docs/config.md).
+- First run with Codex? Start with [`docs/getting-started.md`](../docs/getting-started.md) (links to the walkthrough for prompts, keyboard shortcuts, and session management).
+- Want deeper control? See [`docs/config.md`](../docs/config.md) and [`docs/install.md`](../docs/install.md).

 ## What's new in the Rust CLI
@@ -30,7 +30,7 @@ Codex supports a rich set of configuration options. Note that the Rust CLI uses

 #### MCP client

-Codex CLI functions as an MCP client that allows the Codex CLI and IDE extension to connect to MCP servers on startup. See the [`configuration documentation`](../docs/config.md#mcp_servers) for details.
+Codex CLI functions as an MCP client that allows the Codex CLI and IDE extension to connect to MCP servers on startup. See the [`configuration documentation`](../docs/config.md#connecting-to-mcp-servers) for details.

 #### MCP server (experimental)
@@ -539,6 +539,7 @@ server_notification_definitions! {
     ReasoningSummaryPartAdded => "item/reasoning/summaryPartAdded" (v2::ReasoningSummaryPartAddedNotification),
     ReasoningTextDelta => "item/reasoning/textDelta" (v2::ReasoningTextDeltaNotification),
     ContextCompacted => "thread/compacted" (v2::ContextCompactedNotification),
+    DeprecationNotice => "deprecationNotice" (v2::DeprecationNoticeNotification),

     /// Notifies the user of world-writable directories on Windows, which cannot be protected by the sandbox.
     WindowsWorldWritableWarning => "windows/worldWritableWarning" (v2::WindowsWorldWritableWarningNotification),
@@ -384,6 +384,8 @@ pub struct SendUserTurnParams {
     pub model: String,
     pub effort: Option<ReasoningEffort>,
     pub summary: ReasoningSummary,
+    /// Optional JSON Schema used to constrain the final assistant message for this turn.
+    pub output_schema: Option<serde_json::Value>,
 }

 #[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
@@ -18,6 +18,7 @@ use codex_protocol::plan_tool::StepStatus as CorePlanStepStatus;
 use codex_protocol::protocol::AskForApproval as CoreAskForApproval;
 use codex_protocol::protocol::CodexErrorInfo as CoreCodexErrorInfo;
 use codex_protocol::protocol::CreditsSnapshot as CoreCreditsSnapshot;
+use codex_protocol::protocol::NetworkAccess as CoreNetworkAccess;
 use codex_protocol::protocol::RateLimitSnapshot as CoreRateLimitSnapshot;
 use codex_protocol::protocol::RateLimitWindow as CoreRateLimitWindow;
 use codex_protocol::protocol::SessionSource as CoreSessionSource;
@@ -208,6 +209,7 @@ v2_enum_from_core!(
     }
 );

+// TODO(mbolin): Support in-repo layer.
 #[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, JsonSchema, TS)]
 #[serde(tag = "type", rename_all = "camelCase")]
 #[ts(tag = "type")]
@@ -216,17 +218,78 @@ pub enum ConfigLayerSource {
     /// Managed preferences layer delivered by MDM (macOS only).
     #[serde(rename_all = "camelCase")]
     #[ts(rename_all = "camelCase")]
-    Mdm { domain: String, key: String },
+    Mdm {
+        domain: String,
+        key: String,
+    },

     /// Managed config layer from a file (usually `managed_config.toml`).
     #[serde(rename_all = "camelCase")]
     #[ts(rename_all = "camelCase")]
-    System { file: AbsolutePathBuf },
-    /// Session-layer overrides supplied via `-c`/`--config`.
-    SessionFlags,
-    /// User config layer from a file (usually `config.toml`).
+    System {
+        /// This is the path to the system config.toml file, though it is not
+        /// guaranteed to exist.
+        file: AbsolutePathBuf,
+    },
+
+    /// User config layer from $CODEX_HOME/config.toml. This layer is special
+    /// in that it is expected to be:
+    /// - writable by the user
+    /// - generally outside the workspace directory
     #[serde(rename_all = "camelCase")]
     #[ts(rename_all = "camelCase")]
-    User { file: AbsolutePathBuf },
+    User {
+        /// This is the path to the user's config.toml file, though it is not
+        /// guaranteed to exist.
+        file: AbsolutePathBuf,
+    },
+
+    /// Path to a .codex/ folder within a project. There could be multiple of
+    /// these between `cwd` and the project/repo root.
+    #[serde(rename_all = "camelCase")]
+    #[ts(rename_all = "camelCase")]
+    Project {
+        dot_codex_folder: AbsolutePathBuf,
+    },
+
+    /// Session-layer overrides supplied via `-c`/`--config`.
+    SessionFlags,
+
+    /// `managed_config.toml` was designed to be a config that was loaded
+    /// as the last layer on top of everything else. This scheme did not quite
+    /// work out as intended, but we keep this variant as a "best effort" while
+    /// we phase out `managed_config.toml` in favor of `requirements.toml`.
+    #[serde(rename_all = "camelCase")]
+    #[ts(rename_all = "camelCase")]
+    LegacyManagedConfigTomlFromFile {
+        file: AbsolutePathBuf,
+    },
+
+    LegacyManagedConfigTomlFromMdm,
 }

+impl ConfigLayerSource {
+    /// A settings from a layer with a higher precedence will override a setting
+    /// from a layer with a lower precedence.
+    pub fn precedence(&self) -> i16 {
+        match self {
+            ConfigLayerSource::Mdm { .. } => 0,
+            ConfigLayerSource::System { .. } => 10,
+            ConfigLayerSource::User { .. } => 20,
+            ConfigLayerSource::Project { .. } => 25,
+            ConfigLayerSource::SessionFlags => 30,
+            ConfigLayerSource::LegacyManagedConfigTomlFromFile { .. } => 40,
+            ConfigLayerSource::LegacyManagedConfigTomlFromMdm => 50,
+        }
+    }
+}
+
+/// Compares [ConfigLayerSource] by precedence, so `A < B` means settings from
+/// layer `A` will be overridden by settings from layer `B`.
+impl PartialOrd for ConfigLayerSource {
+    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
+        Some(self.precedence().cmp(&other.precedence()))
+    }
+}
+
 #[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default, JsonSchema, TS)]
@@ -344,7 +407,7 @@ pub struct ConfigWriteResponse {
     pub status: WriteStatus,
     pub version: String,
     /// Canonical path to the config file that was written.
-    pub file_path: String,
+    pub file_path: AbsolutePathBuf,
     pub overridden_metadata: Option<OverriddenMetadata>,
 }
@@ -357,6 +420,7 @@ pub enum ConfigWriteErrorCode {
     ConfigValidationError,
     ConfigPathNotFound,
     ConfigSchemaUnknownKey,
+    UserLayerNotFound,
 }

 #[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
@@ -422,6 +486,15 @@ pub enum ApprovalDecision {
     Cancel,
 }

+#[derive(Serialize, Deserialize, Debug, Default, Clone, PartialEq, Eq, JsonSchema, TS)]
+#[serde(rename_all = "camelCase")]
+#[ts(export_to = "v2/")]
+pub enum NetworkAccess {
+    #[default]
+    Restricted,
+    Enabled,
+}
+
 #[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, JsonSchema, TS)]
 #[serde(tag = "type", rename_all = "camelCase")]
 #[ts(tag = "type")]
@@ -431,6 +504,12 @@ pub enum SandboxPolicy {
     ReadOnly,
+    #[serde(rename_all = "camelCase")]
+    #[ts(rename_all = "camelCase")]
+    ExternalSandbox {
+        #[serde(default)]
+        network_access: NetworkAccess,
+    },
     #[serde(rename_all = "camelCase")]
     #[ts(rename_all = "camelCase")]
     WorkspaceWrite {
         #[serde(default)]
         writable_roots: Vec<AbsolutePathBuf>,
@@ -450,6 +529,14 @@ impl SandboxPolicy {
                 codex_protocol::protocol::SandboxPolicy::DangerFullAccess
             }
             SandboxPolicy::ReadOnly => codex_protocol::protocol::SandboxPolicy::ReadOnly,
+            SandboxPolicy::ExternalSandbox { network_access } => {
+                codex_protocol::protocol::SandboxPolicy::ExternalSandbox {
+                    network_access: match network_access {
+                        NetworkAccess::Restricted => CoreNetworkAccess::Restricted,
+                        NetworkAccess::Enabled => CoreNetworkAccess::Enabled,
+                    },
+                }
+            }
             SandboxPolicy::WorkspaceWrite {
                 writable_roots,
                 network_access,
@@ -472,6 +559,14 @@ impl From<codex_protocol::protocol::SandboxPolicy> for SandboxPolicy {
                 SandboxPolicy::DangerFullAccess
             }
             codex_protocol::protocol::SandboxPolicy::ReadOnly => SandboxPolicy::ReadOnly,
+            codex_protocol::protocol::SandboxPolicy::ExternalSandbox { network_access } => {
+                SandboxPolicy::ExternalSandbox {
+                    network_access: match network_access {
+                        CoreNetworkAccess::Restricted => NetworkAccess::Restricted,
+                        CoreNetworkAccess::Enabled => NetworkAccess::Enabled,
+                    },
+                }
+            }
             codex_protocol::protocol::SandboxPolicy::WorkspaceWrite {
                 writable_roots,
                 network_access,
@@ -980,6 +1075,10 @@ pub struct SkillsListParams {
     /// When empty, defaults to the current session working directory.
     #[serde(default, skip_serializing_if = "Vec::is_empty")]
     pub cwds: Vec<PathBuf>,
+
+    /// When true, bypass the skills cache and re-scan skills from disk.
+    #[serde(default, skip_serializing_if = "std::ops::Not::not")]
+    pub force_reload: bool,
 }

 #[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
@@ -996,7 +1095,8 @@ pub struct SkillsListResponse {
 pub enum SkillScope {
     User,
     Repo,
-    Public,
+    System,
+    Admin,
 }

 #[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
@@ -1005,6 +1105,9 @@ pub enum SkillScope {
 pub struct SkillMetadata {
     pub name: String,
     pub description: String,
+    #[ts(optional)]
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub short_description: Option<String>,
     pub path: PathBuf,
     pub scope: SkillScope,
 }
@@ -1031,6 +1134,7 @@ impl From<CoreSkillMetadata> for SkillMetadata {
         Self {
             name: value.name,
             description: value.description,
+            short_description: value.short_description,
             path: value.path,
             scope: value.scope.into(),
         }
@@ -1042,7 +1146,8 @@ impl From<CoreSkillScope> for SkillScope {
         match value {
             CoreSkillScope::User => Self::User,
             CoreSkillScope::Repo => Self::Repo,
-            CoreSkillScope::Public => Self::Public,
+            CoreSkillScope::System => Self::System,
+            CoreSkillScope::Admin => Self::Admin,
         }
     }
 }
@@ -1169,6 +1274,8 @@ pub struct Turn {
 pub struct TurnError {
     pub message: String,
     pub codex_error_info: Option<CodexErrorInfo>,
+    #[serde(default)]
+    pub additional_details: Option<String>,
 }

 #[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
@@ -1212,6 +1319,8 @@ pub struct TurnStartParams {
     pub effort: Option<ReasoningEffort>,
     /// Override the reasoning summary for this turn and subsequent turns.
     pub summary: Option<ReasoningSummary>,
+    /// Optional JSON Schema used to constrain the final assistant message for this turn.
+    pub output_schema: Option<JsonValue>,
 }

 #[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
@@ -1841,6 +1950,16 @@ pub struct AccountLoginCompletedNotification {
     pub error: Option<String>,
 }

+#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
+#[serde(rename_all = "camelCase")]
+#[ts(export_to = "v2/")]
+pub struct DeprecationNoticeNotification {
+    /// Concise summary of what is deprecated.
+    pub summary: String,
+    /// Optional extra guidance, such as migration steps or rationale.
+    pub details: Option<String>,
+}
+
 #[cfg(test)]
 mod tests {
     use super::*;
@@ -1850,11 +1969,30 @@ mod tests {
     use codex_protocol::items::TurnItem;
     use codex_protocol::items::UserMessageItem;
     use codex_protocol::items::WebSearchItem;
+    use codex_protocol::protocol::NetworkAccess as CoreNetworkAccess;
     use codex_protocol::user_input::UserInput as CoreUserInput;
     use pretty_assertions::assert_eq;
     use serde_json::json;
+    use std::path::PathBuf;

+    #[test]
+    fn sandbox_policy_round_trips_external_sandbox_network_access() {
+        let v2_policy = SandboxPolicy::ExternalSandbox {
+            network_access: NetworkAccess::Enabled,
+        };
+
+        let core_policy = v2_policy.to_core();
+        assert_eq!(
+            core_policy,
+            codex_protocol::protocol::SandboxPolicy::ExternalSandbox {
+                network_access: CoreNetworkAccess::Enabled,
+            }
+        );
+
+        let back_to_v2 = SandboxPolicy::from(core_policy);
+        assert_eq!(back_to_v2, v2_policy);
+    }
+
     #[test]
     fn core_turn_item_into_thread_item_converts_supported_variants() {
         let user_item = TurnItem::UserMessage(UserMessageItem {
@@ -1939,6 +2077,30 @@
         );
     }

+    #[test]
+    fn skills_list_params_serialization_uses_force_reload() {
+        assert_eq!(
+            serde_json::to_value(SkillsListParams {
+                cwds: Vec::new(),
+                force_reload: false,
+            })
+            .unwrap(),
+            json!({}),
+        );
+
+        assert_eq!(
+            serde_json::to_value(SkillsListParams {
+                cwds: vec![PathBuf::from("/repo")],
+                force_reload: true,
+            })
+            .unwrap(),
+            json!({
+                "cwds": ["/repo"],
+                "forceReload": true,
+            }),
+        );
+    }
+
     #[test]
     fn codex_error_info_serializes_http_status_code_in_camel_case() {
         let value = CodexErrorInfo::ResponseTooManyFailedAttempts {
@@ -13,6 +13,7 @@ use std::time::Duration;
|
||||
use anyhow::Context;
|
||||
use anyhow::Result;
|
||||
use anyhow::bail;
|
||||
use clap::ArgAction;
|
||||
use clap::Parser;
|
||||
use clap::Subcommand;
|
||||
use codex_app_server_protocol::AddConversationListenerParams;
|
||||
@@ -65,6 +66,19 @@ struct Cli {
|
||||
#[arg(long, env = "CODEX_BIN", default_value = "codex")]
|
||||
codex_bin: String,
|
||||
|
||||
/// Forwarded to the `codex` CLI as `--config key=value`. Repeatable.
|
||||
///
|
||||
/// Example:
|
||||
/// `--config 'model_providers.mock.base_url="http://localhost:4010/v2"'`
|
||||
#[arg(
|
||||
short = 'c',
|
||||
long = "config",
|
||||
value_name = "key=value",
|
||||
action = ArgAction::Append,
|
||||
global = true
|
||||
)]
|
||||
config_overrides: Vec<String>,
|
||||
|
||||
#[command(subcommand)]
|
||||
command: CliCommand,
|
||||
}
|
||||
@@ -116,29 +130,42 @@ enum CliCommand {
|
||||
}
|
||||
|
||||
fn main() -> Result<()> {
|
||||
let Cli { codex_bin, command } = Cli::parse();
|
||||
let Cli {
|
||||
codex_bin,
|
||||
config_overrides,
|
||||
command,
|
||||
} = Cli::parse();
|
||||
|
||||
match command {
|
||||
CliCommand::SendMessage { user_message } => send_message(codex_bin, user_message),
|
||||
CliCommand::SendMessageV2 { user_message } => send_message_v2(codex_bin, user_message),
|
||||
CliCommand::SendMessage { user_message } => {
|
||||
send_message(&codex_bin, &config_overrides, user_message)
|
||||
}
|
||||
CliCommand::SendMessageV2 { user_message } => {
|
||||
send_message_v2(&codex_bin, &config_overrides, user_message)
|
||||
}
|
||||
CliCommand::TriggerCmdApproval { user_message } => {
|
||||
trigger_cmd_approval(codex_bin, user_message)
|
||||
trigger_cmd_approval(&codex_bin, &config_overrides, user_message)
|
||||
}
|
||||
CliCommand::TriggerPatchApproval { user_message } => {
|
||||
trigger_patch_approval(codex_bin, user_message)
|
||||
trigger_patch_approval(&codex_bin, &config_overrides, user_message)
|
||||
}
|
||||
CliCommand::NoTriggerCmdApproval => no_trigger_cmd_approval(codex_bin),
|
||||
CliCommand::NoTriggerCmdApproval => no_trigger_cmd_approval(&codex_bin, &config_overrides),
|
||||
CliCommand::SendFollowUpV2 {
|
||||
first_message,
|
||||
follow_up_message,
|
||||
} => send_follow_up_v2(codex_bin, first_message, follow_up_message),
|
||||
CliCommand::TestLogin => test_login(codex_bin),
|
||||
CliCommand::GetAccountRateLimits => get_account_rate_limits(codex_bin),
|
||||
} => send_follow_up_v2(
|
||||
&codex_bin,
|
||||
&config_overrides,
|
||||
first_message,
|
||||
follow_up_message,
|
||||
),
|
||||
CliCommand::TestLogin => test_login(&codex_bin, &config_overrides),
|
||||
CliCommand::GetAccountRateLimits => get_account_rate_limits(&codex_bin, &config_overrides),
|
||||
}
|
||||
}
|
||||
|
||||
fn send_message(codex_bin: String, user_message: String) -> Result<()> {
|
||||
let mut client = CodexClient::spawn(codex_bin)?;
|
||||
fn send_message(codex_bin: &str, config_overrides: &[String], user_message: String) -> Result<()> {
|
||||
let mut client = CodexClient::spawn(codex_bin, config_overrides)?;
|
||||
|
||||
let initialize = client.initialize()?;
|
||||
println!("< initialize response: {initialize:?}");
|
||||
@@ -159,46 +186,61 @@ fn send_message(codex_bin: String, user_message: String) -> Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn send_message_v2(codex_bin: String, user_message: String) -> Result<()> {
|
||||
send_message_v2_with_policies(codex_bin, user_message, None, None)
|
||||
fn send_message_v2(
|
||||
codex_bin: &str,
|
||||
config_overrides: &[String],
|
||||
user_message: String,
|
||||
) -> Result<()> {
|
||||
send_message_v2_with_policies(codex_bin, config_overrides, user_message, None, None)
|
||||
}
|
||||
|
||||
fn trigger_cmd_approval(codex_bin: String, user_message: Option<String>) -> Result<()> {
|
||||
fn trigger_cmd_approval(
|
||||
codex_bin: &str,
|
||||
config_overrides: &[String],
|
||||
user_message: Option<String>,
|
||||
) -> Result<()> {
|
||||
let default_prompt =
|
||||
"Run `touch /tmp/should-trigger-approval` so I can confirm the file exists.";
|
||||
let message = user_message.unwrap_or_else(|| default_prompt.to_string());
|
||||
send_message_v2_with_policies(
|
||||
codex_bin,
|
||||
config_overrides,
|
||||
message,
|
||||
Some(AskForApproval::OnRequest),
Some(SandboxPolicy::ReadOnly),
)
}

fn trigger_patch_approval(codex_bin: String, user_message: Option<String>) -> Result<()> {
fn trigger_patch_approval(
codex_bin: &str,
config_overrides: &[String],
user_message: Option<String>,
) -> Result<()> {
let default_prompt =
"Create a file named APPROVAL_DEMO.txt containing a short hello message using apply_patch.";
let message = user_message.unwrap_or_else(|| default_prompt.to_string());
send_message_v2_with_policies(
codex_bin,
config_overrides,
message,
Some(AskForApproval::OnRequest),
Some(SandboxPolicy::ReadOnly),
)
}

fn no_trigger_cmd_approval(codex_bin: String) -> Result<()> {
fn no_trigger_cmd_approval(codex_bin: &str, config_overrides: &[String]) -> Result<()> {
let prompt = "Run `touch should_not_trigger_approval.txt`";
send_message_v2_with_policies(codex_bin, prompt.to_string(), None, None)
send_message_v2_with_policies(codex_bin, config_overrides, prompt.to_string(), None, None)
}

fn send_message_v2_with_policies(
codex_bin: String,
codex_bin: &str,
config_overrides: &[String],
user_message: String,
approval_policy: Option<AskForApproval>,
sandbox_policy: Option<SandboxPolicy>,
) -> Result<()> {
let mut client = CodexClient::spawn(codex_bin)?;
let mut client = CodexClient::spawn(codex_bin, config_overrides)?;

let initialize = client.initialize()?;
println!("< initialize response: {initialize:?}");
@@ -222,11 +264,12 @@ fn send_message_v2_with_policies(
}

fn send_follow_up_v2(
codex_bin: String,
codex_bin: &str,
config_overrides: &[String],
first_message: String,
follow_up_message: String,
) -> Result<()> {
let mut client = CodexClient::spawn(codex_bin)?;
let mut client = CodexClient::spawn(codex_bin, config_overrides)?;

let initialize = client.initialize()?;
println!("< initialize response: {initialize:?}");
@@ -259,8 +302,8 @@ fn send_follow_up_v2(
Ok(())
}

fn test_login(codex_bin: String) -> Result<()> {
let mut client = CodexClient::spawn(codex_bin)?;
fn test_login(codex_bin: &str, config_overrides: &[String]) -> Result<()> {
let mut client = CodexClient::spawn(codex_bin, config_overrides)?;

let initialize = client.initialize()?;
println!("< initialize response: {initialize:?}");
@@ -289,8 +332,8 @@ fn test_login(codex_bin: String) -> Result<()> {
}
}

fn get_account_rate_limits(codex_bin: String) -> Result<()> {
let mut client = CodexClient::spawn(codex_bin)?;
fn get_account_rate_limits(codex_bin: &str, config_overrides: &[String]) -> Result<()> {
let mut client = CodexClient::spawn(codex_bin, config_overrides)?;

let initialize = client.initialize()?;
println!("< initialize response: {initialize:?}");
@@ -309,8 +352,12 @@ struct CodexClient {
}

impl CodexClient {
fn spawn(codex_bin: String) -> Result<Self> {
let mut codex_app_server = Command::new(&codex_bin)
fn spawn(codex_bin: &str, config_overrides: &[String]) -> Result<Self> {
let mut cmd = Command::new(codex_bin);
for override_kv in config_overrides {
cmd.arg("--config").arg(override_kv);
}
let mut codex_app_server = cmd
.arg("app-server")
.stdin(Stdio::piped())
.stdout(Stdio::piped())
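Below is a standalone sketch of the reworked spawn behavior shown above: every `--config key=value` pair is forwarded ahead of the `app-server` subcommand, with stdio piped for the JSON-RPC handshake. The binary name and override value in `main` are hypothetical; the real example feeds the child into `CodexClient` rather than returning it raw.

```rust
use std::process::{Child, Command, Stdio};

// Mirrors the diff's CodexClient::spawn: forward each override as
// `--config key=value`, then launch the `app-server` subcommand with
// piped stdio so JSON-RPC can run over stdin/stdout.
fn spawn_app_server(codex_bin: &str, config_overrides: &[String]) -> std::io::Result<Child> {
    let mut cmd = Command::new(codex_bin);
    for override_kv in config_overrides {
        cmd.arg("--config").arg(override_kv);
    }
    cmd.arg("app-server")
        .stdin(Stdio::piped())
        .stdout(Stdio::piped())
        .spawn()
}

fn main() -> std::io::Result<()> {
    // Hypothetical override value, in the `key=value` form the CLI expects.
    let overrides = vec!["model=\"mock-model\"".to_string()];
    let _child = spawn_app_server("codex", &overrides)?;
    Ok(())
}
```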
@@ -48,7 +48,6 @@ uuid = { workspace = true, features = ["serde", "v7"] }

[dev-dependencies]
app_test_support = { workspace = true }
assert_cmd = { workspace = true }
base64 = { workspace = true }
core_test_support = { workspace = true }
mcp-types = { workspace = true }

@@ -77,12 +77,12 @@ Example (from OpenAI's official VSCode extension):
- `review/start` — kick off Codex’s automated reviewer for a thread; responds like `turn/start` and emits `item/started`/`item/completed` notifications with `enteredReviewMode` and `exitedReviewMode` items, plus a final assistant `agentMessage` containing the review.
- `command/exec` — run a single command under the server sandbox without starting a thread/turn (handy for utilities and validation).
- `model/list` — list available models (with reasoning effort options).
- `skills/list` — list skills for one or more `cwd` values.
- `skills/list` — list skills for one or more `cwd` values (optional `forceReload`).
- `mcpServer/oauth/login` — start an OAuth login for a configured MCP server; returns an `authorization_url` and later emits `mcpServer/oauthLogin/completed` once the browser flow finishes.
- `mcpServerStatus/list` — enumerate configured MCP servers with their tools, resources, resource templates, and auth status; supports cursor+limit pagination.
- `feedback/upload` — submit a feedback report (classification + optional reason/logs and conversation_id); returns the tracking thread id.
- `command/exec` — run a single command under the server sandbox without starting a thread/turn (handy for utilities and validation).
- `config/read` — fetch the effective config on disk after resolving config layering.
- `config/read` — fetch the effective config on disk after resolving config layering (thread-agnostic; does not include in-repo `.codex/` layers).
- `config/value/write` — write a single config key/value to the user's config.toml on disk.
- `config/batchWrite` — apply multiple config edits atomically to the user's config.toml on disk.

@@ -162,7 +162,7 @@ Turns attach user input (text or images) to a thread and trigger Codex generatio
- `{"type":"image","url":"https://…png"}`
- `{"type":"localImage","path":"/tmp/screenshot.png"}`

You can optionally specify config overrides on the new turn. If specified, these settings become the default for subsequent turns on the same thread.
You can optionally specify config overrides on the new turn. If specified, these settings become the default for subsequent turns on the same thread. `outputSchema` applies only to the current turn.

```json
{ "method": "turn/start", "id": 30, "params": {
@@ -172,13 +172,20 @@ You can optionally specify config overrides on the new turn. If specified, these
"cwd": "/Users/me/project",
"approvalPolicy": "unlessTrusted",
"sandboxPolicy": {
"mode": "workspaceWrite",
"type": "workspaceWrite",
"writableRoots": ["/Users/me/project"],
"networkAccess": true
},
"model": "gpt-5.1-codex",
"effort": "medium",
"summary": "concise"
"summary": "concise",
// Optional JSON Schema to constrain the final assistant message for this turn.
"outputSchema": {
"type": "object",
"properties": { "answer": { "type": "string" } },
"required": ["answer"],
"additionalProperties": false
}
} }
{ "id": 30, "result": { "turn": {
"id": "turn_456",
@@ -285,10 +292,12 @@ Run a standalone command (argv vector) in the server’s sandbox without creatin
{ "id": 32, "result": { "exitCode": 0, "stdout": "...", "stderr": "" } }
```

- For clients that are already sandboxed externally, set `sandboxPolicy` to `{"type":"externalSandbox","networkAccess":"enabled"}` (or omit `networkAccess` to keep it restricted). Codex will not enforce its own sandbox in this mode; it tells the model it has full file-system access and passes the `networkAccess` state through `environment_context`.

Notes:

- Empty `command` arrays are rejected.
- `sandboxPolicy` accepts the same shape used by `turn/start` (e.g., `dangerFullAccess`, `readOnly`, `workspaceWrite` with flags).
- `sandboxPolicy` accepts the same shape used by `turn/start` (e.g., `dangerFullAccess`, `readOnly`, `workspaceWrite` with flags, `externalSandbox` with `networkAccess` `restricted|enabled`).
- When omitted, `timeoutMs` falls back to the server default.
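
A sketch of the corresponding `command/exec` request, built with `serde_json`. The id, argv, and timeout are placeholders; the `externalSandbox` shape follows the note above.

```rust
use serde_json::{Value, json};

// Builds a command/exec JSON-RPC request per the notes above: a
// non-empty argv vector, an optional timeout, and the externalSandbox
// policy for clients that are already sandboxed.
fn command_exec_request(id: u64) -> Value {
    json!({
        "method": "command/exec",
        "id": id,
        "params": {
            "command": ["ls", "-la"],
            "timeoutMs": 10_000,
            "sandboxPolicy": {
                "type": "externalSandbox",
                "networkAccess": "enabled"
            }
        }
    })
}
```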

## Events
@@ -300,7 +309,7 @@ Event notifications are the server-initiated event stream for thread lifecycles,
The app-server streams JSON-RPC notifications while a turn is running. Each turn starts with `turn/started` (initial `turn`) and ends with `turn/completed` (final `turn` status). Token usage events stream separately via `thread/tokenUsage/updated`. Clients subscribe to the events they care about, rendering each item incrementally as updates arrive. The per-item lifecycle is always: `item/started` → zero or more item-specific deltas → `item/completed`.

- `turn/started` — `{ turn }` with the turn id, empty `items`, and `status: "inProgress"`.
- `turn/completed` — `{ turn }` where `turn.status` is `completed`, `interrupted`, or `failed`; failures carry `{ error: { message, codexErrorInfo? } }`.
- `turn/completed` — `{ turn }` where `turn.status` is `completed`, `interrupted`, or `failed`; failures carry `{ error: { message, codexErrorInfo?, additionalDetails? } }`.
- `turn/diff/updated` — `{ threadId, turnId, diff }` represents the up-to-date snapshot of the turn-level unified diff, emitted after every FileChange item. `diff` is the latest aggregated unified diff across every file change in the turn. UIs can render this to show the full "what changed" view without stitching individual `fileChange` items.
- `turn/plan/updated` — `{ turnId, explanation?, plan }` whenever the agent shares or changes its plan; each `plan` entry is `{ step, status }` with `status` in `pending`, `inProgress`, or `completed`.
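
A minimal client-side dispatch sketch over the notification methods listed above. The `Notification` struct is a stand-in for whatever JSON-RPC framing a client already has; only the turn-level events are matched here.

```rust
use serde_json::Value;

struct Notification {
    method: String,
    params: Value,
}

// Route server-initiated events by method name. Per the lifecycle above,
// item-level events always arrive as item/started → deltas → item/completed.
fn dispatch(n: &Notification) {
    match n.method.as_str() {
        "turn/started" => println!("turn {} started", n.params["turn"]["id"]),
        "turn/completed" => println!("turn finished: {}", n.params["turn"]["status"]),
        "turn/diff/updated" => println!("new diff snapshot for turn {}", n.params["turnId"]),
        "turn/plan/updated" => println!("plan updated for turn {}", n.params["turnId"]),
        _ => {} // item/started, item-specific deltas, item/completed, ...
    }
}
```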

@@ -350,7 +359,7 @@ There are additional item-specific events:

### Errors

`error` event is emitted whenever the server hits an error mid-turn (for example, upstream model errors or quota limits). Carries the same `{ error: { message, codexErrorInfo? } }` payload as `turn.status: "failed"` and may precede that terminal notification.
`error` event is emitted whenever the server hits an error mid-turn (for example, upstream model errors or quota limits). Carries the same `{ error: { message, codexErrorInfo?, additionalDetails? } }` payload as `turn.status: "failed"` and may precede that terminal notification.

`codexErrorInfo` maps to the `CodexErrorInfo` enum. Common values:
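
The shared error payload deserializes into a small struct. A sketch assuming serde's derive feature; `codexErrorInfo` is deliberately left untyped rather than guessing the enum's variants, since the list of common values is truncated in this excerpt.

```rust
use serde::Deserialize;
use serde_json::Value;

// Mirrors `{ error: { message, codexErrorInfo?, additionalDetails? } }`
// as carried by both the `error` notification and a failed turn.
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
struct WireTurnError {
    message: String,
    codex_error_info: Option<Value>,
    additional_details: Option<Value>,
}
```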

@@ -15,6 +15,7 @@ use codex_app_server_protocol::CommandExecutionRequestApprovalParams;
use codex_app_server_protocol::CommandExecutionRequestApprovalResponse;
use codex_app_server_protocol::CommandExecutionStatus;
use codex_app_server_protocol::ContextCompactedNotification;
use codex_app_server_protocol::DeprecationNoticeNotification;
use codex_app_server_protocol::ErrorNotification;
use codex_app_server_protocol::ExecCommandApprovalParams;
use codex_app_server_protocol::ExecCommandApprovalResponse;
@@ -283,6 +284,15 @@ pub(crate) async fn apply_bespoke_event_handling(
.send_server_notification(ServerNotification::ContextCompacted(notification))
.await;
}
EventMsg::DeprecationNotice(event) => {
let notification = DeprecationNoticeNotification {
summary: event.summary,
details: event.details,
};
outgoing
.send_server_notification(ServerNotification::DeprecationNotice(notification))
.await;
}
EventMsg::ReasoningContentDelta(event) => {
let notification = ReasoningSummaryTextDeltaNotification {
thread_id: conversation_id.to_string(),
@@ -330,6 +340,7 @@ pub(crate) async fn apply_bespoke_event_handling(
let turn_error = TurnError {
message: ev.message,
codex_error_info: ev.codex_error_info.map(V2CodexErrorInfo::from),
additional_details: None,
};
handle_error(conversation_id, turn_error.clone(), &turn_summary_store).await;
outgoing
@@ -347,6 +358,7 @@ pub(crate) async fn apply_bespoke_event_handling(
let turn_error = TurnError {
message: ev.message,
codex_error_info: ev.codex_error_info.map(V2CodexErrorInfo::from),
additional_details: ev.additional_details,
};
outgoing
.send_server_notification(ServerNotification::Error(ErrorNotification {
@@ -1330,6 +1342,7 @@ mod tests {
TurnError {
message: "boom".to_string(),
codex_error_info: Some(V2CodexErrorInfo::InternalServerError),
additional_details: None,
},
&turn_summary_store,
)
@@ -1341,6 +1354,7 @@ mod tests {
Some(TurnError {
message: "boom".to_string(),
codex_error_info: Some(V2CodexErrorInfo::InternalServerError),
additional_details: None,
})
);
Ok(())
@@ -1388,6 +1402,7 @@ mod tests {
TurnError {
message: "oops".to_string(),
codex_error_info: None,
additional_details: None,
},
&turn_summary_store,
)
@@ -1429,6 +1444,7 @@ mod tests {
TurnError {
message: "bad".to_string(),
codex_error_info: Some(V2CodexErrorInfo::Other),
additional_details: None,
},
&turn_summary_store,
)
@@ -1457,6 +1473,7 @@ mod tests {
Some(TurnError {
message: "bad".to_string(),
codex_error_info: Some(V2CodexErrorInfo::Other),
additional_details: None,
})
);
}
@@ -1681,6 +1698,7 @@ mod tests {
TurnError {
message: "a1".to_string(),
codex_error_info: Some(V2CodexErrorInfo::BadRequest),
additional_details: None,
},
&turn_summary_store,
)
@@ -1700,6 +1718,7 @@ mod tests {
TurnError {
message: "b1".to_string(),
codex_error_info: None,
additional_details: None,
},
&turn_summary_store,
)
@@ -1736,6 +1755,7 @@ mod tests {
Some(TurnError {
message: "a1".to_string(),
codex_error_info: Some(V2CodexErrorInfo::BadRequest),
additional_details: None,
})
);
}
@@ -1756,6 +1776,7 @@ mod tests {
Some(TurnError {
message: "b1".to_string(),
codex_error_info: None,
additional_details: None,
})
);
}

@@ -1,4 +1,5 @@
use crate::bespoke_event_handling::apply_bespoke_event_handling;
use crate::config_api::ConfigApi;
use crate::error_code::INTERNAL_ERROR_CODE;
use crate::error_code::INVALID_REQUEST_ERROR_CODE;
use crate::fuzzy_file_search::run_fuzzy_file_search;
@@ -155,7 +156,6 @@ use codex_protocol::protocol::SessionMetaLine;
use codex_protocol::protocol::USER_MESSAGE_BEGIN;
use codex_protocol::user_input::UserInput as CoreInputItem;
use codex_rmcp_client::perform_oauth_login_return_url;
use codex_utils_json_to_toml::json_to_toml;
use std::collections::HashMap;
use std::collections::HashSet;
use std::ffi::OsStr;
@@ -215,7 +215,7 @@ pub(crate) struct CodexMessageProcessor {
outgoing: Arc<OutgoingMessageSender>,
codex_linux_sandbox_exe: Option<PathBuf>,
config: Arc<Config>,
cli_overrides: Vec<(String, TomlValue)>,
config_api: ConfigApi,
conversation_listeners: HashMap<Uuid, oneshot::Sender<()>>,
active_login: Arc<Mutex<Option<ActiveLogin>>>,
// Queue of pending interrupt requests per conversation. We reply when TurnAborted arrives.
@@ -265,13 +265,14 @@ impl CodexMessageProcessor {
cli_overrides: Vec<(String, TomlValue)>,
feedback: CodexFeedback,
) -> Self {
let config_api = ConfigApi::new(config.codex_home.clone(), cli_overrides.clone());
Self {
auth_manager,
conversation_manager,
outgoing,
codex_linux_sandbox_exe,
config,
cli_overrides,
config_api,
conversation_listeners: HashMap::new(),
active_login: Arc::new(Mutex::new(None)),
pending_interrupts: Arc::new(Mutex::new(HashMap::new())),
@@ -282,13 +283,7 @@ impl CodexMessageProcessor {
}

async fn load_latest_config(&self) -> Result<Config, JSONRPCErrorError> {
Config::load_with_cli_overrides(self.cli_overrides.clone(), ConfigOverrides::default())
.await
.map_err(|err| JSONRPCErrorError {
code: INTERNAL_ERROR_CODE,
message: format!("failed to reload config: {err}"),
data: None,
})
self.config_api.load_latest_thread_agnostic_config().await
}

fn review_request_from_target(
@@ -1186,10 +1181,22 @@ impl CodexMessageProcessor {
arg0: None,
};

let effective_policy = params
.sandbox_policy
.map(|policy| policy.to_core())
.unwrap_or_else(|| self.config.sandbox_policy.clone());
let requested_policy = params.sandbox_policy.map(|policy| policy.to_core());
let effective_policy = match requested_policy {
Some(policy) => match self.config.sandbox_policy.can_set(&policy) {
Ok(()) => policy,
Err(err) => {
let error = JSONRPCErrorError {
code: INVALID_REQUEST_ERROR_CODE,
message: format!("invalid sandbox policy: {err}"),
data: None,
};
self.outgoing.send_error(request_id, error).await;
return;
}
},
None => self.config.sandbox_policy.get().clone(),
};
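
When `can_set` refuses the requested policy, the caller receives a plain JSON-RPC error. A sketch of the wire shape; the numeric code is an assumption (the conventional JSON-RPC invalid-request value), and `error_code.rs` holds the actual constant.

```rust
use serde_json::{Value, json};

// Error response emitted when sandbox escalation is rejected; -32600 is
// assumed here, matching the usual JSON-RPC "invalid request" code.
fn invalid_sandbox_policy_error(id: u64, reason: &str) -> Value {
    json!({
        "id": id,
        "error": {
            "code": -32600,
            "message": format!("invalid sandbox policy: {reason}"),
        }
    })
}
```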

let codex_linux_sandbox_exe = self.config.codex_linux_sandbox_exe.clone();
let outgoing = self.outgoing.clone();
@@ -1266,18 +1273,20 @@ impl CodexMessageProcessor {
);
}

let config = match derive_config_from_params(overrides, Some(cli_overrides)).await {
Ok(config) => config,
Err(err) => {
let error = JSONRPCErrorError {
code: INVALID_REQUEST_ERROR_CODE,
message: format!("error deriving config: {err}"),
data: None,
};
self.outgoing.send_error(request_id, error).await;
return;
}
};
let config =
match derive_config_from_params(&self.config_api, overrides, Some(cli_overrides)).await
{
Ok(config) => config,
Err(err) => {
let error = JSONRPCErrorError {
code: INVALID_REQUEST_ERROR_CODE,
message: format!("error deriving config: {err}"),
data: None,
};
self.outgoing.send_error(request_id, error).await;
return;
}
};

match self.conversation_manager.new_conversation(config).await {
Ok(conversation_id) => {
@@ -1316,18 +1325,19 @@ impl CodexMessageProcessor {
params.developer_instructions,
);

let config = match derive_config_from_params(overrides, params.config).await {
Ok(config) => config,
Err(err) => {
let error = JSONRPCErrorError {
code: INVALID_REQUEST_ERROR_CODE,
message: format!("error deriving config: {err}"),
data: None,
};
self.outgoing.send_error(request_id, error).await;
return;
}
};
let config =
match derive_config_from_params(&self.config_api, overrides, params.config).await {
Ok(config) => config,
Err(err) => {
let error = JSONRPCErrorError {
code: INVALID_REQUEST_ERROR_CODE,
message: format!("error deriving config: {err}"),
data: None,
};
self.outgoing.send_error(request_id, error).await;
return;
}
};

match self.conversation_manager.new_conversation(config).await {
Ok(new_conv) => {
@@ -1555,7 +1565,7 @@ impl CodexMessageProcessor {
base_instructions,
developer_instructions,
);
match derive_config_from_params(overrides, cli_overrides).await {
match derive_config_from_params(&self.config_api, overrides, cli_overrides).await {
Ok(config) => config,
Err(err) => {
let error = JSONRPCErrorError {
@@ -1980,16 +1990,6 @@ impl CodexMessageProcessor {
}
};

if !config.features.enabled(Feature::RmcpClient) {
let error = JSONRPCErrorError {
code: INVALID_REQUEST_ERROR_CODE,
message: "OAuth login is only supported when [features].rmcp_client is true in config.toml".to_string(),
data: None,
};
self.outgoing.send_error(request_id, error).await;
return;
}

let McpServerOauthLoginParams {
name,
scopes,
@@ -2226,7 +2226,7 @@ impl CodexMessageProcessor {
..Default::default()
};

derive_config_from_params(overrides, Some(cli_overrides)).await
derive_config_from_params(&self.config_api, overrides, Some(cli_overrides)).await
}
None => Ok(self.config.as_ref().clone()),
};
@@ -2577,6 +2577,7 @@ impl CodexMessageProcessor {
let _ = conversation
.submit(Op::UserInput {
items: mapped_items,
final_output_json_schema: None,
})
.await;

@@ -2596,6 +2597,7 @@ impl CodexMessageProcessor {
model,
effort,
summary,
output_schema,
} = params;

let Ok(conversation) = self
@@ -2630,7 +2632,7 @@ impl CodexMessageProcessor {
model,
effort,
summary,
final_output_json_schema: None,
final_output_json_schema: output_schema,
})
.await;

@@ -2640,36 +2642,27 @@ impl CodexMessageProcessor {
}

async fn skills_list(&self, request_id: RequestId, params: SkillsListParams) {
let SkillsListParams { cwds } = params;
let SkillsListParams { cwds, force_reload } = params;
let cwds = if cwds.is_empty() {
vec![self.config.cwd.clone()]
} else {
cwds
};

let data = if self.config.features.enabled(Feature::Skills) {
let skills_manager = self.conversation_manager.skills_manager();
cwds.into_iter()
.map(|cwd| {
let outcome = skills_manager.skills_for_cwd(&cwd);
let errors = errors_to_info(&outcome.errors);
let skills = skills_to_info(&outcome.skills);
codex_app_server_protocol::SkillsListEntry {
cwd,
skills,
errors,
}
})
.collect()
} else {
cwds.into_iter()
.map(|cwd| codex_app_server_protocol::SkillsListEntry {
let skills_manager = self.conversation_manager.skills_manager();
let data = cwds
.into_iter()
.map(|cwd| {
let outcome = skills_manager.skills_for_cwd_with_options(&cwd, force_reload);
let errors = errors_to_info(&outcome.errors);
let skills = skills_to_info(&outcome.skills);
codex_app_server_protocol::SkillsListEntry {
cwd,
skills: Vec::new(),
errors: Vec::new(),
})
.collect()
};
skills,
errors,
}
})
.collect();
self.outgoing
.send_response(request_id, SkillsListResponse { data })
.await;
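
For reference, a sketch of the request this handler now accepts, built with `serde_json`. `forceReload` is the new optional flag; the cwd is a placeholder, and an empty `cwds` falls back to the server's configured cwd as the code above shows.

```rust
use serde_json::{Value, json};

// skills/list request carrying the new forceReload flag.
fn skills_list_request(id: u64) -> Value {
    json!({
        "method": "skills/list",
        "id": id,
        "params": {
            "cwds": ["/Users/me/project"],
            "forceReload": true
        }
    })
}
```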
@@ -2748,6 +2741,7 @@ impl CodexMessageProcessor {
let turn_id = conversation
.submit(Op::UserInput {
items: mapped_items,
final_output_json_schema: params.output_schema,
})
.await;

@@ -3328,6 +3322,7 @@ fn skills_to_info(
.map(|skill| codex_app_server_protocol::SkillMetadata {
name: skill.name.clone(),
description: skill.description.clone(),
short_description: skill.short_description.clone(),
path: skill.path.clone(),
scope: skill.scope.into(),
})
@@ -3347,16 +3342,13 @@ fn errors_to_info(
}

async fn derive_config_from_params(
config_api: &ConfigApi,
overrides: ConfigOverrides,
cli_overrides: Option<std::collections::HashMap<String, serde_json::Value>>,
cli_overrides: Option<HashMap<String, serde_json::Value>>,
) -> std::io::Result<Config> {
let cli_overrides = cli_overrides
.unwrap_or_default()
.into_iter()
.map(|(k, v)| (k, json_to_toml(v)))
.collect();

Config::load_with_cli_overrides(cli_overrides, overrides).await
config_api
.load_thread_agnostic_config(overrides, cli_overrides)
.await
}

async fn read_summary_from_rollout(

@@ -7,21 +7,28 @@ use codex_app_server_protocol::ConfigValueWriteParams;
use codex_app_server_protocol::ConfigWriteErrorCode;
use codex_app_server_protocol::ConfigWriteResponse;
use codex_app_server_protocol::JSONRPCErrorError;
use codex_core::config::Config;
use codex_core::config::ConfigBuilder;
use codex_core::config::ConfigService;
use codex_core::config::ConfigServiceError;
use codex_utils_json_to_toml::json_to_toml;
use serde_json::json;
use std::path::PathBuf;
use toml::Value as TomlValue;

#[derive(Clone)]
pub(crate) struct ConfigApi {
codex_home: PathBuf,
cli_overrides: Vec<(String, TomlValue)>,
service: ConfigService,
}

impl ConfigApi {
pub(crate) fn new(codex_home: PathBuf, cli_overrides: Vec<(String, TomlValue)>) -> Self {
Self {
service: ConfigService::new(codex_home, cli_overrides),
service: ConfigService::new(codex_home.clone(), cli_overrides.clone()),
codex_home,
cli_overrides,
}
}

@@ -32,6 +39,30 @@ impl ConfigApi {
self.service.read(params).await.map_err(map_error)
}

pub(crate) async fn load_thread_agnostic_config(
&self,
overrides: codex_core::config::ConfigOverrides,
request_cli_overrides: Option<std::collections::HashMap<String, serde_json::Value>>,
) -> std::io::Result<Config> {
// Apply the app server's startup `--config` overrides, then apply request-scoped overrides
// with higher precedence.
let mut merged_cli_overrides = self.cli_overrides.clone();
merged_cli_overrides.extend(
request_cli_overrides
.unwrap_or_default()
.into_iter()
.map(|(k, v)| (k, json_to_toml(v))),
);

ConfigBuilder::default()
.codex_home(self.codex_home.clone())
.cli_overrides(merged_cli_overrides)
.harness_overrides(overrides)
.thread_agnostic()
.build()
.await
}

pub(crate) async fn write_value(
&self,
params: ConfigValueWriteParams,
@@ -45,6 +76,18 @@ impl ConfigApi {
) -> Result<ConfigWriteResponse, JSONRPCErrorError> {
self.service.batch_write(params).await.map_err(map_error)
}

pub(crate) async fn load_latest_thread_agnostic_config(
&self,
) -> Result<Config, JSONRPCErrorError> {
self.load_thread_agnostic_config(codex_core::config::ConfigOverrides::default(), None)
.await
.map_err(|err| JSONRPCErrorError {
code: INTERNAL_ERROR_CODE,
message: format!("failed to reload config: {err}"),
data: None,
})
}
}
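
The merge in `load_thread_agnostic_config` is order-sensitive: startup `--config` pairs are cloned first and request-scoped pairs are appended, so a duplicate key from the request shadows the startup value wherever overrides are applied in order. A self-contained sketch of that last-wins layout, with hypothetical keys:

```rust
use toml::Value;

// Last-wins merge: the request-scoped pair is appended after the
// startup pair, so "mock-model" is the value that takes effect for `model`.
fn merged_overrides() -> Vec<(String, Value)> {
    let startup = vec![("model".to_string(), Value::String("gpt-5.1".to_string()))];
    let request = vec![("model".to_string(), Value::String("mock-model".to_string()))];
    let mut merged = startup;
    merged.extend(request);
    merged
}
```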

fn map_error(err: ConfigServiceError) -> JSONRPCErrorError {

@@ -1,6 +1,5 @@
use std::num::NonZero;
use std::num::NonZeroUsize;
use std::path::Path;
use std::path::PathBuf;
use std::sync::Arc;
use std::sync::atomic::AtomicBool;
@@ -63,11 +62,7 @@ pub(crate) async fn run_fuzzy_file_search(
Ok(Ok((root, res))) => {
for m in res.matches {
let path = m.path;
//TODO(shijie): Move file name generation to file_search lib.
let file_name = Path::new(&path)
.file_name()
.map(|name| name.to_string_lossy().into_owned())
.unwrap_or_else(|| path.clone());
let file_name = file_search::file_name_from_path(&path);
let result = FuzzyFileSearchResult {
root: root.clone(),
path,

@@ -2,7 +2,6 @@

use codex_common::CliConfigOverrides;
use codex_core::config::Config;
use codex_core::config::ConfigOverrides;
use std::io::ErrorKind;
use std::io::Result as IoResult;
use std::path::PathBuf;
@@ -18,13 +17,11 @@ use tokio::io::BufReader;
use tokio::io::{self};
use tokio::sync::mpsc;
use toml::Value as TomlValue;
use tracing::Level;
use tracing::debug;
use tracing::error;
use tracing::info;
use tracing_subscriber::EnvFilter;
use tracing_subscriber::Layer;
use tracing_subscriber::filter::Targets;
use tracing_subscriber::layer::SubscriberExt;
use tracing_subscriber::util::SubscriberInitExt;

@@ -81,12 +78,11 @@ pub async fn run_main(
format!("error parsing -c overrides: {e}"),
)
})?;
let config =
Config::load_with_cli_overrides(cli_kv_overrides.clone(), ConfigOverrides::default())
.await
.map_err(|e| {
std::io::Error::new(ErrorKind::InvalidData, format!("error loading config: {e}"))
})?;
let config = Config::load_with_cli_overrides(cli_kv_overrides.clone())
.await
.map_err(|e| {
std::io::Error::new(ErrorKind::InvalidData, format!("error loading config: {e}"))
})?;

let feedback = CodexFeedback::new();

@@ -105,11 +101,8 @@ pub async fn run_main(
.with_span_events(tracing_subscriber::fmt::format::FmtSpan::FULL)
.with_filter(EnvFilter::from_default_env());

let feedback_layer = tracing_subscriber::fmt::layer()
.with_writer(feedback.make_writer())
.with_ansi(false)
.with_target(false)
.with_filter(Targets::new().with_default(Level::TRACE));
let feedback_layer = feedback.logger_layer();
let feedback_metadata_layer = feedback.metadata_layer();

let otel_logger_layer = otel.as_ref().and_then(|o| o.logger_layer());

@@ -118,6 +111,7 @@ pub async fn run_main(
let _ = tracing_subscriber::registry()
.with(stderr_fmt)
.with(feedback_layer)
.with(feedback_metadata_layer)
.with(otel_logger_layer)
.with(otel_tracing_layer)
.try_init();

@@ -9,12 +9,12 @@ path = "lib.rs"

[dependencies]
anyhow = { workspace = true }
assert_cmd = { workspace = true }
base64 = { workspace = true }
chrono = { workspace = true }
codex-app-server-protocol = { workspace = true }
codex-core = { workspace = true, features = ["test-support"] }
codex-protocol = { workspace = true }
codex-utils-cargo-bin = { workspace = true }
serde = { workspace = true }
serde_json = { workspace = true }
tokio = { workspace = true, features = [

@@ -11,7 +11,6 @@ use tokio::process::ChildStdin;
use tokio::process::ChildStdout;

use anyhow::Context;
use assert_cmd::prelude::*;
use codex_app_server_protocol::AddConversationListenerParams;
use codex_app_server_protocol::ArchiveConversationParams;
use codex_app_server_protocol::CancelLoginAccountParams;
@@ -49,7 +48,6 @@ use codex_app_server_protocol::ThreadResumeParams;
use codex_app_server_protocol::ThreadStartParams;
use codex_app_server_protocol::TurnInterruptParams;
use codex_app_server_protocol::TurnStartParams;
use std::process::Command as StdCommand;
use tokio::process::Command;

pub struct McpProcess {
@@ -78,12 +76,8 @@ impl McpProcess {
codex_home: &Path,
env_overrides: &[(&str, Option<&str>)],
) -> anyhow::Result<Self> {
// Use assert_cmd to locate the binary path and then switch to tokio::process::Command
let std_cmd = StdCommand::cargo_bin("codex-app-server")
.context("should find binary for codex-mcp-server")?;

let program = std_cmd.get_program().to_owned();

let program = codex_utils_cargo_bin::cargo_bin("codex-app-server")
.context("should find binary for codex-app-server")?;
let mut cmd = Command::new(program);

cmd.stdin(Stdio::piped());

@@ -1,12 +1,10 @@
use chrono::DateTime;
use chrono::Utc;
use codex_core::openai_models::model_presets::all_model_presets;
use codex_protocol::openai_models::ClientVersion;
use codex_core::models_manager::model_presets::all_model_presets;
use codex_protocol::openai_models::ConfigShellToolType;
use codex_protocol::openai_models::ModelInfo;
use codex_protocol::openai_models::ModelPreset;
use codex_protocol::openai_models::ModelVisibility;
use codex_protocol::openai_models::ReasoningSummaryFormat;
use codex_protocol::openai_models::TruncationPolicyConfig;
use serde_json::json;
use std::path::Path;
@@ -25,7 +23,6 @@ fn preset_to_info(preset: &ModelPreset, priority: i32) -> ModelInfo {
} else {
ModelVisibility::Hide
},
minimal_client_version: ClientVersion(0, 1, 0),
supported_in_api: true,
priority,
upgrade: preset.upgrade.as_ref().map(|u| u.id.clone()),
@@ -37,11 +34,11 @@ fn preset_to_info(preset: &ModelPreset, priority: i32) -> ModelInfo {
truncation_policy: TruncationPolicyConfig::bytes(10_000),
supports_parallel_tool_calls: false,
context_window: None,
reasoning_summary_format: ReasoningSummaryFormat::None,
experimental_supported_tools: Vec::new(),
}
}

// todo(aibrahim): fix the priorities to be the opposite here.
/// Write a models_cache.json file to the codex home directory.
/// This prevents ModelsManager from making network requests to refresh models.
/// The cache will be treated as fresh (within TTL) and used instead of fetching from the network.

@@ -305,6 +305,7 @@ async fn test_send_user_turn_changes_approval_policy_behavior() -> Result<()> {
model: "mock-model".to_string(),
effort: Some(ReasoningEffort::Medium),
summary: ReasoningSummary::Auto,
output_schema: None,
})
.await?;
// Acknowledge sendUserTurn
@@ -418,6 +419,7 @@ async fn test_send_user_turn_updates_sandbox_and_cwd_between_turns() -> Result<(
model: model.clone(),
effort: Some(ReasoningEffort::Medium),
summary: ReasoningSummary::Auto,
output_schema: None,
})
.await?;
timeout(
@@ -443,6 +445,7 @@ async fn test_send_user_turn_updates_sandbox_and_cwd_between_turns() -> Result<(
model: model.clone(),
effort: Some(ReasoningEffort::Medium),
summary: ReasoningSummary::Auto,
output_schema: None,
})
.await?;
timeout(

@@ -7,6 +7,7 @@ mod fuzzy_file_search;
mod interrupt;
mod list_resume;
mod login;
mod output_schema;
mod send_message;
mod set_default_model;
mod user_agent;

codex-rs/app-server/tests/suite/output_schema.rs (new file, 282 lines)
@@ -0,0 +1,282 @@
use anyhow::Result;
use app_test_support::McpProcess;
use app_test_support::to_response;
use codex_app_server_protocol::AddConversationListenerParams;
use codex_app_server_protocol::InputItem;
use codex_app_server_protocol::JSONRPCResponse;
use codex_app_server_protocol::NewConversationParams;
use codex_app_server_protocol::NewConversationResponse;
use codex_app_server_protocol::RequestId;
use codex_app_server_protocol::SendUserTurnParams;
use codex_app_server_protocol::SendUserTurnResponse;
use codex_core::protocol::AskForApproval;
use codex_core::protocol::SandboxPolicy;
use codex_protocol::config_types::ReasoningSummary;
use codex_protocol::openai_models::ReasoningEffort;
use core_test_support::responses;
use core_test_support::skip_if_no_network;
use pretty_assertions::assert_eq;
use std::path::Path;
use tempfile::TempDir;
use tokio::time::timeout;

const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10);

#[tokio::test]
async fn send_user_turn_accepts_output_schema_v1() -> Result<()> {
skip_if_no_network!(Ok(()));

let server = responses::start_mock_server().await;
let body = responses::sse(vec![
responses::ev_response_created("resp-1"),
responses::ev_assistant_message("msg-1", "Done"),
responses::ev_completed("resp-1"),
]);
let response_mock = responses::mount_sse_once(&server, body).await;

let codex_home = TempDir::new()?;
create_config_toml(codex_home.path(), &server.uri())?;

let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;

let new_conv_id = mcp
.send_new_conversation_request(NewConversationParams {
..Default::default()
})
.await?;
let new_conv_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(new_conv_id)),
)
.await??;
let NewConversationResponse {
conversation_id, ..
} = to_response::<NewConversationResponse>(new_conv_resp)?;

let listener_id = mcp
.send_add_conversation_listener_request(AddConversationListenerParams {
conversation_id,
experimental_raw_events: false,
})
.await?;
timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(listener_id)),
)
.await??;

let output_schema = serde_json::json!({
"type": "object",
"properties": {
"answer": { "type": "string" }
},
"required": ["answer"],
"additionalProperties": false
});

let send_turn_id = mcp
.send_send_user_turn_request(SendUserTurnParams {
conversation_id,
items: vec![InputItem::Text {
text: "Hello".to_string(),
}],
cwd: codex_home.path().to_path_buf(),
approval_policy: AskForApproval::Never,
sandbox_policy: SandboxPolicy::new_read_only_policy(),
model: "mock-model".to_string(),
effort: Some(ReasoningEffort::Medium),
summary: ReasoningSummary::Auto,
output_schema: Some(output_schema.clone()),
})
.await?;
let _send_turn_resp: SendUserTurnResponse = to_response::<SendUserTurnResponse>(
timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(send_turn_id)),
)
.await??,
)?;

timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("codex/event/task_complete"),
)
.await??;

let request = response_mock.single_request();
let payload = request.body_json();
let text = payload.get("text").expect("request missing text field");
let format = text
.get("format")
.expect("request missing text.format field");
assert_eq!(
format,
&serde_json::json!({
"name": "codex_output_schema",
"type": "json_schema",
"strict": true,
"schema": output_schema,
})
);

Ok(())
}

#[tokio::test]
async fn send_user_turn_output_schema_is_per_turn_v1() -> Result<()> {
skip_if_no_network!(Ok(()));

let server = responses::start_mock_server().await;
let body1 = responses::sse(vec![
responses::ev_response_created("resp-1"),
responses::ev_assistant_message("msg-1", "Done"),
responses::ev_completed("resp-1"),
]);
let response_mock1 = responses::mount_sse_once(&server, body1).await;

let codex_home = TempDir::new()?;
create_config_toml(codex_home.path(), &server.uri())?;

let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;

let new_conv_id = mcp
.send_new_conversation_request(NewConversationParams {
..Default::default()
})
.await?;
let new_conv_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(new_conv_id)),
)
.await??;
let NewConversationResponse {
conversation_id, ..
} = to_response::<NewConversationResponse>(new_conv_resp)?;

let listener_id = mcp
.send_add_conversation_listener_request(AddConversationListenerParams {
conversation_id,
experimental_raw_events: false,
})
.await?;
timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(listener_id)),
)
.await??;

let output_schema = serde_json::json!({
"type": "object",
"properties": {
"answer": { "type": "string" }
},
"required": ["answer"],
"additionalProperties": false
});

let send_turn_id = mcp
.send_send_user_turn_request(SendUserTurnParams {
conversation_id,
items: vec![InputItem::Text {
text: "Hello".to_string(),
}],
cwd: codex_home.path().to_path_buf(),
approval_policy: AskForApproval::Never,
sandbox_policy: SandboxPolicy::new_read_only_policy(),
model: "mock-model".to_string(),
effort: Some(ReasoningEffort::Medium),
summary: ReasoningSummary::Auto,
output_schema: Some(output_schema.clone()),
})
.await?;
let _send_turn_resp: SendUserTurnResponse = to_response::<SendUserTurnResponse>(
timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(send_turn_id)),
)
.await??,
)?;

timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("codex/event/task_complete"),
)
.await??;

let payload1 = response_mock1.single_request().body_json();
assert_eq!(
payload1.pointer("/text/format"),
Some(&serde_json::json!({
"name": "codex_output_schema",
"type": "json_schema",
"strict": true,
"schema": output_schema,
}))
);

let body2 = responses::sse(vec![
responses::ev_response_created("resp-2"),
responses::ev_assistant_message("msg-2", "Done"),
responses::ev_completed("resp-2"),
]);
let response_mock2 = responses::mount_sse_once(&server, body2).await;

let send_turn_id_2 = mcp
.send_send_user_turn_request(SendUserTurnParams {
conversation_id,
items: vec![InputItem::Text {
text: "Hello again".to_string(),
}],
cwd: codex_home.path().to_path_buf(),
approval_policy: AskForApproval::Never,
sandbox_policy: SandboxPolicy::new_read_only_policy(),
model: "mock-model".to_string(),
effort: Some(ReasoningEffort::Medium),
summary: ReasoningSummary::Auto,
output_schema: None,
})
.await?;
let _send_turn_resp_2: SendUserTurnResponse = to_response::<SendUserTurnResponse>(
timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(send_turn_id_2)),
)
.await??,
)?;

timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("codex/event/task_complete"),
)
.await??;

let payload2 = response_mock2.single_request().body_json();
assert_eq!(payload2.pointer("/text/format"), None);

Ok(())
}

fn create_config_toml(codex_home: &Path, server_uri: &str) -> std::io::Result<()> {
let config_toml = codex_home.join("config.toml");
std::fs::write(
config_toml,
format!(
r#"
model = "mock-model"
approval_policy = "never"
sandbox_mode = "read-only"

model_provider = "mock_provider"

[model_providers.mock_provider]
name = "Mock provider for test"
base_url = "{server_uri}/v1"
wire_api = "responses"
request_max_retries = 0
stream_max_retries = 0
"#
),
)
}
@@ -18,6 +18,7 @@ use codex_app_server_protocol::RequestId;
use codex_app_server_protocol::SandboxMode;
use codex_app_server_protocol::ToolsV2;
use codex_app_server_protocol::WriteStatus;
use codex_core::config_loader::SYSTEM_CONFIG_TOML_FILE_UNIX;
use codex_utils_absolute_path::AbsolutePathBuf;
use pretty_assertions::assert_eq;
use serde_json::json;
@@ -73,9 +74,7 @@ sandbox_mode = "workspace-write"
}
);
let layers = layers.expect("layers present");
assert_eq!(layers.len(), 2);
assert_eq!(layers[0].name, ConfigLayerSource::SessionFlags);
assert_eq!(layers[1].name, ConfigLayerSource::User { file: user_file });
assert_layers_user_then_optional_system(&layers, user_file)?;

Ok(())
}
@@ -137,9 +136,7 @@ view_image = false
);

let layers = layers.expect("layers present");
assert_eq!(layers.len(), 2);
assert_eq!(layers[0].name, ConfigLayerSource::SessionFlags);
assert_eq!(layers[1].name, ConfigLayerSource::User { file: user_file });
assert_layers_user_then_optional_system(&layers, user_file)?;

Ok(())
}
@@ -211,7 +208,7 @@ writable_roots = [{}]
assert_eq!(config.model.as_deref(), Some("gpt-system"));
assert_eq!(
origins.get("model").expect("origin").name,
ConfigLayerSource::System {
ConfigLayerSource::LegacyManagedConfigTomlFromFile {
file: managed_file.clone(),
}
);
@@ -219,7 +216,7 @@ writable_roots = [{}]
assert_eq!(config.approval_policy, Some(AskForApproval::Never));
assert_eq!(
origins.get("approval_policy").expect("origin").name,
ConfigLayerSource::System {
ConfigLayerSource::LegacyManagedConfigTomlFromFile {
file: managed_file.clone(),
}
);
@@ -242,7 +239,7 @@ writable_roots = [{}]
.get("sandbox_workspace_write.writable_roots.0")
.expect("origin")
.name,
ConfigLayerSource::System {
ConfigLayerSource::LegacyManagedConfigTomlFromFile {
file: managed_file.clone(),
}
);
@@ -259,28 +256,23 @@ writable_roots = [{}]
);

let layers = layers.expect("layers present");
assert_eq!(layers.len(), 3);
assert_eq!(
layers[0].name,
ConfigLayerSource::System { file: managed_file }
);
assert_eq!(layers[1].name, ConfigLayerSource::SessionFlags);
assert_eq!(layers[2].name, ConfigLayerSource::User { file: user_file });
assert_layers_managed_user_then_optional_system(&layers, managed_file, user_file)?;

Ok(())
}

#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn config_value_write_replaces_value() -> Result<()> {
let codex_home = TempDir::new()?;
let temp_dir = TempDir::new()?;
let codex_home = temp_dir.path().canonicalize()?;
write_config(
&codex_home,
&temp_dir,
r#"
model = "gpt-old"
"#,
)?;

let mut mcp = McpProcess::new(codex_home.path()).await?;
let mut mcp = McpProcess::new(&codex_home).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;

let read_id = mcp
@@ -311,13 +303,7 @@ model = "gpt-old"
)
.await??;
let write: ConfigWriteResponse = to_response(write_resp)?;
let expected_file_path = codex_home
.path()
.join("config.toml")
.canonicalize()
.unwrap()
.display()
.to_string();
let expected_file_path = AbsolutePathBuf::resolve_path_against_base("config.toml", codex_home)?;

assert_eq!(write.status, WriteStatus::Ok);
assert_eq!(write.file_path, expected_file_path);
@@ -380,16 +366,17 @@ model = "gpt-old"

#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn config_batch_write_applies_multiple_edits() -> Result<()> {
let codex_home = TempDir::new()?;
write_config(&codex_home, "")?;
let tmp_dir = TempDir::new()?;
let codex_home = tmp_dir.path().canonicalize()?;
write_config(&tmp_dir, "")?;

let mut mcp = McpProcess::new(codex_home.path()).await?;
let mut mcp = McpProcess::new(&codex_home).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;

let writable_root = test_tmp_path_buf();
let batch_id = mcp
.send_config_batch_write_request(ConfigBatchWriteParams {
file_path: Some(codex_home.path().join("config.toml").display().to_string()),
file_path: Some(codex_home.join("config.toml").display().to_string()),
edits: vec![
ConfigEdit {
key_path: "sandbox_mode".to_string(),
@@ -415,13 +402,7 @@ async fn config_batch_write_applies_multiple_edits() -> Result<()> {
.await??;
let batch_write: ConfigWriteResponse = to_response(batch_resp)?;
assert_eq!(batch_write.status, WriteStatus::Ok);
let expected_file_path = codex_home
.path()
.join("config.toml")
.canonicalize()
.unwrap()
.display()
.to_string();
let expected_file_path = AbsolutePathBuf::resolve_path_against_base("config.toml", codex_home)?;
assert_eq!(batch_write.file_path, expected_file_path);

let read_id = mcp
@@ -446,3 +427,50 @@ async fn config_batch_write_applies_multiple_edits() -> Result<()> {

Ok(())
}

fn assert_layers_user_then_optional_system(
layers: &[codex_app_server_protocol::ConfigLayer],
user_file: AbsolutePathBuf,
) -> Result<()> {
if cfg!(unix) {
let system_file = AbsolutePathBuf::from_absolute_path(SYSTEM_CONFIG_TOML_FILE_UNIX)?;
assert_eq!(layers.len(), 2);
assert_eq!(layers[0].name, ConfigLayerSource::User { file: user_file });
assert_eq!(
layers[1].name,
ConfigLayerSource::System { file: system_file }
);
} else {
assert_eq!(layers.len(), 1);
assert_eq!(layers[0].name, ConfigLayerSource::User { file: user_file });
}
Ok(())
}

fn assert_layers_managed_user_then_optional_system(
layers: &[codex_app_server_protocol::ConfigLayer],
managed_file: AbsolutePathBuf,
user_file: AbsolutePathBuf,
) -> Result<()> {
if cfg!(unix) {
let system_file = AbsolutePathBuf::from_absolute_path(SYSTEM_CONFIG_TOML_FILE_UNIX)?;
assert_eq!(layers.len(), 3);
assert_eq!(
layers[0].name,
ConfigLayerSource::LegacyManagedConfigTomlFromFile { file: managed_file }
);
assert_eq!(layers[1].name, ConfigLayerSource::User { file: user_file });
assert_eq!(
layers[2].name,
ConfigLayerSource::System { file: system_file }
);
} else {
assert_eq!(layers.len(), 2);
assert_eq!(
layers[0].name,
ConfigLayerSource::LegacyManagedConfigTomlFromFile { file: managed_file }
);
assert_eq!(layers[1].name, ConfigLayerSource::User { file: user_file });
}
Ok(())
}

@@ -1,6 +1,7 @@
mod account;
mod config_rpc;
mod model_list;
mod output_schema;
mod rate_limits;
mod review;
mod thread_archive;

@@ -47,33 +47,6 @@ async fn list_models_returns_all_models_with_large_limit() -> Result<()> {
} = to_response::<ModelListResponse>(response)?;

let expected_models = vec![
Model {
id: "gpt-5.1".to_string(),
model: "gpt-5.1".to_string(),
display_name: "gpt-5.1".to_string(),
description: "Broad world knowledge with strong general reasoning.".to_string(),
supported_reasoning_efforts: vec![
ReasoningEffortOption {
reasoning_effort: ReasoningEffort::Low,
description: "Balances speed with some reasoning; useful for straightforward \
queries and short explanations"
.to_string(),
},
ReasoningEffortOption {
reasoning_effort: ReasoningEffort::Medium,
description: "Provides a solid balance of reasoning depth and latency for \
general-purpose tasks"
.to_string(),
},
ReasoningEffortOption {
reasoning_effort: ReasoningEffort::High,
description: "Maximizes reasoning depth for complex or ambiguous problems"
.to_string(),
},
],
default_reasoning_effort: ReasoningEffort::Medium,
is_default: true,
},
Model {
id: "gpt-5.2".to_string(),
model: "gpt-5.2".to_string(),
@@ -105,7 +78,7 @@ async fn list_models_returns_all_models_with_large_limit() -> Result<()> {
},
],
default_reasoning_effort: ReasoningEffort::Medium,
is_default: false,
is_default: true,
},
Model {
id: "gpt-5.1-codex-mini".to_string(),
@@ -127,33 +100,37 @@ async fn list_models_returns_all_models_with_large_limit() -> Result<()> {
is_default: false,
},
Model {
id: "gpt-5.1-codex".to_string(),
model: "gpt-5.1-codex".to_string(),
display_name: "gpt-5.1-codex".to_string(),
description: "Optimized for codex.".to_string(),
id: "gpt-5.1-codex-max".to_string(),
model: "gpt-5.1-codex-max".to_string(),
display_name: "gpt-5.1-codex-max".to_string(),
description: "Codex-optimized flagship for deep and fast reasoning.".to_string(),
supported_reasoning_efforts: vec![
ReasoningEffortOption {
reasoning_effort: ReasoningEffort::Low,
description: "Fastest responses with limited reasoning".to_string(),
description: "Fast responses with lighter reasoning".to_string(),
},
ReasoningEffortOption {
reasoning_effort: ReasoningEffort::Medium,
description: "Dynamically adjusts reasoning based on the task".to_string(),
description: "Balances speed and reasoning depth for everyday tasks"
.to_string(),
},
ReasoningEffortOption {
reasoning_effort: ReasoningEffort::High,
description: "Maximizes reasoning depth for complex or ambiguous problems"
.to_string(),
description: "Greater reasoning depth for complex problems".to_string(),
},
ReasoningEffortOption {
reasoning_effort: ReasoningEffort::XHigh,
description: "Extra high reasoning depth for complex problems".to_string(),
},
],
default_reasoning_effort: ReasoningEffort::Medium,
is_default: false,
},
Model {
id: "gpt-5.1-codex-max".to_string(),
model: "gpt-5.1-codex-max".to_string(),
display_name: "gpt-5.1-codex-max".to_string(),
description: "Latest Codex-optimized flagship for deep and fast reasoning.".to_string(),
id: "gpt-5.2-codex".to_string(),
model: "gpt-5.2-codex".to_string(),
display_name: "gpt-5.2-codex".to_string(),
description: "Latest frontier agentic coding model.".to_string(),
supported_reasoning_efforts: vec![
ReasoningEffortOption {
reasoning_effort: ReasoningEffort::Low,
@@ -210,7 +187,7 @@ async fn list_models_pagination_works() -> Result<()> {
} = to_response::<ModelListResponse>(first_response)?;

assert_eq!(first_items.len(), 1);
assert_eq!(first_items[0].id, "gpt-5.1");
assert_eq!(first_items[0].id, "gpt-5.2");
let next_cursor = first_cursor.ok_or_else(|| anyhow!("cursor for second page"))?;

let second_request = mcp
@@ -232,7 +209,7 @@ async fn list_models_pagination_works() -> Result<()> {
} = to_response::<ModelListResponse>(second_response)?;

assert_eq!(second_items.len(), 1);
assert_eq!(second_items[0].id, "gpt-5.2");
assert_eq!(second_items[0].id, "gpt-5.1-codex-mini");
let third_cursor = second_cursor.ok_or_else(|| anyhow!("cursor for third page"))?;

let third_request = mcp
@@ -254,7 +231,7 @@ async fn list_models_pagination_works() -> Result<()> {
} = to_response::<ModelListResponse>(third_response)?;

assert_eq!(third_items.len(), 1);
assert_eq!(third_items[0].id, "gpt-5.1-codex-mini");
assert_eq!(third_items[0].id, "gpt-5.1-codex-max");
let fourth_cursor = third_cursor.ok_or_else(|| anyhow!("cursor for fourth page"))?;

let fourth_request = mcp
@@ -276,30 +253,8 @@ async fn list_models_pagination_works() -> Result<()> {
} = to_response::<ModelListResponse>(fourth_response)?;

assert_eq!(fourth_items.len(), 1);
assert_eq!(fourth_items[0].id, "gpt-5.1-codex");
let fifth_cursor = fourth_cursor.ok_or_else(|| anyhow!("cursor for fifth page"))?;

let fifth_request = mcp
.send_list_models_request(ModelListParams {
limit: Some(1),
cursor: Some(fifth_cursor.clone()),
})
.await?;

let fifth_response: JSONRPCResponse = timeout(
DEFAULT_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(fifth_request)),
)
.await??;

let ModelListResponse {
data: fifth_items,
next_cursor: fifth_cursor,
} = to_response::<ModelListResponse>(fifth_response)?;

assert_eq!(fifth_items.len(), 1);
assert_eq!(fifth_items[0].id, "gpt-5.1-codex-max");
assert!(fifth_cursor.is_none());
assert_eq!(fourth_items[0].id, "gpt-5.2-codex");
assert!(fourth_cursor.is_none());
Ok(())
}

codex-rs/app-server/tests/suite/v2/output_schema.rs (new file, 231 lines)
@@ -0,0 +1,231 @@
use anyhow::Result;
use app_test_support::McpProcess;
use app_test_support::to_response;
use codex_app_server_protocol::JSONRPCResponse;
use codex_app_server_protocol::RequestId;
use codex_app_server_protocol::ThreadStartParams;
use codex_app_server_protocol::ThreadStartResponse;
use codex_app_server_protocol::TurnStartParams;
use codex_app_server_protocol::TurnStartResponse;
use codex_app_server_protocol::UserInput as V2UserInput;
use core_test_support::responses;
use core_test_support::skip_if_no_network;
use pretty_assertions::assert_eq;
use std::path::Path;
use tempfile::TempDir;
use tokio::time::timeout;

const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10);

#[tokio::test]
async fn turn_start_accepts_output_schema_v2() -> Result<()> {
skip_if_no_network!(Ok(()));

let server = responses::start_mock_server().await;
let body = responses::sse(vec![
responses::ev_response_created("resp-1"),
responses::ev_assistant_message("msg-1", "Done"),
responses::ev_completed("resp-1"),
]);
let response_mock = responses::mount_sse_once(&server, body).await;

let codex_home = TempDir::new()?;
create_config_toml(codex_home.path(), &server.uri())?;

let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;

let thread_req = mcp
.send_thread_start_request(ThreadStartParams {
..Default::default()
})
.await?;
let thread_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(thread_req)),
)
.await??;
let ThreadStartResponse { thread, .. } = to_response::<ThreadStartResponse>(thread_resp)?;

let output_schema = serde_json::json!({
"type": "object",
"properties": {
"answer": { "type": "string" }
},
"required": ["answer"],
"additionalProperties": false
});

let turn_req = mcp
.send_turn_start_request(TurnStartParams {
thread_id: thread.id.clone(),
input: vec![V2UserInput::Text {
text: "Hello".to_string(),
}],
output_schema: Some(output_schema.clone()),
..Default::default()
})
.await?;
let turn_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(turn_req)),
)
.await??;
let _turn: TurnStartResponse = to_response::<TurnStartResponse>(turn_resp)?;

timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("turn/completed"),
)
.await??;

let request = response_mock.single_request();
let payload = request.body_json();
let text = payload.get("text").expect("request missing text field");
let format = text
.get("format")
.expect("request missing text.format field");
assert_eq!(
format,
&serde_json::json!({
"name": "codex_output_schema",
"type": "json_schema",
"strict": true,
"schema": output_schema,
})
);

Ok(())
}

#[tokio::test]
async fn turn_start_output_schema_is_per_turn_v2() -> Result<()> {
skip_if_no_network!(Ok(()));

let server = responses::start_mock_server().await;
let body1 = responses::sse(vec![
responses::ev_response_created("resp-1"),
responses::ev_assistant_message("msg-1", "Done"),
responses::ev_completed("resp-1"),
]);
let response_mock1 = responses::mount_sse_once(&server, body1).await;

let codex_home = TempDir::new()?;
create_config_toml(codex_home.path(), &server.uri())?;

let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;

let thread_req = mcp
.send_thread_start_request(ThreadStartParams {
..Default::default()
})
.await?;
let thread_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(thread_req)),
)
.await??;
let ThreadStartResponse { thread, .. } = to_response::<ThreadStartResponse>(thread_resp)?;

let output_schema = serde_json::json!({
"type": "object",
"properties": {
"answer": { "type": "string" }
},
"required": ["answer"],
"additionalProperties": false
|
||||
});
|
||||
|
||||
let turn_req_1 = mcp
|
||||
.send_turn_start_request(TurnStartParams {
|
||||
thread_id: thread.id.clone(),
|
||||
input: vec![V2UserInput::Text {
|
||||
text: "Hello".to_string(),
|
||||
}],
|
||||
output_schema: Some(output_schema.clone()),
|
||||
..Default::default()
|
||||
})
|
||||
.await?;
|
||||
let turn_resp_1: JSONRPCResponse = timeout(
|
||||
DEFAULT_READ_TIMEOUT,
|
||||
mcp.read_stream_until_response_message(RequestId::Integer(turn_req_1)),
|
||||
)
|
||||
.await??;
|
||||
let _turn: TurnStartResponse = to_response::<TurnStartResponse>(turn_resp_1)?;
|
||||
|
||||
timeout(
|
||||
DEFAULT_READ_TIMEOUT,
|
||||
mcp.read_stream_until_notification_message("turn/completed"),
|
||||
)
|
||||
.await??;
|
||||
|
||||
let payload1 = response_mock1.single_request().body_json();
|
||||
assert_eq!(
|
||||
payload1.pointer("/text/format"),
|
||||
Some(&serde_json::json!({
|
||||
"name": "codex_output_schema",
|
||||
"type": "json_schema",
|
||||
"strict": true,
|
||||
"schema": output_schema,
|
||||
}))
|
||||
);
|
||||
|
||||
let body2 = responses::sse(vec![
|
||||
responses::ev_response_created("resp-2"),
|
||||
responses::ev_assistant_message("msg-2", "Done"),
|
||||
responses::ev_completed("resp-2"),
|
||||
]);
|
||||
let response_mock2 = responses::mount_sse_once(&server, body2).await;
|
||||
|
||||
let turn_req_2 = mcp
|
||||
.send_turn_start_request(TurnStartParams {
|
||||
thread_id: thread.id.clone(),
|
||||
input: vec![V2UserInput::Text {
|
||||
text: "Hello again".to_string(),
|
||||
}],
|
||||
output_schema: None,
|
||||
..Default::default()
|
||||
})
|
||||
.await?;
|
||||
let turn_resp_2: JSONRPCResponse = timeout(
|
||||
DEFAULT_READ_TIMEOUT,
|
||||
mcp.read_stream_until_response_message(RequestId::Integer(turn_req_2)),
|
||||
)
|
||||
.await??;
|
||||
let _turn: TurnStartResponse = to_response::<TurnStartResponse>(turn_resp_2)?;
|
||||
|
||||
timeout(
|
||||
DEFAULT_READ_TIMEOUT,
|
||||
mcp.read_stream_until_notification_message("turn/completed"),
|
||||
)
|
||||
.await??;
|
||||
|
||||
let payload2 = response_mock2.single_request().body_json();
|
||||
assert_eq!(payload2.pointer("/text/format"), None);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn create_config_toml(codex_home: &Path, server_uri: &str) -> std::io::Result<()> {
|
||||
let config_toml = codex_home.join("config.toml");
|
||||
std::fs::write(
|
||||
config_toml,
|
||||
format!(
|
||||
r#"
|
||||
model = "mock-model"
|
||||
approval_policy = "never"
|
||||
sandbox_mode = "read-only"
|
||||
|
||||
model_provider = "mock_provider"
|
||||
|
||||
[model_providers.mock_provider]
|
||||
name = "Mock provider for test"
|
||||
base_url = "{server_uri}/v1"
|
||||
wire_api = "responses"
|
||||
request_max_retries = 0
|
||||
stream_max_retries = 0
|
||||
"#
|
||||
),
|
||||
)
|
||||
}
|
||||
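A quick sketch of what these assertions pin down: when a turn supplies an output schema, the Responses request body carries it under `text.format`; when it does not, `text.format` is absent entirely. The following is a minimal illustration built from the assertions above, not an exhaustive request body (the surrounding request fields are omitted):

let with_schema = serde_json::json!({
    "text": {
        "format": {
            "name": "codex_output_schema",
            "type": "json_schema",
            "strict": true,
            "schema": { "type": "object", "properties": { "answer": { "type": "string" } } }
        }
    }
});
// A turn without an output schema simply omits "text.format",
// which is why the second test asserts pointer("/text/format") == None.
assert!(with_schema.pointer("/text/format").is_some());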
@@ -540,6 +540,7 @@ async fn turn_start_updates_sandbox_and_cwd_between_turns_v2() -> Result<()> {
            model: Some("mock-model".to_string()),
            effort: Some(ReasoningEffort::Medium),
            summary: Some(ReasoningSummary::Auto),
            output_schema: None,
        })
        .await?;
    timeout(
@@ -566,6 +567,7 @@ async fn turn_start_updates_sandbox_and_cwd_between_turns_v2() -> Result<()> {
            model: Some("mock-model".to_string()),
            effort: Some(ReasoningEffort::Medium),
            summary: Some(ReasoningSummary::Auto),
            output_schema: None,
        })
        .await?;
    timeout(
@@ -25,5 +25,6 @@ tree-sitter-bash = { workspace = true }
[dev-dependencies]
assert_cmd = { workspace = true }
assert_matches = { workspace = true }
codex-utils-cargo-bin = { workspace = true }
pretty_assertions = { workspace = true }
tempfile = { workspace = true }
@@ -1,8 +1,13 @@
use assert_cmd::prelude::*;
use assert_cmd::Command;
use std::fs;
use std::process::Command;
use tempfile::tempdir;

fn apply_patch_command() -> anyhow::Result<Command> {
    Ok(Command::new(codex_utils_cargo_bin::cargo_bin(
        "apply_patch",
    )?))
}

#[test]
fn test_apply_patch_cli_add_and_update() -> anyhow::Result<()> {
    let tmp = tempdir()?;
@@ -16,8 +21,7 @@ fn test_apply_patch_cli_add_and_update() -> anyhow::Result<()> {
+hello
*** End Patch"#
    );
    Command::cargo_bin("apply_patch")
        .expect("should find apply_patch binary")
    apply_patch_command()?
        .arg(add_patch)
        .current_dir(tmp.path())
        .assert()
@@ -34,8 +38,7 @@ fn test_apply_patch_cli_add_and_update() -> anyhow::Result<()> {
+world
*** End Patch"#
    );
    Command::cargo_bin("apply_patch")
        .expect("should find apply_patch binary")
    apply_patch_command()?
        .arg(update_patch)
        .current_dir(tmp.path())
        .assert()
@@ -59,10 +62,9 @@ fn test_apply_patch_cli_stdin_add_and_update() -> anyhow::Result<()> {
+hello
*** End Patch"#
    );
    let mut cmd =
        assert_cmd::Command::cargo_bin("apply_patch").expect("should find apply_patch binary");
    cmd.current_dir(tmp.path());
    cmd.write_stdin(add_patch)
    apply_patch_command()?
        .current_dir(tmp.path())
        .write_stdin(add_patch)
        .assert()
        .success()
        .stdout(format!("Success. Updated the following files:\nA {file}\n"));
@@ -77,10 +79,9 @@ fn test_apply_patch_cli_stdin_add_and_update() -> anyhow::Result<()> {
+world
*** End Patch"#
    );
    let mut cmd =
        assert_cmd::Command::cargo_bin("apply_patch").expect("should find apply_patch binary");
    cmd.current_dir(tmp.path());
    cmd.write_stdin(update_patch)
    apply_patch_command()?
        .current_dir(tmp.path())
        .write_stdin(update_patch)
        .assert()
        .success()
        .stdout(format!("Success. Updated the following files:\nM {file}\n"));
@@ -1,4 +1,3 @@
use assert_cmd::prelude::*;
use pretty_assertions::assert_eq;
use std::collections::BTreeMap;
use std::fs;
@@ -9,7 +8,8 @@ use tempfile::tempdir;

#[test]
fn test_apply_patch_scenarios() -> anyhow::Result<()> {
    for scenario in fs::read_dir("tests/fixtures/scenarios")? {
    let scenarios_dir = Path::new(env!("CARGO_MANIFEST_DIR")).join("tests/fixtures/scenarios");
    for scenario in fs::read_dir(scenarios_dir)? {
        let scenario = scenario?;
        let path = scenario.path();
        if path.is_dir() {
@@ -36,7 +36,7 @@ fn run_apply_patch_scenario(dir: &Path) -> anyhow::Result<()> {
    // Run apply_patch in the temporary directory. We intentionally do not assert
    // on the exit status here; the scenarios are specified purely in terms of
    // final filesystem state, which we compare below.
    Command::cargo_bin("apply_patch")?
    Command::new(codex_utils_cargo_bin::cargo_bin("apply_patch")?)
        .arg(patch)
        .current_dir(tmp.path())
        .output()?;
@@ -82,11 +82,15 @@ fn snapshot_dir_recursive(
            continue;
        };
        let rel = stripped.to_path_buf();
        let file_type = entry.file_type()?;
        if file_type.is_dir() {

        // Under Buck2, files in `__srcs` are often materialized as symlinks.
        // Use `metadata()` (follows symlinks) so our fixture snapshots work
        // under both Cargo and Buck2.
        let metadata = fs::metadata(&path)?;
        if metadata.is_dir() {
            entries.insert(rel.clone(), Entry::Dir);
            snapshot_dir_recursive(base, &path, entries)?;
        } else if file_type.is_file() {
        } else if metadata.is_file() {
            let contents = fs::read(&path)?;
            entries.insert(rel, Entry::File(contents));
        }
@@ -98,12 +102,14 @@ fn copy_dir_recursive(src: &Path, dst: &Path) -> anyhow::Result<()> {
    for entry in fs::read_dir(src)? {
        let entry = entry?;
        let path = entry.path();
        let file_type = entry.file_type()?;
        let dest_path = dst.join(entry.file_name());
        if file_type.is_dir() {

        // See note in `snapshot_dir_recursive` about Buck2 symlink trees.
        let metadata = fs::metadata(&path)?;
        if metadata.is_dir() {
            fs::create_dir_all(&dest_path)?;
            copy_dir_recursive(&path, &dest_path)?;
        } else if file_type.is_file() {
        } else if metadata.is_file() {
            if let Some(parent) = dest_path.parent() {
                fs::create_dir_all(parent)?;
            }
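The fix above hinges on a standard-library distinction worth spelling out: `DirEntry::file_type()` describes the directory entry itself, so a symlink reports as a symlink, while `std::fs::metadata()` follows the link and describes its target. A minimal sketch of the difference (the function name and path are hypothetical):

use std::fs;
use std::path::Path;

fn is_really_a_dir(path: &Path) -> std::io::Result<bool> {
    // fs::symlink_metadata() would describe the link itself;
    // fs::metadata() follows it, which is what the fixture snapshots
    // need under Buck2's symlinked `__srcs` trees.
    Ok(fs::metadata(path)?.is_dir())
}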
@@ -5,13 +5,13 @@ use std::path::Path;
use tempfile::tempdir;

fn run_apply_patch_in_dir(dir: &Path, patch: &str) -> anyhow::Result<assert_cmd::assert::Assert> {
    let mut cmd = Command::cargo_bin("apply_patch")?;
    let mut cmd = Command::new(codex_utils_cargo_bin::cargo_bin("apply_patch")?);
    cmd.current_dir(dir);
    Ok(cmd.arg(patch).assert())
}

fn apply_patch_command(dir: &Path) -> anyhow::Result<Command> {
    let mut cmd = Command::cargo_bin("apply_patch")?;
    let mut cmd = Command::new(codex_utils_cargo_bin::cargo_bin("apply_patch")?);
    cmd.current_dir(dir);
    Ok(cmd)
}
@@ -3,7 +3,6 @@ use std::path::PathBuf;
use clap::Parser;
use codex_common::CliConfigOverrides;
use codex_core::config::Config;
use codex_core::config::ConfigOverrides;

use crate::chatgpt_token::init_chatgpt_token_from_auth;
use crate::get_task::GetTaskResponse;
@@ -28,7 +27,6 @@ pub async fn run_apply_command(
            .config_overrides
            .parse_overrides()
            .map_err(anyhow::Error::msg)?,
        ConfigOverrides::default(),
    )
    .await?;
@@ -37,13 +37,13 @@ codex-rmcp-client = { workspace = true }
codex-stdio-to-uds = { workspace = true }
codex-tui = { workspace = true }
codex-tui2 = { workspace = true }
codex-utils-absolute-path = { workspace = true }
ctor = { workspace = true }
libc = { workspace = true }
owo-colors = { workspace = true }
regex-lite = { workspace = true}
regex-lite = { workspace = true }
serde_json = { workspace = true }
supports-color = { workspace = true }
toml = { workspace = true }
tokio = { workspace = true, features = [
    "io-std",
    "macros",
@@ -51,6 +51,7 @@ tokio = { workspace = true, features = [
    "rt-multi-thread",
    "signal",
] }
toml = { workspace = true }
tracing = { workspace = true }

[target.'cfg(target_os = "windows")'.dependencies]
@@ -59,6 +60,7 @@ codex_windows_sandbox = { package = "codex-windows-sandbox", path = "../windows-
[dev-dependencies]
assert_cmd = { workspace = true }
assert_matches = { workspace = true }
codex-utils-cargo-bin = { workspace = true }
predicates = { workspace = true }
pretty_assertions = { workspace = true }
tempfile = { workspace = true }
@@ -109,7 +109,7 @@ async fn run_command_under_sandbox(
    log_denials: bool,
) -> anyhow::Result<()> {
    let sandbox_mode = create_sandbox_mode(full_auto);
    let config = Config::load_with_cli_overrides(
    let config = Config::load_with_cli_overrides_and_harness_overrides(
        config_overrides
            .parse_overrides()
            .map_err(anyhow::Error::msg)?,
@@ -140,7 +140,7 @@ async fn run_command_under_sandbox(
        use codex_windows_sandbox::run_windows_sandbox_capture;
        use codex_windows_sandbox::run_windows_sandbox_capture_elevated;

        let policy_str = serde_json::to_string(&config.sandbox_policy)?;
        let policy_str = serde_json::to_string(config.sandbox_policy.get())?;

        let sandbox_cwd = sandbox_policy_cwd.clone();
        let cwd_clone = cwd.clone();
@@ -216,7 +216,7 @@ async fn run_command_under_sandbox(
            spawn_command_under_seatbelt(
                command,
                cwd,
                &config.sandbox_policy,
                config.sandbox_policy.get(),
                sandbox_policy_cwd.as_path(),
                stdio_policy,
                env,
@@ -232,7 +232,7 @@ async fn run_command_under_sandbox(
                codex_linux_sandbox_exe,
                command,
                cwd,
                &config.sandbox_policy,
                config.sandbox_policy.get(),
                sandbox_policy_cwd.as_path(),
                stdio_policy,
                env,
@@ -6,7 +6,6 @@ use codex_core::auth::CLIENT_ID;
use codex_core::auth::login_with_api_key;
use codex_core::auth::logout;
use codex_core::config::Config;
use codex_core::config::ConfigOverrides;
use codex_login::ServerOptions;
use codex_login::run_device_code_login;
use codex_login::run_login_server;
@@ -210,8 +209,7 @@ async fn load_config_or_exit(cli_config_overrides: CliConfigOverrides) -> Config
        }
    };

    let config_overrides = ConfigOverrides::default();
    match Config::load_with_cli_overrides(cli_overrides, config_overrides).await {
    match Config::load_with_cli_overrides(cli_overrides).await {
        Ok(config) => config,
        Err(e) => {
            eprintln!("Error loading configuration: {e}");
@@ -44,6 +44,7 @@ use codex_core::features::Feature;
use codex_core::features::FeatureOverrides;
use codex_core::features::Features;
use codex_core::features::is_known_feature_key;
use codex_utils_absolute_path::AbsolutePathBuf;

/// Codex CLI
///
@@ -631,7 +632,11 @@ async fn cli_main(codex_linux_sandbox_exe: Option<PathBuf>) -> anyhow::Result<()
                ..Default::default()
            };

            let config = Config::load_with_cli_overrides(cli_kv_overrides, overrides).await?;
            let config = Config::load_with_cli_overrides_and_harness_overrides(
                cli_kv_overrides,
                overrides,
            )
            .await?;
            for def in codex_core::features::FEATURES.iter() {
                let name = def.key;
                let stage = stage_str(def.stage);
@@ -683,7 +688,13 @@ async fn is_tui2_enabled(cli: &TuiCli) -> std::io::Result<bool> {
        .map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidInput, e))?;

    let codex_home = find_codex_home()?;
    let config_toml = load_config_as_toml_with_cli_overrides(&codex_home, cli_kv_overrides).await?;
    let cwd = cli.cwd.clone();
    let config_cwd = match cwd.as_deref() {
        Some(path) => AbsolutePathBuf::from_absolute_path(path)?,
        None => AbsolutePathBuf::current_dir()?,
    };
    let config_toml =
        load_config_as_toml_with_cli_overrides(&codex_home, &config_cwd, cli_kv_overrides).await?;
    let config_profile = config_toml.get_config_profile(cli.config_profile.clone())?;
    let overrides = FeatureOverrides::default();
    let features = Features::from_config(&config_toml, &config_profile, overrides);
@@ -8,21 +8,17 @@ use clap::ArgGroup;
use codex_common::CliConfigOverrides;
use codex_common::format_env_display::format_env_display;
use codex_core::config::Config;
use codex_core::config::ConfigOverrides;
use codex_core::config::edit::ConfigEditsBuilder;
use codex_core::config::find_codex_home;
use codex_core::config::load_global_mcp_servers;
use codex_core::config::types::McpServerConfig;
use codex_core::config::types::McpServerTransportConfig;
use codex_core::features::Feature;
use codex_core::mcp::auth::compute_auth_statuses;
use codex_core::protocol::McpAuthStatus;
use codex_rmcp_client::delete_oauth_tokens;
use codex_rmcp_client::perform_oauth_login;
use codex_rmcp_client::supports_oauth_login;

/// [experimental] Launch Codex as an MCP server or manage configured MCP servers.
///
/// Subcommands:
/// - `serve` — run the MCP server on stdio
/// - `list` — list configured servers (with `--json`)
@@ -40,24 +36,11 @@ pub struct McpCli {

#[derive(Debug, clap::Subcommand)]
pub enum McpSubcommand {
    /// [experimental] List configured MCP servers.
    List(ListArgs),

    /// [experimental] Show details for a configured MCP server.
    Get(GetArgs),

    /// [experimental] Add a global MCP server entry.
    Add(AddArgs),

    /// [experimental] Remove a global MCP server entry.
    Remove(RemoveArgs),

    /// [experimental] Authenticate with a configured MCP server via OAuth.
    /// Requires features.rmcp_client = true in config.toml.
    Login(LoginArgs),

    /// [experimental] Remove stored OAuth credentials for a server.
    /// Requires features.rmcp_client = true in config.toml.
    Logout(LogoutArgs),
}

@@ -200,7 +183,7 @@ async fn run_add(config_overrides: &CliConfigOverrides, add_args: AddArgs) -> Re
    let overrides = config_overrides
        .parse_overrides()
        .map_err(anyhow::Error::msg)?;
    let config = Config::load_with_cli_overrides(overrides, ConfigOverrides::default())
    let config = Config::load_with_cli_overrides(overrides)
        .await
        .context("failed to load configuration")?;

@@ -283,24 +266,17 @@ async fn run_add(config_overrides: &CliConfigOverrides, add_args: AddArgs) -> Re
    {
        match supports_oauth_login(&url).await {
            Ok(true) => {
                if !config.features.enabled(Feature::RmcpClient) {
                    println!(
                        "MCP server supports login. Add `features.rmcp_client = true` \
                        to your config.toml and run `codex mcp login {name}` to login."
                    );
                } else {
                    println!("Detected OAuth support. Starting OAuth flow…");
                    perform_oauth_login(
                        &name,
                        &url,
                        config.mcp_oauth_credentials_store_mode,
                        http_headers.clone(),
                        env_http_headers.clone(),
                        &Vec::new(),
                    )
                    .await?;
                    println!("Successfully logged in.");
                }
                println!("Detected OAuth support. Starting OAuth flow…");
                perform_oauth_login(
                    &name,
                    &url,
                    config.mcp_oauth_credentials_store_mode,
                    http_headers.clone(),
                    env_http_headers.clone(),
                    &Vec::new(),
                )
                .await?;
                println!("Successfully logged in.");
            }
            Ok(false) => {}
            Err(_) => println!(
@@ -349,16 +325,10 @@ async fn run_login(config_overrides: &CliConfigOverrides, login_args: LoginArgs)
    let overrides = config_overrides
        .parse_overrides()
        .map_err(anyhow::Error::msg)?;
    let config = Config::load_with_cli_overrides(overrides, ConfigOverrides::default())
    let config = Config::load_with_cli_overrides(overrides)
        .await
        .context("failed to load configuration")?;

    if !config.features.enabled(Feature::RmcpClient) {
        bail!(
            "OAuth login is only supported when [features].rmcp_client is true in config.toml. See https://github.com/openai/codex/blob/main/docs/config.md#feature-flags for details."
        );
    }

    let LoginArgs { name, scopes } = login_args;

    let Some(server) = config.mcp_servers.get(&name) else {
@@ -392,7 +362,7 @@ async fn run_logout(config_overrides: &CliConfigOverrides, logout_args: LogoutAr
    let overrides = config_overrides
        .parse_overrides()
        .map_err(anyhow::Error::msg)?;
    let config = Config::load_with_cli_overrides(overrides, ConfigOverrides::default())
    let config = Config::load_with_cli_overrides(overrides)
        .await
        .context("failed to load configuration")?;

@@ -421,7 +391,7 @@ async fn run_list(config_overrides: &CliConfigOverrides, list_args: ListArgs) ->
    let overrides = config_overrides
        .parse_overrides()
        .map_err(anyhow::Error::msg)?;
    let config = Config::load_with_cli_overrides(overrides, ConfigOverrides::default())
    let config = Config::load_with_cli_overrides(overrides)
        .await
        .context("failed to load configuration")?;

@@ -678,7 +648,7 @@ async fn run_get(config_overrides: &CliConfigOverrides, get_args: GetArgs) -> Re
    let overrides = config_overrides
        .parse_overrides()
        .map_err(anyhow::Error::msg)?;
    let config = Config::load_with_cli_overrides(overrides, ConfigOverrides::default())
    let config = Config::load_with_cli_overrides(overrides)
        .await
        .context("failed to load configuration")?;
@@ -24,7 +24,7 @@ prefix_rule(
"#,
    )?;

    let output = Command::cargo_bin("codex")?
    let output = Command::new(codex_utils_cargo_bin::cargo_bin("codex")?)
        .env("CODEX_HOME", codex_home.path())
        .args([
            "execpolicy",

@@ -8,7 +8,7 @@ use pretty_assertions::assert_eq;
use tempfile::TempDir;

fn codex_command(codex_home: &Path) -> Result<assert_cmd::Command> {
    let mut cmd = assert_cmd::Command::cargo_bin("codex")?;
    let mut cmd = assert_cmd::Command::new(codex_utils_cargo_bin::cargo_bin("codex")?);
    cmd.env("CODEX_HOME", codex_home);
    Ok(cmd)
}

@@ -12,7 +12,7 @@ use serde_json::json;
use tempfile::TempDir;

fn codex_command(codex_home: &Path) -> Result<assert_cmd::Command> {
    let mut cmd = assert_cmd::Command::cargo_bin("codex")?;
    let mut cmd = assert_cmd::Command::new(codex_utils_cargo_bin::cargo_bin("codex")?);
    cmd.env("CODEX_HOME", codex_home);
    Ok(cmd)
}
@@ -37,6 +37,9 @@ unicode-width = { workspace = true }
owo-colors = { workspace = true, features = ["supports-colors"] }
supports-color = { workspace = true }

[dependencies.async-trait]
workspace = true

[dev-dependencies]
async-trait = { workspace = true }
pretty_assertions = { workspace = true }
@@ -34,10 +34,6 @@ pub struct ExecCommand {
    #[arg(long = "env", value_name = "ENV_ID")]
    pub environment: String,

    /// Git branch to run in Codex Cloud.
    #[arg(long = "branch", value_name = "BRANCH", default_value = "main")]
    pub branch: String,

    /// Number of assistant attempts (best-of-N).
    #[arg(
        long = "attempts",
@@ -45,6 +41,10 @@ pub struct ExecCommand {
        value_parser = parse_attempts
    )]
    pub attempts: usize,

    /// Git branch to run in Codex Cloud (defaults to current branch).
    #[arg(long = "branch", value_name = "BRANCH")]
    pub branch: Option<String>,
}

fn parse_attempts(input: &str) -> Result<usize, String> {
@@ -104,6 +104,54 @@ async fn init_backend(user_agent_suffix: &str) -> anyhow::Result<BackendContext>
    })
}

#[async_trait::async_trait]
trait GitInfoProvider {
    async fn default_branch_name(&self, path: &std::path::Path) -> Option<String>;

    async fn current_branch_name(&self, path: &std::path::Path) -> Option<String>;
}

struct RealGitInfo;

#[async_trait::async_trait]
impl GitInfoProvider for RealGitInfo {
    async fn default_branch_name(&self, path: &std::path::Path) -> Option<String> {
        codex_core::git_info::default_branch_name(path).await
    }

    async fn current_branch_name(&self, path: &std::path::Path) -> Option<String> {
        codex_core::git_info::current_branch_name(path).await
    }
}

async fn resolve_git_ref(branch_override: Option<&String>) -> String {
    resolve_git_ref_with_git_info(branch_override, &RealGitInfo).await
}

async fn resolve_git_ref_with_git_info(
    branch_override: Option<&String>,
    git_info: &impl GitInfoProvider,
) -> String {
    if let Some(branch) = branch_override {
        let branch = branch.trim();
        if !branch.is_empty() {
            return branch.to_string();
        }
    }

    if let Ok(cwd) = std::env::current_dir() {
        if let Some(branch) = git_info.current_branch_name(&cwd).await {
            branch
        } else if let Some(branch) = git_info.default_branch_name(&cwd).await {
            branch
        } else {
            "main".to_string()
        }
    } else {
        "main".to_string()
    }
}

async fn run_exec_command(args: crate::cli::ExecCommand) -> anyhow::Result<()> {
    let crate::cli::ExecCommand {
        query,
@@ -114,11 +162,12 @@ async fn run_exec_command(args: crate::cli::ExecCommand) -> anyhow::Result<()> {
    let ctx = init_backend("codex_cloud_tasks_exec").await?;
    let prompt = resolve_query_input(query)?;
    let env_id = resolve_environment_id(&ctx, &environment).await?;
    let git_ref = resolve_git_ref(branch.as_ref()).await;
    let created = codex_cloud_tasks_client::CloudBackend::create_task(
        &*ctx.backend,
        &env_id,
        &prompt,
        &branch,
        &git_ref,
        false,
        attempts,
    )
@@ -1362,17 +1411,7 @@ pub async fn run_main(cli: Cli, _codex_linux_sandbox_exe: Option<PathBuf>) -> an
    let backend = Arc::clone(&backend);
    let best_of_n = page.best_of_n;
    tokio::spawn(async move {
        let git_ref = if let Ok(cwd) = std::env::current_dir() {
            if let Some(branch) = codex_core::git_info::default_branch_name(&cwd).await {
                branch
            } else if let Some(branch) = codex_core::git_info::current_branch_name(&cwd).await {
                branch
            } else {
                "main".to_string()
            }
        } else {
            "main".to_string()
        };
        let git_ref = resolve_git_ref(None).await;

        let result = codex_cloud_tasks_client::CloudBackend::create_task(&*backend, &env, &text, &git_ref, false, best_of_n).await;
        let evt = match result {
@@ -1991,6 +2030,7 @@ fn pretty_lines_from_error(raw: &str) -> Vec<String> {
#[cfg(test)]
mod tests {
    use super::*;
    use crate::resolve_git_ref_with_git_info;
    use codex_cloud_tasks_client::DiffSummary;
    use codex_cloud_tasks_client::MockClient;
    use codex_cloud_tasks_client::TaskId;
@@ -2005,6 +2045,85 @@ mod tests {
    use ratatui::buffer::Buffer;
    use ratatui::layout::Rect;

    struct StubGitInfo {
        default_branch: Option<String>,
        current_branch: Option<String>,
    }

    impl StubGitInfo {
        fn new(default_branch: Option<String>, current_branch: Option<String>) -> Self {
            Self {
                default_branch,
                current_branch,
            }
        }
    }

    #[async_trait::async_trait]
    impl super::GitInfoProvider for StubGitInfo {
        async fn default_branch_name(&self, _path: &std::path::Path) -> Option<String> {
            self.default_branch.clone()
        }

        async fn current_branch_name(&self, _path: &std::path::Path) -> Option<String> {
            self.current_branch.clone()
        }
    }

    #[tokio::test]
    async fn branch_override_is_used_when_provided() {
        let git_ref = resolve_git_ref_with_git_info(
            Some(&"feature/override".to_string()),
            &StubGitInfo::new(None, None),
        )
        .await;

        assert_eq!(git_ref, "feature/override");
    }

    #[tokio::test]
    async fn trims_override_whitespace() {
        let git_ref = resolve_git_ref_with_git_info(
            Some(&" feature/spaces ".to_string()),
            &StubGitInfo::new(None, None),
        )
        .await;

        assert_eq!(git_ref, "feature/spaces");
    }

    #[tokio::test]
    async fn prefers_current_branch_when_available() {
        let git_ref = resolve_git_ref_with_git_info(
            None,
            &StubGitInfo::new(
                Some("default-main".to_string()),
                Some("feature/current".to_string()),
            ),
        )
        .await;

        assert_eq!(git_ref, "feature/current");
    }

    #[tokio::test]
    async fn falls_back_to_current_branch_when_default_is_missing() {
        let git_ref = resolve_git_ref_with_git_info(
            None,
            &StubGitInfo::new(None, Some("develop".to_string())),
        )
        .await;

        assert_eq!(git_ref, "develop");
    }

    #[tokio::test]
    async fn falls_back_to_main_when_no_git_info_is_available() {
        let git_ref = resolve_git_ref_with_git_info(None, &StubGitInfo::new(None, None)).await;

        assert_eq!(git_ref, "main");
    }

    #[test]
    fn format_task_status_lines_with_diff_and_label() {
        let now = Utc::now();
@@ -5,7 +5,6 @@ use chrono::Utc;
use reqwest::header::HeaderMap;

use codex_core::config::Config;
use codex_core::config::ConfigOverrides;
use codex_login::AuthManager;

pub fn set_user_agent_suffix(suffix: &str) {
@@ -62,9 +61,7 @@ pub fn extract_chatgpt_account_id(token: &str) -> Option<String> {

pub async fn load_auth_manager() -> Option<AuthManager> {
    // TODO: pass in cli overrides once cloud tasks properly support them.
    let config = Config::load_with_cli_overrides(Vec::new(), ConfigOverrides::default())
        .await
        .ok()?;
    let config = Config::load_with_cli_overrides(Vec::new()).await.ok()?;
    Some(AuthManager::new(
        config.codex_home,
        false,
@@ -59,6 +59,7 @@ pub enum ResponseEvent {
        summary_index: i64,
    },
    RateLimits(RateLimitSnapshot),
    ModelsEtag(String),
}

#[derive(Debug, Serialize, Clone)]
@@ -152,6 +152,9 @@ impl Stream for AggregatedStream {
            Poll::Ready(Some(Ok(ResponseEvent::RateLimits(snapshot)))) => {
                return Poll::Ready(Some(Ok(ResponseEvent::RateLimits(snapshot))));
            }
            Poll::Ready(Some(Ok(ResponseEvent::ModelsEtag(etag)))) => {
                return Poll::Ready(Some(Ok(ResponseEvent::ModelsEtag(etag))));
            }
            Poll::Ready(Some(Ok(ResponseEvent::Completed {
                response_id,
                token_usage,
@@ -5,6 +5,7 @@ use crate::provider::Provider;
use crate::telemetry::run_with_request_telemetry;
use codex_client::HttpTransport;
use codex_client::RequestTelemetry;
use codex_protocol::openai_models::ModelInfo;
use codex_protocol::openai_models::ModelsResponse;
use http::HeaderMap;
use http::Method;
@@ -41,7 +42,7 @@ impl<T: HttpTransport, A: AuthProvider> ModelsClient<T, A> {
        &self,
        client_version: &str,
        extra_headers: HeaderMap,
    ) -> Result<ModelsResponse, ApiError> {
    ) -> Result<(Vec<ModelInfo>, Option<String>), ApiError> {
        let builder = || {
            let mut req = self.provider.build_request(Method::GET, self.path());
            req.headers.extend(extra_headers.clone());
@@ -66,7 +67,7 @@ impl<T: HttpTransport, A: AuthProvider> ModelsClient<T, A> {
            .and_then(|value| value.to_str().ok())
            .map(ToString::to_string);

        let ModelsResponse { models, etag } = serde_json::from_slice::<ModelsResponse>(&resp.body)
        let ModelsResponse { models } = serde_json::from_slice::<ModelsResponse>(&resp.body)
            .map_err(|e| {
                ApiError::Stream(format!(
                    "failed to decode models response: {e}; body: {}",
@@ -74,9 +75,7 @@ impl<T: HttpTransport, A: AuthProvider> ModelsClient<T, A> {
                ))
            })?;

        let etag = header_etag.unwrap_or(etag);

        Ok(ModelsResponse { models, etag })
        Ok((models, header_etag))
    }
}

@@ -102,16 +101,15 @@ mod tests {
    struct CapturingTransport {
        last_request: Arc<Mutex<Option<Request>>>,
        body: Arc<ModelsResponse>,
        etag: Option<String>,
    }

    impl Default for CapturingTransport {
        fn default() -> Self {
            Self {
                last_request: Arc::new(Mutex::new(None)),
                body: Arc::new(ModelsResponse {
                    models: Vec::new(),
                    etag: String::new(),
                }),
                body: Arc::new(ModelsResponse { models: Vec::new() }),
                etag: None,
            }
        }
    }
@@ -122,8 +120,8 @@ mod tests {
            *self.last_request.lock().unwrap() = Some(req);
            let body = serde_json::to_vec(&*self.body).unwrap();
            let mut headers = HeaderMap::new();
            if !self.body.etag.is_empty() {
                headers.insert(ETAG, self.body.etag.parse().unwrap());
            if let Some(etag) = &self.etag {
                headers.insert(ETAG, etag.parse().unwrap());
            }
            Ok(Response {
                status: StatusCode::OK,
@@ -166,14 +164,12 @@ mod tests {

    #[tokio::test]
    async fn appends_client_version_query() {
        let response = ModelsResponse {
            models: Vec::new(),
            etag: String::new(),
        };
        let response = ModelsResponse { models: Vec::new() };

        let transport = CapturingTransport {
            last_request: Arc::new(Mutex::new(None)),
            body: Arc::new(response),
            etag: None,
        };

        let client = ModelsClient::new(
@@ -182,12 +178,12 @@ mod tests {
            DummyAuth,
        );

        let result = client
        let (models, _) = client
            .list_models("0.99.0", HeaderMap::new())
            .await
            .expect("request should succeed");

        assert_eq!(result.models.len(), 0);
        assert_eq!(models.len(), 0);

        let url = transport
            .last_request
@@ -227,17 +223,16 @@ mod tests {
                "truncation_policy": {"mode": "bytes", "limit": 10_000},
                "supports_parallel_tool_calls": false,
                "context_window": null,
                "reasoning_summary_format": "none",
                "experimental_supported_tools": [],
            }))
            .unwrap(),
            ],
            etag: String::new(),
        };

        let transport = CapturingTransport {
            last_request: Arc::new(Mutex::new(None)),
            body: Arc::new(response),
            etag: None,
        };

        let client = ModelsClient::new(
@@ -246,27 +241,25 @@ mod tests {
            DummyAuth,
        );

        let result = client
        let (models, _) = client
            .list_models("0.99.0", HeaderMap::new())
            .await
            .expect("request should succeed");

        assert_eq!(result.models.len(), 1);
        assert_eq!(result.models[0].slug, "gpt-test");
        assert_eq!(result.models[0].supported_in_api, true);
        assert_eq!(result.models[0].priority, 1);
        assert_eq!(models.len(), 1);
        assert_eq!(models[0].slug, "gpt-test");
        assert_eq!(models[0].supported_in_api, true);
        assert_eq!(models[0].priority, 1);
    }

    #[tokio::test]
    async fn list_models_includes_etag() {
        let response = ModelsResponse {
            models: Vec::new(),
            etag: "\"abc\"".to_string(),
        };
        let response = ModelsResponse { models: Vec::new() };

        let transport = CapturingTransport {
            last_request: Arc::new(Mutex::new(None)),
            body: Arc::new(response),
            etag: Some("\"abc\"".to_string()),
        };

        let client = ModelsClient::new(
@@ -275,12 +268,12 @@ mod tests {
            DummyAuth,
        );

        let result = client
        let (models, etag) = client
            .list_models("0.1.0", HeaderMap::new())
            .await
            .expect("request should succeed");

        assert_eq!(result.models.len(), 0);
        assert_eq!(result.etag, "\"abc\"");
        assert_eq!(models.len(), 0);
        assert_eq!(etag, Some("\"abc\"".to_string()));
    }
}
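The net effect of this change is that the models ETag now comes solely from the HTTP `ETag` response header rather than from a field in the JSON body. A minimal sketch of the header-first extraction, mirroring the `.to_str().ok()` chain the diff itself uses (the function name is hypothetical):

use http::header::ETAG;
use http::HeaderMap;

// Returns the ETag advertised by the server, if any. `headers` is
// whatever HeaderMap the transport handed back with the response.
fn header_etag(headers: &HeaderMap) -> Option<String> {
    headers
        .get(ETAG)
        .and_then(|value| value.to_str().ok())
        .map(ToString::to_string)
}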
@@ -204,24 +204,16 @@ impl<'a> ChatRequestBuilder<'a> {
                    call_id,
                    ..
                } => {
                    let mut msg = json!({
                        "role": "assistant",
                        "content": null,
                        "tool_calls": [{
                            "id": call_id,
                            "type": "function",
                            "function": {
                                "name": name,
                                "arguments": arguments,
                            }
                        }]
                    let reasoning = reasoning_by_anchor_index.get(&idx).map(String::as_str);
                    let tool_call = json!({
                        "id": call_id,
                        "type": "function",
                        "function": {
                            "name": name,
                            "arguments": arguments,
                        }
                    });
                    if let Some(reasoning) = reasoning_by_anchor_index.get(&idx)
                        && let Some(obj) = msg.as_object_mut()
                    {
                        obj.insert("reasoning".to_string(), json!(reasoning));
                    }
                    messages.push(msg);
                    push_tool_call_message(&mut messages, tool_call, reasoning);
                }
                ResponseItem::LocalShellCall {
                    id,
@@ -229,22 +221,14 @@ impl<'a> ChatRequestBuilder<'a> {
                    status,
                    action,
                } => {
                    let mut msg = json!({
                        "role": "assistant",
                        "content": null,
                        "tool_calls": [{
                            "id": id.clone().unwrap_or_default(),
                            "type": "local_shell_call",
                            "status": status,
                            "action": action,
                        }]
                    let reasoning = reasoning_by_anchor_index.get(&idx).map(String::as_str);
                    let tool_call = json!({
                        "id": id.clone().unwrap_or_default(),
                        "type": "local_shell_call",
                        "status": status,
                        "action": action,
                    });
                    if let Some(reasoning) = reasoning_by_anchor_index.get(&idx)
                        && let Some(obj) = msg.as_object_mut()
                    {
                        obj.insert("reasoning".to_string(), json!(reasoning));
                    }
                    messages.push(msg);
                    push_tool_call_message(&mut messages, tool_call, reasoning);
                }
                ResponseItem::FunctionCallOutput { call_id, output } => {
                    let content_value = if let Some(items) = &output.content_items {
@@ -277,18 +261,16 @@ impl<'a> ChatRequestBuilder<'a> {
                    input,
                    status: _,
                } => {
                    messages.push(json!({
                        "role": "assistant",
                        "content": null,
                        "tool_calls": [{
                            "id": id,
                            "type": "custom",
                            "custom": {
                                "name": name,
                                "input": input,
                            }
                        }]
                    }));
                    let tool_call = json!({
                        "id": id,
                        "type": "custom",
                        "custom": {
                            "name": name,
                            "input": input,
                        }
                    });
                    let reasoning = reasoning_by_anchor_index.get(&idx).map(String::as_str);
                    push_tool_call_message(&mut messages, tool_call, reasoning);
                }
                ResponseItem::CustomToolCallOutput { call_id, output } => {
                    messages.push(json!({
@@ -328,11 +310,50 @@ impl<'a> ChatRequestBuilder<'a> {
    }
}

fn push_tool_call_message(messages: &mut Vec<Value>, tool_call: Value, reasoning: Option<&str>) {
    // Chat Completions requires that tool calls are grouped into a single assistant message
    // (with `tool_calls: [...]`) followed by tool role responses.
    if let Some(Value::Object(obj)) = messages.last_mut()
        && obj.get("role").and_then(Value::as_str) == Some("assistant")
        && obj.get("content").is_some_and(Value::is_null)
        && let Some(tool_calls) = obj.get_mut("tool_calls").and_then(Value::as_array_mut)
    {
        tool_calls.push(tool_call);
        if let Some(reasoning) = reasoning {
            if let Some(Value::String(existing)) = obj.get_mut("reasoning") {
                if !existing.is_empty() {
                    existing.push('\n');
                }
                existing.push_str(reasoning);
            } else {
                obj.insert(
                    "reasoning".to_string(),
                    Value::String(reasoning.to_string()),
                );
            }
        }
        return;
    }

    let mut msg = json!({
        "role": "assistant",
        "content": null,
        "tool_calls": [tool_call],
    });
    if let Some(reasoning) = reasoning
        && let Some(obj) = msg.as_object_mut()
    {
        obj.insert("reasoning".to_string(), json!(reasoning));
    }
    messages.push(msg);
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::provider::RetryConfig;
    use crate::provider::WireApi;
    use codex_protocol::models::FunctionCallOutputPayload;
    use codex_protocol::protocol::SessionSource;
    use codex_protocol::protocol::SubAgentSource;
    use http::HeaderValue;
@@ -385,4 +406,89 @@ mod tests {
            Some(&HeaderValue::from_static("review"))
        );
    }

    #[test]
    fn groups_consecutive_tool_calls_into_a_single_assistant_message() {
        let prompt_input = vec![
            ResponseItem::Message {
                id: None,
                role: "user".to_string(),
                content: vec![ContentItem::InputText {
                    text: "read these".to_string(),
                }],
            },
            ResponseItem::FunctionCall {
                id: None,
                name: "read_file".to_string(),
                arguments: r#"{"path":"a.txt"}"#.to_string(),
                call_id: "call-a".to_string(),
            },
            ResponseItem::FunctionCall {
                id: None,
                name: "read_file".to_string(),
                arguments: r#"{"path":"b.txt"}"#.to_string(),
                call_id: "call-b".to_string(),
            },
            ResponseItem::FunctionCall {
                id: None,
                name: "read_file".to_string(),
                arguments: r#"{"path":"c.txt"}"#.to_string(),
                call_id: "call-c".to_string(),
            },
            ResponseItem::FunctionCallOutput {
                call_id: "call-a".to_string(),
                output: FunctionCallOutputPayload {
                    content: "A".to_string(),
                    ..Default::default()
                },
            },
            ResponseItem::FunctionCallOutput {
                call_id: "call-b".to_string(),
                output: FunctionCallOutputPayload {
                    content: "B".to_string(),
                    ..Default::default()
                },
            },
            ResponseItem::FunctionCallOutput {
                call_id: "call-c".to_string(),
                output: FunctionCallOutputPayload {
                    content: "C".to_string(),
                    ..Default::default()
                },
            },
        ];

        let req = ChatRequestBuilder::new("gpt-test", "inst", &prompt_input, &[])
            .build(&provider())
            .expect("request");

        let messages = req
            .body
            .get("messages")
            .and_then(|v| v.as_array())
            .expect("messages array");
        // system + user + assistant(tool_calls=[...]) + 3 tool outputs
        assert_eq!(messages.len(), 6);

        assert_eq!(messages[0]["role"], "system");
        assert_eq!(messages[1]["role"], "user");

        let tool_calls_msg = &messages[2];
        assert_eq!(tool_calls_msg["role"], "assistant");
        assert_eq!(tool_calls_msg["content"], serde_json::Value::Null);
        let tool_calls = tool_calls_msg["tool_calls"]
            .as_array()
            .expect("tool_calls array");
        assert_eq!(tool_calls.len(), 3);
        assert_eq!(tool_calls[0]["id"], "call-a");
        assert_eq!(tool_calls[1]["id"], "call-b");
        assert_eq!(tool_calls[2]["id"], "call-c");

        assert_eq!(messages[3]["role"], "tool");
        assert_eq!(messages[3]["tool_call_id"], "call-a");
        assert_eq!(messages[4]["role"], "tool");
        assert_eq!(messages[4]["tool_call_id"], "call-b");
        assert_eq!(messages[5]["role"], "tool");
        assert_eq!(messages[5]["tool_call_id"], "call-c");
    }
}
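To make the grouping rule concrete: the helper above produces Chat Completions messages shaped roughly like the following (a sketch of the wire format the test asserts; the call ids and arguments are illustrative values taken from that test):

// One assistant message carries all consecutive tool calls...
let grouped = serde_json::json!([
    { "role": "assistant", "content": null, "tool_calls": [
        { "id": "call-a", "type": "function",
          "function": { "name": "read_file", "arguments": "{\"path\":\"a.txt\"}" } },
        { "id": "call-b", "type": "function",
          "function": { "name": "read_file", "arguments": "{\"path\":\"b.txt\"}" } }
    ]},
    // ...followed by one tool-role message per call_id.
    { "role": "tool", "tool_call_id": "call-a", "content": "A" },
    { "role": "tool", "tool_call_id": "call-b", "content": "B" }
]);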
@@ -30,6 +30,21 @@ pub(crate) fn spawn_chat_stream(
    ResponseStream { rx_event }
}

/// Processes Server-Sent Events from the legacy Chat Completions streaming API.
///
/// The upstream protocol terminates a streaming response with a final sentinel event
/// (`data: [DONE]`). Historically, some of our test stubs have emitted `data: DONE`
/// (without brackets) instead.
///
/// `eventsource_stream` delivers these sentinels as regular events rather than signaling
/// end-of-stream. If we try to parse them as JSON, we log and skip them, then keep
/// polling for more events.
///
/// On servers that keep the HTTP connection open after emitting the sentinel (notably
/// wiremock on Windows), skipping the sentinel means we never emit `ResponseEvent::Completed`.
/// Higher-level workflows/tests that wait for completion before issuing subsequent model
/// calls will then stall, which shows up as "expected N requests, got 1" verification
/// failures in the mock server.
pub async fn process_chat_sse<S>(
    stream: S,
    tx_event: mpsc::Sender<Result<ResponseEvent, ApiError>>,
@@ -57,6 +72,31 @@ pub async fn process_chat_sse<S>(
    let mut reasoning_item: Option<ResponseItem> = None;
    let mut completed_sent = false;

    async fn flush_and_complete(
        tx_event: &mpsc::Sender<Result<ResponseEvent, ApiError>>,
        reasoning_item: &mut Option<ResponseItem>,
        assistant_item: &mut Option<ResponseItem>,
    ) {
        if let Some(reasoning) = reasoning_item.take() {
            let _ = tx_event
                .send(Ok(ResponseEvent::OutputItemDone(reasoning)))
                .await;
        }

        if let Some(assistant) = assistant_item.take() {
            let _ = tx_event
                .send(Ok(ResponseEvent::OutputItemDone(assistant)))
                .await;
        }

        let _ = tx_event
            .send(Ok(ResponseEvent::Completed {
                response_id: String::new(),
                token_usage: None,
            }))
            .await;
    }

    loop {
        let start = Instant::now();
        let response = timeout(idle_timeout, stream.next()).await;
@@ -70,24 +110,8 @@ pub async fn process_chat_sse<S>(
                return;
            }
            Ok(None) => {
                if let Some(reasoning) = reasoning_item {
                    let _ = tx_event
                        .send(Ok(ResponseEvent::OutputItemDone(reasoning)))
                        .await;
                }

                if let Some(assistant) = assistant_item {
                    let _ = tx_event
                        .send(Ok(ResponseEvent::OutputItemDone(assistant)))
                        .await;
                }
                if !completed_sent {
                    let _ = tx_event
                        .send(Ok(ResponseEvent::Completed {
                            response_id: String::new(),
                            token_usage: None,
                        }))
                        .await;
                    flush_and_complete(&tx_event, &mut reasoning_item, &mut assistant_item).await;
                }
                return;
            }
@@ -101,16 +125,25 @@ pub async fn process_chat_sse<S>(

        trace!("SSE event: {}", sse.data);

        if sse.data.trim().is_empty() {
        let data = sse.data.trim();

        if data.is_empty() {
            continue;
        }

        let value: serde_json::Value = match serde_json::from_str(&sse.data) {
        if data == "[DONE]" || data == "DONE" {
            if !completed_sent {
                flush_and_complete(&tx_event, &mut reasoning_item, &mut assistant_item).await;
            }
            return;
        }

        let value: serde_json::Value = match serde_json::from_str(data) {
            Ok(val) => val,
            Err(err) => {
                debug!(
                    "Failed to parse ChatCompletions SSE event: {err}, data: {}",
                    &sse.data
                    data
                );
                continue;
            }
@@ -362,6 +395,16 @@ mod tests {
        body
    }

    /// Regression test: the stream should complete when we see a `[DONE]` sentinel.
    ///
    /// This is important for tests/mocks that don't immediately close the underlying
    /// connection after emitting the sentinel.
    #[tokio::test]
    async fn completes_on_done_sentinel_without_json() {
        let events = collect_events("event: message\ndata: [DONE]\n\n").await;
        assert_matches!(&events[..], [ResponseEvent::Completed { .. }]);
    }

    async fn collect_events(body: &str) -> Vec<ResponseEvent> {
        let reader = ReaderStream::new(std::io::Cursor::new(body.to_string()))
            .map_err(|err| codex_client::TransportError::Network(err.to_string()));
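The sentinel handling above boils down to a small classification step applied to each SSE `data` payload. A minimal hedged sketch of that decision logic in isolation (the enum and function names are hypothetical, introduced only to illustrate the three-way split):

/// Classify one SSE `data` payload the way the loop above does:
/// blank payloads are skipped, the `[DONE]`/`DONE` sentinels end the
/// stream, and everything else is treated as a JSON event.
enum SseData<'a> {
    Skip,
    Done,
    Json(&'a str),
}

fn classify(data: &str) -> SseData<'_> {
    let data = data.trim();
    if data.is_empty() {
        SseData::Skip
    } else if data == "[DONE]" || data == "DONE" {
        SseData::Done
    } else {
        SseData::Json(data)
    }
}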
@@ -51,11 +51,19 @@ pub fn spawn_response_stream(
    telemetry: Option<Arc<dyn SseTelemetry>>,
) -> ResponseStream {
    let rate_limits = parse_rate_limit(&stream_response.headers);
    let models_etag = stream_response
        .headers
        .get("X-Models-Etag")
        .and_then(|v| v.to_str().ok())
        .map(ToString::to_string);
    let (tx_event, rx_event) = mpsc::channel::<Result<ResponseEvent, ApiError>>(1600);
    tokio::spawn(async move {
        if let Some(snapshot) = rate_limits {
            let _ = tx_event.send(Ok(ResponseEvent::RateLimits(snapshot))).await;
        }
        if let Some(etag) = models_etag {
            let _ = tx_event.send(Ok(ResponseEvent::ModelsEtag(etag))).await;
        }
        process_sse(stream_response.bytes, tx_event, idle_timeout, telemetry).await;
    });
@@ -4,14 +4,12 @@ use codex_api::provider::Provider;
use codex_api::provider::RetryConfig;
use codex_api::provider::WireApi;
use codex_client::ReqwestTransport;
use codex_protocol::openai_models::ClientVersion;
use codex_protocol::openai_models::ConfigShellToolType;
use codex_protocol::openai_models::ModelInfo;
use codex_protocol::openai_models::ModelVisibility;
use codex_protocol::openai_models::ModelsResponse;
use codex_protocol::openai_models::ReasoningEffort;
use codex_protocol::openai_models::ReasoningEffortPreset;
use codex_protocol::openai_models::ReasoningSummaryFormat;
use codex_protocol::openai_models::TruncationPolicyConfig;
use http::HeaderMap;
use http::Method;
@@ -75,7 +73,6 @@ async fn models_client_hits_models_endpoint() {
        ],
        shell_type: ConfigShellToolType::ShellCommand,
        visibility: ModelVisibility::List,
        minimal_client_version: ClientVersion(0, 1, 0),
        supported_in_api: true,
        priority: 1,
        upgrade: None,
@@ -87,10 +84,8 @@ async fn models_client_hits_models_endpoint() {
        truncation_policy: TruncationPolicyConfig::bytes(10_000),
        supports_parallel_tool_calls: false,
        context_window: None,
        reasoning_summary_format: ReasoningSummaryFormat::None,
        experimental_supported_tools: Vec::new(),
        }],
        etag: String::new(),
    };

    Mock::given(method("GET"))
@@ -106,13 +101,13 @@ async fn models_client_hits_models_endpoint() {
    let transport = ReqwestTransport::new(reqwest::Client::new());
    let client = ModelsClient::new(transport, provider(&base_url), DummyAuth);

    let result = client
    let (models, _) = client
        .list_models("0.1.0", HeaderMap::new())
        .await
        .expect("models request should succeed");

    assert_eq!(result.models.len(), 1);
    assert_eq!(result.models[0].slug, "gpt-test");
    assert_eq!(models.len(), 1);
    assert_eq!(models[0].slug, "gpt-test");

    let received = server
        .received_requests()
@@ -69,6 +69,15 @@ impl ReqwestTransport {
#[async_trait]
impl HttpTransport for ReqwestTransport {
    async fn execute(&self, req: Request) -> Result<Response, TransportError> {
        if enabled!(Level::TRACE) {
            trace!(
                "{} to {}: {}",
                req.method,
                req.url,
                req.body.as_ref().unwrap_or_default()
            );
        }

        let builder = self.build(req)?;
        let resp = builder.send().await.map_err(Self::map_error)?;
        let status = resp.status();
@@ -21,3 +21,10 @@ toml = { workspace = true, optional = true }
cli = ["clap", "serde", "toml"]
elapsed = []
sandbox_summary = []

[dev-dependencies]
clap = { workspace = true, features = ["derive", "wrap_help"] }
codex-utils-absolute-path = { workspace = true }
pretty_assertions = { workspace = true }
serde = { workspace = true }
toml = { workspace = true }
@@ -10,7 +10,10 @@ pub fn create_config_summary_entries(config: &Config, model: &str) -> Vec<(&'sta
        ("model", model.to_string()),
        ("provider", config.model_provider_id.clone()),
        ("approval", config.approval_policy.value().to_string()),
        ("sandbox", summarize_sandbox_policy(&config.sandbox_policy)),
        (
            "sandbox",
            summarize_sandbox_policy(config.sandbox_policy.get()),
        ),
    ];
    if config.model_provider.wire_api == WireApi::Responses {
        let reasoning_effort = config
@@ -26,3 +26,22 @@ impl From<SandboxModeCliArg> for SandboxMode {
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use pretty_assertions::assert_eq;

    #[test]
    fn maps_cli_args_to_protocol_modes() {
        assert_eq!(SandboxMode::ReadOnly, SandboxModeCliArg::ReadOnly.into());
        assert_eq!(
            SandboxMode::WorkspaceWrite,
            SandboxModeCliArg::WorkspaceWrite.into()
        );
        assert_eq!(
            SandboxMode::DangerFullAccess,
            SandboxModeCliArg::DangerFullAccess.into()
        );
    }
}
@@ -1,9 +1,17 @@
|
||||
use codex_core::protocol::NetworkAccess;
|
||||
use codex_core::protocol::SandboxPolicy;
|
||||
|
||||
pub fn summarize_sandbox_policy(sandbox_policy: &SandboxPolicy) -> String {
|
||||
match sandbox_policy {
|
||||
SandboxPolicy::DangerFullAccess => "danger-full-access".to_string(),
|
||||
SandboxPolicy::ReadOnly => "read-only".to_string(),
|
||||
SandboxPolicy::ExternalSandbox { network_access } => {
|
||||
let mut summary = "external-sandbox".to_string();
|
||||
if matches!(network_access, NetworkAccess::Enabled) {
|
||||
summary.push_str(" (network access enabled)");
|
||||
}
|
||||
summary
|
||||
}
|
||||
SandboxPolicy::WorkspaceWrite {
|
||||
writable_roots,
|
||||
network_access,
|
||||
@@ -34,3 +42,45 @@ pub fn summarize_sandbox_policy(sandbox_policy: &SandboxPolicy) -> String {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use codex_utils_absolute_path::AbsolutePathBuf;
|
||||
use pretty_assertions::assert_eq;
|
||||
|
||||
#[test]
|
||||
fn summarizes_external_sandbox_without_network_access_suffix() {
|
||||
let summary = summarize_sandbox_policy(&SandboxPolicy::ExternalSandbox {
|
||||
network_access: NetworkAccess::Restricted,
|
||||
});
|
||||
assert_eq!(summary, "external-sandbox");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn summarizes_external_sandbox_with_enabled_network() {
|
||||
let summary = summarize_sandbox_policy(&SandboxPolicy::ExternalSandbox {
|
||||
network_access: NetworkAccess::Enabled,
|
||||
});
|
||||
assert_eq!(summary, "external-sandbox (network access enabled)");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn workspace_write_summary_still_includes_network_access() {
|
||||
let root = if cfg!(windows) { "C:\\repo" } else { "/repo" };
|
||||
let writable_root = AbsolutePathBuf::try_from(root).unwrap();
|
||||
let summary = summarize_sandbox_policy(&SandboxPolicy::WorkspaceWrite {
|
||||
writable_roots: vec![writable_root.clone()],
|
||||
network_access: true,
|
||||
exclude_tmpdir_env_var: true,
|
||||
exclude_slash_tmp: true,
|
||||
});
|
||||
assert_eq!(
|
||||
summary,
|
||||
format!(
|
||||
"workspace-write [workdir, {}] (network access enabled)",
|
||||
writable_root.to_string_lossy()
|
||||
)
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
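The config-summary and session hunks in this change replace direct `sandbox_policy` field access with `.get()`, `.set(...)`, and `.value()` calls, so the field is evidently wrapped in a `Constrained` type. A minimal sketch of what such a wrapper could look like, assuming a validator-function design; the validator field and constructor here are illustrative guesses, not the crate's actual definition:

```rust
use std::fmt;

// Hypothetical stand-in for the error type the diff references.
#[derive(Debug)]
pub struct ConstraintError {
    pub message: String,
}

impl fmt::Display for ConstraintError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{}", self.message)
    }
}

/// A value paired with a validation rule; `set` rejects updates that
/// violate the rule, which is why call sites read through `get()`.
pub struct Constrained<T> {
    value: T,
    validate: fn(&T) -> Result<(), ConstraintError>,
}

impl<T> Constrained<T> {
    pub fn new(value: T, validate: fn(&T) -> Result<(), ConstraintError>) -> Self {
        Self { value, validate }
    }

    pub fn get(&self) -> &T {
        &self.value
    }

    pub fn set(&mut self, value: T) -> Result<(), ConstraintError> {
        (self.validate)(&value)?;
        self.value = value;
        Ok(())
    }
}

impl<T: Copy> Constrained<T> {
    /// Convenience accessor for Copy types, matching the `.value()` calls above.
    pub fn value(&self) -> T {
        self.value
    }
}
```

Keeping mutation behind `set` is what lets the session reject an invalid `SandboxPolicy` update instead of silently applying it.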
@@ -16,6 +16,7 @@ workspace = true
anyhow = { workspace = true }
async-channel = { workspace = true }
async-trait = { workspace = true }
arc-swap = "1.7.1"
base64 = { workspace = true }
chardetng = { workspace = true }
chrono = { workspace = true, features = ["serde"] }
@@ -43,6 +44,7 @@ env-flags = { workspace = true }
eventsource-stream = { workspace = true }
futures = { workspace = true }
http = { workspace = true }
include_dir = { workspace = true }
indexmap = { workspace = true }
keyring = { workspace = true, features = ["crypto-rust"] }
libc = { workspace = true }
@@ -60,7 +62,6 @@ sha1 = { workspace = true }
sha2 = { workspace = true }
shlex = { workspace = true }
similar = { workspace = true }
strum_macros = { workspace = true }
tempfile = { workspace = true }
test-case = "3.3.1"
test-log = { workspace = true }
@@ -122,6 +123,7 @@ assert_cmd = { workspace = true }
assert_matches = { workspace = true }
codex-arg0 = { workspace = true }
codex-core = { path = ".", features = ["deterministic_process_ids"] }
codex-utils-cargo-bin = { workspace = true }
core_test_support = { workspace = true }
ctor = { workspace = true }
escargot = { workspace = true }
@@ -131,7 +133,6 @@ predicates = { workspace = true }
pretty_assertions = { workspace = true }
serial_test = { workspace = true }
tempfile = { workspace = true }
tokio-test = { workspace = true }
tracing-subscriber = { workspace = true }
tracing-test = { workspace = true, features = ["no-env-filter"] }
walkdir = { workspace = true }
codex-rs/core/gpt-5.2-codex_prompt.md (new file, 117 lines)
@@ -0,0 +1,117 @@
You are Codex, based on GPT-5. You are running as a coding agent in the Codex CLI on a user's computer.

## General

- When searching for text or files, prefer using `rg` or `rg --files` respectively because `rg` is much faster than alternatives like `grep`. (If the `rg` command is not found, then use alternatives.)

## Editing constraints

- Default to ASCII when editing or creating files. Only introduce non-ASCII or other Unicode characters when there is a clear justification and the file already uses them.
- Add succinct code comments that explain what is going on if code is not self-explanatory. You should not add comments like "Assigns the value to the variable", but a brief comment might be useful ahead of a complex code block that the user would otherwise have to spend time parsing out. Usage of these comments should be rare.
- Try to use apply_patch for single file edits, but it is fine to explore other options to make the edit if it does not work well. Do not use apply_patch for changes that are auto-generated (e.g. generating package.json or running a lint or format command like gofmt) or when scripting is more efficient (such as search and replacing a string across a codebase).
- You may be in a dirty git worktree.
    * NEVER revert existing changes you did not make unless explicitly requested, since these changes were made by the user.
    * If asked to make a commit or code edits and there are unrelated changes to your work or changes that you didn't make in those files, don't revert those changes.
    * If the changes are in files you've touched recently, you should read carefully and understand how you can work with the changes rather than reverting them.
    * If the changes are in unrelated files, just ignore them and don't revert them.
- Do not amend a commit unless explicitly requested to do so.
- While you are working, you might notice unexpected changes that you didn't make. If this happens, STOP IMMEDIATELY and ask the user how they would like to proceed.
- **NEVER** use destructive commands like `git reset --hard` or `git checkout --` unless specifically requested or approved by the user.

## Plan tool

When using the planning tool:
- Skip using the planning tool for straightforward tasks (roughly the easiest 25%).
- Do not make single-step plans.
- When you have made a plan, update it after having performed one of the sub-tasks that you shared on the plan.

## Codex CLI harness, sandboxing, and approvals

The Codex CLI harness supports several different configurations for sandboxing and escalation approvals that the user can choose from.

Filesystem sandboxing defines which files can be read or written. The options for `sandbox_mode` are:
- **read-only**: The sandbox only permits reading files.
- **workspace-write**: The sandbox permits reading files, and editing files in `cwd` and `writable_roots`. Editing files in other directories requires approval.
- **danger-full-access**: No filesystem sandboxing - all commands are permitted.

Network sandboxing defines whether network can be accessed without approval. Options for `network_access` are:
- **restricted**: Requires approval
- **enabled**: No approval needed

Approvals are your mechanism to get user consent to run shell commands without the sandbox. Possible configuration options for `approval_policy` are
- **untrusted**: The harness will escalate most commands for user approval, apart from a limited allowlist of safe "read" commands.
- **on-failure**: The harness will allow all commands to run in the sandbox (if enabled), and failures will be escalated to the user for approval to run again without the sandbox.
- **on-request**: Commands will be run in the sandbox by default, and you can specify in your tool call if you want to escalate a command to run without sandboxing. (Note that this mode is not always available. If it is, you'll see parameters for it in the `shell` command description.)
- **never**: This is a non-interactive mode where you may NEVER ask the user for approval to run commands. Instead, you must always persist and work around constraints to solve the task for the user. You MUST do your utmost best to finish the task and validate your work before yielding. If this mode is paired with `danger-full-access`, take advantage of it to deliver the best outcome for the user. Further, in this mode, your default testing philosophy is overridden: Even if you don't see local patterns for testing, you may add tests and scripts to validate your work. Just remove them before yielding.

When you are running with `approval_policy == on-request`, and sandboxing enabled, here are scenarios where you'll need to request approval:
- You need to run a command that writes to a directory that requires it (e.g. running tests that write to /var)
- You need to run a GUI app (e.g., open/xdg-open/osascript) to open browsers or files.
- You are running sandboxed and need to run a command that requires network access (e.g. installing packages)
- If you run a command that is important to solving the user's query, but it fails because of sandboxing, rerun the command with approval. ALWAYS proceed to use the `sandbox_permissions` and `justification` parameters - do not message the user before requesting approval for the command.
- You are about to take a potentially destructive action such as an `rm` or `git reset` that the user did not explicitly ask for
- (for all of these, you should weigh alternative paths that do not require approval)

When `sandbox_mode` is set to read-only, you'll need to request approval for any command that isn't a read.

You will be told what filesystem sandboxing, network sandboxing, and approval mode are active in a developer or user message. If you are not told about this, assume that you are running with workspace-write, network sandboxing enabled, and approval on-failure.

Although they introduce friction to the user because your work is paused until the user responds, you should leverage approvals when necessary to accomplish important work. If completing the task requires escalated permissions, do not let these settings or the sandbox deter you from attempting to accomplish the user's task, unless the approval policy is set to "never", in which case never ask for approvals.

When requesting approval to execute a command that will require escalated privileges:
- Provide the `sandbox_permissions` parameter with the value `"require_escalated"`
- Include a short, 1 sentence explanation for why you need escalated permissions in the justification parameter

## Special user requests

- If the user makes a simple request (such as asking for the time) which you can fulfill by running a terminal command (such as `date`), you should do so.
- If the user asks for a "review", default to a code review mindset: prioritise identifying bugs, risks, behavioural regressions, and missing tests. Findings must be the primary focus of the response - keep summaries or overviews brief and only after enumerating the issues. Present findings first (ordered by severity with file/line references), follow with open questions or assumptions, and offer a change-summary only as a secondary detail. If no findings are discovered, state that explicitly and mention any residual risks or testing gaps.

## Frontend tasks
When doing frontend design tasks, avoid collapsing into "AI slop" or safe, average-looking layouts.
Aim for interfaces that feel intentional, bold, and a bit surprising.
- Typography: Use expressive, purposeful fonts and avoid default stacks (Inter, Roboto, Arial, system).
- Color & Look: Choose a clear visual direction; define CSS variables; avoid purple-on-white defaults. No purple bias or dark mode bias.
- Motion: Use a few meaningful animations (page-load, staggered reveals) instead of generic micro-motions.
- Background: Don't rely on flat, single-color backgrounds; use gradients, shapes, or subtle patterns to build atmosphere.
- Overall: Avoid boilerplate layouts and interchangeable UI patterns. Vary themes, type families, and visual languages across outputs.
- Ensure the page loads properly on both desktop and mobile

Exception: If working within an existing website or design system, preserve the established patterns, structure, and visual language.

## Presenting your work and final message

You are producing plain text that will later be styled by the CLI. Follow these rules exactly. Formatting should make results easy to scan, but not feel mechanical. Use judgment to decide how much structure adds value.

- Default: be very concise; friendly coding teammate tone.
- Ask only when needed; suggest ideas; mirror the user's style.
- For substantial work, summarize clearly; follow final‑answer formatting.
- Skip heavy formatting for simple confirmations.
- Don't dump large files you've written; reference paths only.
- No "save/copy this file" - User is on the same machine.
- Offer logical next steps (tests, commits, build) briefly; add verify steps if you couldn't do something.
- For code changes:
    * Lead with a quick explanation of the change, and then give more details on the context covering where and why a change was made. Do not start this explanation with "summary", just jump right in.
    * If there are natural next steps the user may want to take, suggest them at the end of your response. Do not make suggestions if there are no natural next steps.
    * When suggesting multiple options, use numeric lists for the suggestions so the user can quickly respond with a single number.
- The user does not see command execution outputs. When asked to show the output of a command (e.g. `git show`), relay the important details in your answer or summarize the key lines so the user understands the result.

### Final answer structure and style guidelines

- Plain text; CLI handles styling. Use structure only when it helps scanability.
- Headers: optional; short Title Case (1-3 words) wrapped in **…**; no blank line before the first bullet; add only if they truly help.
- Bullets: use - ; merge related points; keep to one line when possible; 4–6 per list ordered by importance; keep phrasing consistent.
- Monospace: backticks for commands/paths/env vars/code ids and inline examples; use for literal keyword bullets; never combine with **.
- Code samples or multi-line snippets should be wrapped in fenced code blocks; include an info string as often as possible.
- Structure: group related bullets; order sections general → specific → supporting; for subsections, start with a bolded keyword bullet, then items; match complexity to the task.
- Tone: collaborative, concise, factual; present tense, active voice; self‑contained; no "above/below"; parallel wording.
- Don'ts: no nested bullets/hierarchies; no ANSI codes; don't cram unrelated keywords; keep keyword lists short—wrap/reformat if long; avoid naming formatting styles in answers.
- Adaptation: code explanations → precise, structured with code refs; simple tasks → lead with outcome; big changes → logical walkthrough + rationale + next actions; casual one-offs → plain sentences, no headers/bullets.
- File References: When referencing files in your response, follow the rules below:
    * Use inline code to make file paths clickable.
    * Each reference should have a standalone path, even if it's the same file.
    * Accepted: absolute, workspace‑relative, a/ or b/ diff prefixes, or bare filename/suffix.
    * Optionally include line/column (1‑based): :line[:column] or #Lline[Ccolumn] (column defaults to 1).
    * Do not use URIs like file://, vscode://, or https://.
    * Do not provide a range of lines.
    * Examples: src/app.ts, src/app.ts:42, b/server/index.js#L10, C:\repo\project\main.rs:12:5
File diff suppressed because one or more lines are too long
@@ -636,8 +636,7 @@ mod tests {
    use crate::auth::storage::FileAuthStorage;
    use crate::auth::storage::get_auth_file;
    use crate::config::Config;
    use crate::config::ConfigOverrides;
    use crate::config::ConfigToml;
    use crate::config::ConfigBuilder;
    use crate::token_data::IdTokenInfo;
    use crate::token_data::KnownPlan as InternalKnownPlan;
    use crate::token_data::PlanType as InternalPlanType;
@@ -862,17 +861,16 @@ mod tests {
        Ok(fake_jwt)
    }

    fn build_config(
    async fn build_config(
        codex_home: &Path,
        forced_login_method: Option<ForcedLoginMethod>,
        forced_chatgpt_workspace_id: Option<String>,
    ) -> Config {
        let mut config = Config::load_from_base_config_with_overrides(
            ConfigToml::default(),
            ConfigOverrides::default(),
            codex_home.to_path_buf(),
        )
        .expect("config should load");
        let mut config = ConfigBuilder::default()
            .codex_home(codex_home.to_path_buf())
            .build()
            .await
            .expect("config should load");
        config.forced_login_method = forced_login_method;
        config.forced_chatgpt_workspace_id = forced_chatgpt_workspace_id;
        config
@@ -915,7 +913,7 @@ mod tests {
        login_with_api_key(codex_home.path(), "sk-test", AuthCredentialsStoreMode::File)
            .expect("seed api key");

        let config = build_config(codex_home.path(), Some(ForcedLoginMethod::Chatgpt), None);
        let config = build_config(codex_home.path(), Some(ForcedLoginMethod::Chatgpt), None).await;

        let err = super::enforce_login_restrictions(&config)
            .await
@@ -941,7 +939,7 @@ mod tests {
        )
        .expect("failed to write auth file");

        let config = build_config(codex_home.path(), None, Some("org_mine".to_string()));
        let config = build_config(codex_home.path(), None, Some("org_mine".to_string())).await;

        let err = super::enforce_login_restrictions(&config)
            .await
@@ -967,7 +965,7 @@ mod tests {
        )
        .expect("failed to write auth file");

        let config = build_config(codex_home.path(), None, Some("org_mine".to_string()));
        let config = build_config(codex_home.path(), None, Some("org_mine".to_string())).await;

        super::enforce_login_restrictions(&config)
            .await
@@ -985,7 +983,7 @@ mod tests {
        login_with_api_key(codex_home.path(), "sk-test", AuthCredentialsStoreMode::File)
            .expect("seed api key");

        let config = build_config(codex_home.path(), None, Some("org_mine".to_string()));
        let config = build_config(codex_home.path(), None, Some("org_mine".to_string())).await;

        super::enforce_login_restrictions(&config)
            .await
@@ -1002,7 +1000,7 @@ mod tests {
        let _guard = EnvVarGuard::set(CODEX_API_KEY_ENV_VAR, "sk-env");
        let codex_home = tempdir().unwrap();

        let config = build_config(codex_home.path(), Some(ForcedLoginMethod::Chatgpt), None);
        let config = build_config(codex_home.path(), Some(ForcedLoginMethod::Chatgpt), None).await;

        let err = super::enforce_login_restrictions(&config)
            .await
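These test updates swap the synchronous `Config::load_from_base_config_with_overrides(...)` call for an awaited `ConfigBuilder`. A self-contained sketch of the builder shape the diff implies; the stand-in types below are hypothetical and far smaller than the real `codex-core` ones, and the example assumes tokio for the async runtime:

```rust
use std::path::PathBuf;

// Hypothetical stand-ins to make the pattern concrete.
#[derive(Debug, Default)]
struct Config {
    codex_home: PathBuf,
}

#[derive(Default)]
struct ConfigBuilder {
    codex_home: Option<PathBuf>,
}

impl ConfigBuilder {
    fn codex_home(mut self, path: PathBuf) -> Self {
        self.codex_home = Some(path);
        self
    }

    // Async because the real builder reads config layers from disk.
    async fn build(self) -> Result<Config, String> {
        Ok(Config {
            codex_home: self.codex_home.ok_or("codex_home is required")?,
        })
    }
}

#[tokio::main]
async fn main() {
    let config = ConfigBuilder::default()
        .codex_home(PathBuf::from("/tmp/codex-home"))
        .build()
        .await
        .expect("config should load");
    println!("{config:?}");
}
```

The builder makes test setup a one-liner (`build_config(...).await`) instead of threading `ConfigToml` and `ConfigOverrides` through every call site.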
@@ -46,6 +46,7 @@ pub fn try_parse_word_only_commands_sequence(tree: &Tree, src: &str) -> Option<V
    "string_content",
    "raw_string",
    "number",
    "concatenation",
];
// Allow only safe punctuation / operator tokens; anything else causes reject.
const ALLOWED_PUNCT_TOKENS: &[&str] = &["&&", "||", ";", "|", "\"", "'"];
@@ -158,6 +159,48 @@ fn parse_plain_command_from_node(cmd: tree_sitter::Node, src: &str) -> Option<Ve
                return None;
            }
        }
        "concatenation" => {
            // Handle concatenated arguments like -g"*.py"
            let mut concatenated = String::new();
            let mut concat_cursor = child.walk();
            for part in child.named_children(&mut concat_cursor) {
                match part.kind() {
                    "word" | "number" => {
                        concatenated
                            .push_str(part.utf8_text(src.as_bytes()).ok()?.to_owned().as_str());
                    }
                    "string" => {
                        if part.child_count() == 3
                            && part.child(0)?.kind() == "\""
                            && part.child(1)?.kind() == "string_content"
                            && part.child(2)?.kind() == "\""
                        {
                            concatenated.push_str(
                                part.child(1)?
                                    .utf8_text(src.as_bytes())
                                    .ok()?
                                    .to_owned()
                                    .as_str(),
                            );
                        } else {
                            return None;
                        }
                    }
                    "raw_string" => {
                        let raw_string = part.utf8_text(src.as_bytes()).ok()?;
                        let stripped = raw_string
                            .strip_prefix('\'')
                            .and_then(|s| s.strip_suffix('\''))?;
                        concatenated.push_str(stripped);
                    }
                    _ => return None,
                }
            }
            if concatenated.is_empty() {
                return None;
            }
            words.push(concatenated);
        }
        _ => return None,
    }
}
@@ -256,4 +299,47 @@ mod tests {
        let parsed = parse_shell_lc_plain_commands(&command).unwrap();
        assert_eq!(parsed, vec![vec!["ls".to_string()]]);
    }

    #[test]
    fn accepts_concatenated_flag_and_value() {
        // Test case: -g"*.py" (flag directly concatenated with quoted value)
        let cmds = parse_seq("rg -n \"foo\" -g\"*.py\"").unwrap();
        assert_eq!(
            cmds,
            vec![vec![
                "rg".to_string(),
                "-n".to_string(),
                "foo".to_string(),
                "-g*.py".to_string(),
            ]]
        );
    }

    #[test]
    fn accepts_concatenated_flag_with_single_quotes() {
        let cmds = parse_seq("grep -n 'pattern' -g'*.txt'").unwrap();
        assert_eq!(
            cmds,
            vec![vec![
                "grep".to_string(),
                "-n".to_string(),
                "pattern".to_string(),
                "-g*.txt".to_string(),
            ]]
        );
    }

    #[test]
    fn rejects_concatenation_with_variable_substitution() {
        // Environment variables in concatenated strings should be rejected
        assert!(parse_seq("rg -g\"$VAR\" pattern").is_none());
        assert!(parse_seq("rg -g\"${VAR}\" pattern").is_none());
    }

    #[test]
    fn rejects_concatenation_with_command_substitution() {
        // Command substitution in concatenated strings should be rejected
        assert!(parse_seq("rg -g\"$(pwd)\" pattern").is_none());
        assert!(parse_seq("rg -g\"$(echo '*.py')\" pattern").is_none());
    }
}
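The new `concatenation` arm glues adjacent word fragments such as `-g"*.py"` into one argument and bails out on anything dynamic. The real code walks tree-sitter-bash nodes; the sketch below reproduces just the flattening rule over a hypothetical `Fragment` type so the accept/reject behavior is easy to see:

```rust
/// Toy model of the node kinds the parser accepts inside a concatenation.
enum Fragment<'a> {
    Word(&'a str),
    DoubleQuoted(&'a str),  // e.g. "*.py" with plain string content only
    SingleQuoted(&'a str),  // raw text including the surrounding quotes
    VariableOrSubstitution, // $VAR, $(cmd), ... -> always rejected
}

/// Join fragments into one argument, or None if any fragment is dynamic
/// or the result would be empty, mirroring the hunk above.
fn flatten_concatenation(parts: &[Fragment<'_>]) -> Option<String> {
    let mut concatenated = String::new();
    for part in parts {
        match part {
            Fragment::Word(text) => concatenated.push_str(text),
            Fragment::DoubleQuoted(content) => concatenated.push_str(content),
            Fragment::SingleQuoted(raw) => {
                // Strip the surrounding single quotes, as the parser does.
                let stripped = raw.strip_prefix('\'')?.strip_suffix('\'')?;
                concatenated.push_str(stripped);
            }
            Fragment::VariableOrSubstitution => return None,
        }
    }
    (!concatenated.is_empty()).then_some(concatenated)
}

fn main() {
    // -g"*.py" flattens to -g*.py ...
    let ok = flatten_concatenation(&[Fragment::Word("-g"), Fragment::DoubleQuoted("*.py")]);
    assert_eq!(ok.as_deref(), Some("-g*.py"));

    // ... while -g"$VAR" is rejected outright.
    let rejected =
        flatten_concatenation(&[Fragment::Word("-g"), Fragment::VariableOrSubstitution]);
    assert!(rejected.is_none());
    println!("concatenation flattening behaves as expected");
}
```

Rejecting any fragment that could expand at runtime is what keeps the "word-only" safety analysis sound.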
@@ -49,7 +49,7 @@ use crate::features::FEATURES;
use crate::flags::CODEX_RS_SSE_FIXTURE;
use crate::model_provider_info::ModelProviderInfo;
use crate::model_provider_info::WireApi;
use crate::openai_models::model_family::ModelFamily;
use crate::models_manager::model_family::ModelFamily;
use crate::tools::spec::create_tools_json_for_chat_completions_api;
use crate::tools::spec::create_tools_json_for_responses_api;

@@ -1,6 +1,6 @@
use crate::client_common::tools::ToolSpec;
use crate::error::Result;
use crate::openai_models::model_family::ModelFamily;
use crate::models_manager::model_family::ModelFamily;
pub use codex_api::common::ResponseEvent;
use codex_apply_patch::APPLY_PATCH_TOOL_INSTRUCTIONS;
use codex_protocol::models::ResponseItem;
@@ -259,7 +259,7 @@ mod tests {
    use pretty_assertions::assert_eq;

    use crate::config::test_config;
    use crate::openai_models::models_manager::ModelsManager;
    use crate::models_manager::manager::ModelsManager;

    use super::*;
@@ -13,11 +13,11 @@ use crate::compact;
use crate::compact::run_inline_auto_compact_task;
use crate::compact::should_use_remote_compact_task;
use crate::compact_remote::run_inline_remote_auto_compact_task;
use crate::exec_policy::load_exec_policy_for_features;
use crate::exec_policy::ExecPolicyManager;
use crate::features::Feature;
use crate::features::Features;
use crate::openai_models::model_family::ModelFamily;
use crate::openai_models::models_manager::ModelsManager;
use crate::models_manager::manager::ModelsManager;
use crate::models_manager::model_family::ModelFamily;
use crate::parse_command::parse_command;
use crate::parse_turn_item;
use crate::stream_events_utils::HandleOutputCtx;
@@ -78,7 +78,6 @@ use crate::client_common::ResponseEvent;
use crate::compact::collect_user_messages;
use crate::config::Config;
use crate::config::Constrained;
use crate::config::ConstraintError;
use crate::config::ConstraintResult;
use crate::config::GhostSnapshotConfig;
use crate::config::types::ShellEnvironmentPolicy;
@@ -89,6 +88,7 @@ use crate::error::Result as CodexResult;
#[cfg(test)]
use crate::exec::StreamOutput;
use crate::exec_policy::ExecPolicyUpdateError;
use crate::feedback_tags;
use crate::mcp::auth::compute_auth_statuses;
use crate::mcp_connection_manager::McpConnectionManager;
use crate::model_provider_info::CHAT_WIRE_API_DEPRECATION_SUMMARY;
@@ -149,7 +149,6 @@ use crate::user_instructions::UserInstructions;
use crate::user_notification::UserNotification;
use crate::util::backoff;
use codex_async_utils::OrCancelExt;
use codex_execpolicy::Policy as ExecPolicy;
use codex_otel::otel_manager::OtelManager;
use codex_protocol::config_types::ReasoningSummary as ReasoningSummaryConfig;
use codex_protocol::models::ContentItem;
@@ -219,11 +218,10 @@ impl Codex {
        let (tx_sub, rx_sub) = async_channel::bounded(SUBMISSION_CHANNEL_CAPACITY);
        let (tx_event, rx_event) = async_channel::unbounded();

        let loaded_skills = if config.features.enabled(Feature::Skills) {
            Some(skills_manager.skills_for_cwd(&config.cwd))
        } else {
            None
        };
        let loaded_skills = config
            .features
            .enabled(Feature::Skills)
            .then(|| skills_manager.skills_for_cwd(&config.cwd));

        if let Some(outcome) = &loaded_skills {
            for err in &outcome.errors {
@@ -243,14 +241,15 @@ impl Codex {
        )
        .await;

        let exec_policy = load_exec_policy_for_features(&config.features, &config.codex_home)
        let exec_policy = ExecPolicyManager::load(&config.features, &config.config_layer_stack)
            .await
            .map_err(|err| CodexErr::Fatal(format!("failed to load execpolicy: {err}")))?;
        let exec_policy = Arc::new(RwLock::new(exec_policy));

        let config = Arc::new(config);
        if config.features.enabled(Feature::RemoteModels)
            && let Err(err) = models_manager.refresh_available_models(&config).await
            && let Err(err) = models_manager
                .refresh_available_models_with_cache(&config)
                .await
        {
            error!("failed to refresh available models: {err:?}");
        }
@@ -268,7 +267,6 @@ impl Codex {
            sandbox_policy: config.sandbox_policy.clone(),
            cwd: config.cwd.clone(),
            original_config_do_not_use: Arc::clone(&config),
            exec_policy,
            session_source,
        };

@@ -280,6 +278,7 @@ impl Codex {
            config.clone(),
            auth_manager.clone(),
            models_manager.clone(),
            exec_policy,
            tx_event.clone(),
            conversation_history,
            session_source_clone,
@@ -373,7 +372,6 @@ pub(crate) struct TurnContext {
    pub(crate) final_output_json_schema: Option<Value>,
    pub(crate) codex_linux_sandbox_exe: Option<PathBuf>,
    pub(crate) tool_call_gate: Arc<ReadinessFlag>,
    pub(crate) exec_policy: Arc<RwLock<ExecPolicy>>,
    pub(crate) truncation_policy: TruncationPolicy,
}

@@ -417,7 +415,7 @@ pub(crate) struct SessionConfiguration {
    /// When to escalate for approval for execution
    approval_policy: Constrained<AskForApproval>,
    /// How to sandbox commands executed in the system
    sandbox_policy: SandboxPolicy,
    sandbox_policy: Constrained<SandboxPolicy>,

    /// Working directory that should be treated as the *root* of the
    /// session. All relative paths supplied by the model as well as the
@@ -428,9 +426,6 @@ pub(crate) struct SessionConfiguration {
    /// operate deterministically.
    cwd: PathBuf,

    /// Execpolicy policy, applied only when enabled by feature flag.
    exec_policy: Arc<RwLock<ExecPolicy>>,

    // TODO(pakrym): Remove config from here
    original_config_do_not_use: Arc<Config>,
    /// Source of the session (cli, vscode, exec, mcp, ...)
@@ -453,7 +448,7 @@ impl SessionConfiguration {
            next_configuration.approval_policy.set(approval_policy)?;
        }
        if let Some(sandbox_policy) = updates.sandbox_policy.clone() {
            next_configuration.sandbox_policy = sandbox_policy;
            next_configuration.sandbox_policy.set(sandbox_policy)?;
        }
        if let Some(cwd) = updates.cwd.clone() {
            next_configuration.cwd = cwd;
@@ -528,14 +523,13 @@ impl Session {
            compact_prompt: session_configuration.compact_prompt.clone(),
            user_instructions: session_configuration.user_instructions.clone(),
            approval_policy: session_configuration.approval_policy.value(),
            sandbox_policy: session_configuration.sandbox_policy.clone(),
            sandbox_policy: session_configuration.sandbox_policy.get().clone(),
            shell_environment_policy: per_turn_config.shell_environment_policy.clone(),
            tools_config,
            ghost_snapshot: per_turn_config.ghost_snapshot.clone(),
            final_output_json_schema: None,
            codex_linux_sandbox_exe: per_turn_config.codex_linux_sandbox_exe.clone(),
            tool_call_gate: Arc::new(ReadinessFlag::new()),
            exec_policy: session_configuration.exec_policy.clone(),
            truncation_policy: TruncationPolicy::new(
                per_turn_config.as_ref(),
                model_family.truncation_policy,
@@ -549,6 +543,7 @@ impl Session {
        config: Arc<Config>,
        auth_manager: Arc<AuthManager>,
        models_manager: Arc<ModelsManager>,
        exec_policy: ExecPolicyManager,
        tx_event: Sender<Event>,
        initial_history: InitialHistory,
        session_source: SessionSource,
@@ -645,7 +640,7 @@ impl Session {
            config.model_context_window,
            config.model_auto_compact_token_limit,
            config.approval_policy.value(),
            config.sandbox_policy.clone(),
            config.sandbox_policy.get().clone(),
            config.mcp_servers.keys().map(String::as_str).collect(),
            config.active_profile.clone(),
        );
@@ -668,6 +663,7 @@ impl Session {
            rollout: Mutex::new(Some(rollout_recorder)),
            user_shell: Arc::new(default_shell),
            show_raw_agent_reasoning: config.show_raw_agent_reasoning,
            exec_policy,
            auth_manager: Arc::clone(&auth_manager),
            otel_manager,
            models_manager: Arc::clone(&models_manager),
@@ -695,7 +691,7 @@ impl Session {
            model: session_configuration.model.clone(),
            model_provider_id: config.model_provider_id.clone(),
            approval_policy: session_configuration.approval_policy.value(),
            sandbox_policy: session_configuration.sandbox_policy.clone(),
            sandbox_policy: session_configuration.sandbox_policy.get().clone(),
            cwd: session_configuration.cwd.clone(),
            reasoning_effort: session_configuration.model_reasoning_effort,
            history_log_id,
@@ -712,7 +708,7 @@ impl Session {
        // Construct sandbox_state before initialize() so it can be sent to each
        // MCP server immediately after it becomes ready (avoiding blocking).
        let sandbox_state = SandboxState {
            sandbox_policy: session_configuration.sandbox_policy.clone(),
            sandbox_policy: session_configuration.sandbox_policy.get().clone(),
            codex_linux_sandbox_exe: config.codex_linux_sandbox_exe.clone(),
            sandbox_cwd: session_configuration.cwd.clone(),
        };
@@ -733,30 +729,6 @@ impl Session {
        // record_initial_history can emit events. We record only after the SessionConfiguredEvent is emitted.
        sess.record_initial_history(initial_history).await;

        if sess.enabled(Feature::Skills) {
            let mut rx = sess
                .services
                .skills_manager
                .subscribe_skills_update_notifications();
            let sess = Arc::downgrade(&sess);
            tokio::spawn(async move {
                loop {
                    match rx.recv().await {
                        Ok(()) => {
                            let Some(sess) = sess.upgrade() else {
                                break;
                            };
                            let turn_context = sess.new_default_turn().await;
                            sess.send_event(turn_context.as_ref(), EventMsg::SkillsUpdateAvailable)
                                .await;
                        }
                        Err(tokio::sync::broadcast::error::RecvError::Lagged(_)) => continue,
                        Err(tokio::sync::broadcast::error::RecvError::Closed) => break,
                    }
                }
            });
        }

        Ok(sess)
    }
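One readability refactor recurs in this file: `if cond { Some(expr) } else { None }` becomes `cond.then(|| expr)` for the skills lookups. A tiny self-contained illustration of the equivalence (values here are made up):

```rust
fn main() {
    let skills_enabled = true;

    // Original shape: explicit branch producing an Option.
    let via_if: Option<Vec<&str>> = if skills_enabled {
        Some(vec!["skill-a", "skill-b"])
    } else {
        None
    };

    // Refactored shape: bool::then runs the closure only when true.
    let via_then: Option<Vec<&str>> = skills_enabled.then(|| vec!["skill-a", "skill-b"]);

    assert_eq!(via_if, via_then);
    println!("bool::then matches the if/else form");
}
```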
@@ -839,6 +811,13 @@ impl Session {
                .await;
        }

        // Seed usage info from the recorded rollout so UIs can show token counts
        // immediately on resume/fork.
        if let Some(info) = Self::last_token_info_from_rollout(&rollout_items) {
            let mut state = self.state.lock().await;
            state.set_token_info(Some(info));
        }

        // If persisting, persist all rollout items as-is (recorder filters)
        if persist && !rollout_items.is_empty() {
            self.persist_rollout_items(&rollout_items).await;
@@ -849,6 +828,13 @@ impl Session {
        }
    }

    fn last_token_info_from_rollout(rollout_items: &[RolloutItem]) -> Option<TokenUsageInfo> {
        rollout_items.iter().rev().find_map(|item| match item {
            RolloutItem::EventMsg(EventMsg::TokenCount(ev)) => ev.info.clone(),
            _ => None,
        })
    }

    pub(crate) async fn update_settings(
        &self,
        updates: SessionSettingsUpdate,
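`last_token_info_from_rollout` scans the rollout from the end and returns the first populated token count, so a trailing empty `TokenCount` event does not clobber real usage data. A standalone sketch of that reverse scan over a stripped-down stand-in for `RolloutItem`:

```rust
#[derive(Clone, Debug, PartialEq)]
struct TokenInfo {
    total_tokens: u64,
}

// Stripped-down stand-in for RolloutItem / EventMsg::TokenCount.
enum Item {
    TokenCount(Option<TokenInfo>),
    Other,
}

/// Latest non-empty token info wins, matching `.iter().rev().find_map(...)`.
fn last_token_info(items: &[Item]) -> Option<TokenInfo> {
    items.iter().rev().find_map(|item| match item {
        Item::TokenCount(info) => info.clone(),
        Item::Other => None,
    })
}

fn main() {
    let items = vec![
        Item::TokenCount(Some(TokenInfo { total_tokens: 30 })),
        Item::Other,
        Item::TokenCount(Some(TokenInfo { total_tokens: 375 })),
        Item::TokenCount(None), // trailing empty event is skipped
    ];
    assert_eq!(
        last_token_info(&items),
        Some(TokenInfo { total_tokens: 375 })
    );
    println!("reverse scan picks the most recent populated token info");
}
```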
@@ -861,11 +847,8 @@ impl Session {
                Ok(())
            }
            Err(err) => {
                let wrapped = ConstraintError {
                    message: format!("Could not update config: {err}"),
                };
                warn!(%wrapped, "rejected session settings update");
                Err(wrapped)
                warn!("rejected session settings update: {err}");
                Err(err)
            }
        }
    }
@@ -886,18 +869,15 @@ impl Session {
            }
            Err(err) => {
                drop(state);
                let wrapped = ConstraintError {
                    message: format!("Could not update config: {err}"),
                };
                self.send_event_raw(Event {
                    id: sub_id.clone(),
                    msg: EventMsg::Error(ErrorEvent {
                        message: wrapped.to_string(),
                        message: err.to_string(),
                        codex_error_info: Some(CodexErrorInfo::BadRequest),
                    }),
                })
                .await;
                return Err(wrapped);
                return Err(err);
            }
        }
    };
@@ -923,7 +903,7 @@ impl Session {

        if sandbox_policy_changed {
            let sandbox_state = SandboxState {
                sandbox_policy: per_turn_config.sandbox_policy.clone(),
                sandbox_policy: per_turn_config.sandbox_policy.get().clone(),
                codex_linux_sandbox_exe: per_turn_config.codex_linux_sandbox_exe.clone(),
                sandbox_cwd: per_turn_config.cwd.clone(),
            };
@@ -1057,29 +1037,24 @@ impl Session {
        amendment: &ExecPolicyAmendment,
    ) -> Result<(), ExecPolicyUpdateError> {
        let features = self.features.clone();
        let (codex_home, current_policy) = {
            let state = self.state.lock().await;
            (
                state
                    .session_configuration
                    .original_config_do_not_use
                    .codex_home
                    .clone(),
                state.session_configuration.exec_policy.clone(),
            )
        };
        let codex_home = self
            .state
            .lock()
            .await
            .session_configuration
            .original_config_do_not_use
            .codex_home
            .clone();

        if !features.enabled(Feature::ExecPolicy) {
            error!("attempted to append execpolicy rule while execpolicy feature is disabled");
            return Err(ExecPolicyUpdateError::FeatureDisabled);
        }

        crate::exec_policy::append_execpolicy_amendment_and_update(
            &codex_home,
            &current_policy,
            &amendment.command,
        )
        .await?;
        self.services
            .exec_policy
            .append_amendment_and_update(&codex_home, amendment)
            .await?;

        Ok(())
    }
@@ -1464,12 +1439,14 @@ impl Session {
        message: impl Into<String>,
        codex_error: CodexErr,
    ) {
        let additional_details = codex_error.to_string();
        let codex_error_info = CodexErrorInfo::ResponseStreamDisconnected {
            http_status_code: codex_error.http_status_code_value(),
        };
        let event = EventMsg::StreamError(StreamErrorEvent {
            message: message.into(),
            codex_error_info: Some(codex_error_info),
            additional_details: Some(additional_details),
        });
        self.send_event(turn_context, event).await;
    }
@@ -1799,7 +1776,16 @@ mod handlers {
                    final_output_json_schema: Some(final_output_json_schema),
                },
            ),
            Op::UserInput { items } => (items, SessionSettingsUpdate::default()),
            Op::UserInput {
                items,
                final_output_json_schema,
            } => (
                items,
                SessionSettingsUpdate {
                    final_output_json_schema: Some(final_output_json_schema),
                    ..Default::default()
                },
            ),
            _ => unreachable!(),
        };

@@ -2091,7 +2077,7 @@ mod handlers {
        review_request: ReviewRequest,
    ) {
        let turn_context = sess.new_default_turn_with_sub_id(sub_id.clone()).await;
        match resolve_review_request(review_request, config.cwd.as_path()) {
        match resolve_review_request(review_request, turn_context.cwd.as_path()) {
            Ok(resolved) => {
                spawn_review_thread(
                    Arc::clone(sess),
@@ -2186,7 +2172,6 @@ async fn spawn_review_thread(
        final_output_json_schema: None,
        codex_linux_sandbox_exe: parent_turn_context.codex_linux_sandbox_exe.clone(),
        tool_call_gate: Arc::new(ReadinessFlag::new()),
        exec_policy: parent_turn_context.exec_policy.clone(),
        truncation_policy: TruncationPolicy::new(&per_turn_config, model_family.truncation_policy),
    };

@@ -2212,6 +2197,7 @@ fn skills_to_info(skills: &[SkillMetadata]) -> Vec<ProtocolSkillMetadata> {
        .map(|skill| ProtocolSkillMetadata {
            name: skill.name.clone(),
            description: skill.description.clone(),
            short_description: skill.short_description.clone(),
            path: skill.path.clone(),
            scope: skill.scope,
        })
@@ -2266,15 +2252,11 @@ pub(crate) async fn run_task(
    });
    sess.send_event(&turn_context, event).await;

    let skills_outcome = if sess.enabled(Feature::Skills) {
        Some(
            sess.services
                .skills_manager
                .skills_for_cwd(&turn_context.cwd),
        )
    } else {
        None
    };
    let skills_outcome = sess.enabled(Feature::Skills).then(|| {
        sess.services
            .skills_manager
            .skills_for_cwd(&turn_context.cwd)
    });

    let SkillInjections {
        items: skill_items,
@@ -2560,8 +2542,22 @@ async fn try_run_turn(
        model: turn_context.client.get_model(),
        effort: turn_context.client.get_reasoning_effort(),
        summary: turn_context.client.get_reasoning_summary(),
        base_instructions: turn_context.base_instructions.clone(),
        user_instructions: turn_context.user_instructions.clone(),
        developer_instructions: turn_context.developer_instructions.clone(),
        final_output_json_schema: turn_context.final_output_json_schema.clone(),
        truncation_policy: Some(turn_context.truncation_policy.into()),
    });

    feedback_tags!(
        model = turn_context.client.get_model(),
        approval_policy = turn_context.approval_policy,
        sandbox_policy = turn_context.sandbox_policy,
        effort = turn_context.client.get_reasoning_effort(),
        auth_mode = sess.services.auth_manager.get_auth_mode(),
        features = sess.features.enabled_features(),
    );

    sess.persist_rollout_items(&[rollout_item]).await;
    let mut stream = turn_context
        .client
@@ -2652,6 +2648,13 @@ async fn try_run_turn(
            // token usage is available to avoid duplicate TokenCount events.
            sess.update_rate_limits(&turn_context, snapshot).await;
        }
        ResponseEvent::ModelsEtag(etag) => {
            // Update internal state with latest models etag
            sess.services
                .models_manager
                .refresh_if_new_etag(etag, sess.features.enabled(Feature::RemoteModels))
                .await;
        }
        ResponseEvent::Completed {
            response_id: _,
            token_usage,
@@ -2778,12 +2781,12 @@ pub(crate) use tests::make_session_and_context_with_rx;
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::CodexAuth;
|
||||
use crate::config::ConfigOverrides;
|
||||
use crate::config::ConfigToml;
|
||||
use crate::config::ConfigBuilder;
|
||||
use crate::exec::ExecToolCallOutput;
|
||||
use crate::function_tool::FunctionCallError;
|
||||
use crate::shell::default_user_shell;
|
||||
use crate::tools::format_exec_output_str;
|
||||
|
||||
use codex_protocol::models::FunctionCallOutputPayload;
|
||||
|
||||
use crate::protocol::CompactedItem;
|
||||
@@ -2792,6 +2795,9 @@ mod tests {
|
||||
use crate::protocol::RateLimitSnapshot;
|
||||
use crate::protocol::RateLimitWindow;
|
||||
use crate::protocol::ResumedHistory;
|
||||
use crate::protocol::TokenCountEvent;
|
||||
use crate::protocol::TokenUsage;
|
||||
use crate::protocol::TokenUsageInfo;
|
||||
use crate::state::TaskKind;
|
||||
use crate::tasks::SessionTask;
|
||||
use crate::tasks::SessionTaskContext;
|
||||
@@ -2806,6 +2812,7 @@ mod tests {
|
||||
use codex_app_server_protocol::AuthMode;
|
||||
use codex_protocol::models::ContentItem;
|
||||
use codex_protocol::models::ResponseItem;
|
||||
use std::path::Path;
|
||||
use std::time::Duration;
|
||||
use tokio::time::sleep;
|
||||
|
||||
@@ -2818,9 +2825,9 @@ mod tests {
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration as StdDuration;
|
||||
|
||||
#[test]
|
||||
fn reconstruct_history_matches_live_compactions() {
|
||||
let (session, turn_context) = make_session_and_context();
|
||||
#[tokio::test]
|
||||
async fn reconstruct_history_matches_live_compactions() {
|
||||
let (session, turn_context) = make_session_and_context().await;
|
||||
let (rollout_items, expected) = sample_rollout(&session, &turn_context);
|
||||
|
||||
let reconstructed = session.reconstruct_history_from_rollout(&turn_context, &rollout_items);
|
||||
@@ -2828,47 +2835,117 @@ mod tests {
|
||||
assert_eq!(expected, reconstructed);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn record_initial_history_reconstructs_resumed_transcript() {
|
||||
let (session, turn_context) = make_session_and_context();
|
||||
#[tokio::test]
|
||||
async fn record_initial_history_reconstructs_resumed_transcript() {
|
||||
let (session, turn_context) = make_session_and_context().await;
|
||||
let (rollout_items, expected) = sample_rollout(&session, &turn_context);
|
||||
|
||||
tokio_test::block_on(session.record_initial_history(InitialHistory::Resumed(
|
||||
ResumedHistory {
|
||||
session
|
||||
.record_initial_history(InitialHistory::Resumed(ResumedHistory {
|
||||
conversation_id: ConversationId::default(),
|
||||
history: rollout_items,
|
||||
rollout_path: PathBuf::from("/tmp/resume.jsonl"),
|
||||
}))
|
||||
.await;
|
||||
|
||||
let actual = session.state.lock().await.clone_history().get_history();
|
||||
assert_eq!(expected, actual);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn record_initial_history_seeds_token_info_from_rollout() {
|
||||
let (session, turn_context) = make_session_and_context().await;
|
||||
let (mut rollout_items, _expected) = sample_rollout(&session, &turn_context);
|
||||
|
||||
let info1 = TokenUsageInfo {
|
||||
total_token_usage: TokenUsage {
|
||||
input_tokens: 10,
|
||||
cached_input_tokens: 0,
|
||||
output_tokens: 20,
|
||||
reasoning_output_tokens: 0,
|
||||
total_tokens: 30,
|
||||
},
|
||||
last_token_usage: TokenUsage {
|
||||
input_tokens: 3,
|
||||
cached_input_tokens: 0,
|
||||
output_tokens: 4,
|
||||
reasoning_output_tokens: 0,
|
||||
total_tokens: 7,
|
||||
},
|
||||
model_context_window: Some(1_000),
|
||||
};
|
||||
let info2 = TokenUsageInfo {
|
||||
total_token_usage: TokenUsage {
|
||||
input_tokens: 100,
|
||||
cached_input_tokens: 50,
|
||||
output_tokens: 200,
|
||||
reasoning_output_tokens: 25,
|
||||
total_tokens: 375,
|
||||
},
|
||||
last_token_usage: TokenUsage {
|
||||
input_tokens: 10,
|
||||
cached_input_tokens: 0,
|
||||
output_tokens: 20,
|
||||
reasoning_output_tokens: 5,
|
||||
total_tokens: 35,
|
||||
},
|
||||
model_context_window: Some(2_000),
|
||||
};
|
||||
|
||||
rollout_items.push(RolloutItem::EventMsg(EventMsg::TokenCount(
|
||||
TokenCountEvent {
|
||||
info: Some(info1),
|
||||
rate_limits: None,
|
||||
},
|
||||
)));
|
||||
rollout_items.push(RolloutItem::EventMsg(EventMsg::TokenCount(
|
||||
TokenCountEvent {
|
||||
info: None,
|
||||
rate_limits: None,
|
||||
},
|
||||
)));
|
||||
rollout_items.push(RolloutItem::EventMsg(EventMsg::TokenCount(
|
||||
TokenCountEvent {
|
||||
info: Some(info2.clone()),
|
||||
rate_limits: None,
|
||||
},
|
||||
)));
|
||||
rollout_items.push(RolloutItem::EventMsg(EventMsg::TokenCount(
|
||||
TokenCountEvent {
|
||||
info: None,
|
||||
rate_limits: None,
|
||||
},
|
||||
)));
|
||||
|
||||
let actual = tokio_test::block_on(async {
|
||||
session.state.lock().await.clone_history().get_history()
|
||||
});
|
||||
assert_eq!(expected, actual);
|
||||
session
|
||||
.record_initial_history(InitialHistory::Resumed(ResumedHistory {
|
||||
conversation_id: ConversationId::default(),
|
||||
history: rollout_items,
|
||||
rollout_path: PathBuf::from("/tmp/resume.jsonl"),
|
||||
}))
|
||||
.await;
|
||||
|
||||
let actual = session.state.lock().await.token_info();
|
||||
assert_eq!(actual, Some(info2));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn record_initial_history_reconstructs_forked_transcript() {
|
||||
let (session, turn_context) = make_session_and_context();
|
||||
#[tokio::test]
|
||||
async fn record_initial_history_reconstructs_forked_transcript() {
|
||||
let (session, turn_context) = make_session_and_context().await;
|
||||
let (rollout_items, expected) = sample_rollout(&session, &turn_context);
|
||||
|
||||
tokio_test::block_on(session.record_initial_history(InitialHistory::Forked(rollout_items)));
|
||||
session
|
||||
.record_initial_history(InitialHistory::Forked(rollout_items))
|
||||
.await;
|
||||
|
||||
let actual = tokio_test::block_on(async {
|
||||
session.state.lock().await.clone_history().get_history()
|
||||
});
|
||||
let actual = session.state.lock().await.clone_history().get_history();
|
||||
assert_eq!(expected, actual);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn set_rate_limits_retains_previous_credits() {
|
||||
#[tokio::test]
|
||||
async fn set_rate_limits_retains_previous_credits() {
|
||||
let codex_home = tempfile::tempdir().expect("create temp dir");
|
||||
let config = Config::load_from_base_config_with_overrides(
|
||||
ConfigToml::default(),
|
||||
ConfigOverrides::default(),
|
||||
codex_home.path().to_path_buf(),
|
||||
)
|
||||
.expect("load default test config");
|
||||
let config = build_test_config(codex_home.path()).await;
|
||||
let config = Arc::new(config);
|
||||
let model = ModelsManager::get_model_offline(config.model.as_deref());
|
||||
let session_configuration = SessionConfiguration {
|
||||
@@ -2884,7 +2961,6 @@ mod tests {
|
||||
sandbox_policy: config.sandbox_policy.clone(),
|
||||
cwd: config.cwd.clone(),
|
||||
original_config_do_not_use: Arc::clone(&config),
|
||||
exec_policy: Arc::new(RwLock::new(ExecPolicy::empty())),
|
||||
session_source: SessionSource::Exec,
|
||||
};
|
||||
|
||||
@@ -2932,15 +3008,10 @@ mod tests {
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn set_rate_limits_updates_plan_type_when_present() {
|
||||
#[tokio::test]
|
||||
async fn set_rate_limits_updates_plan_type_when_present() {
|
||||
let codex_home = tempfile::tempdir().expect("create temp dir");
|
||||
let config = Config::load_from_base_config_with_overrides(
|
||||
ConfigToml::default(),
|
||||
ConfigOverrides::default(),
|
||||
codex_home.path().to_path_buf(),
|
||||
)
|
||||
.expect("load default test config");
|
||||
let config = build_test_config(codex_home.path()).await;
|
||||
let config = Arc::new(config);
|
||||
let model = ModelsManager::get_model_offline(config.model.as_deref());
|
||||
let session_configuration = SessionConfiguration {
|
||||
@@ -2956,7 +3027,6 @@ mod tests {
|
||||
sandbox_policy: config.sandbox_policy.clone(),
|
||||
cwd: config.cwd.clone(),
|
||||
original_config_do_not_use: Arc::clone(&config),
|
||||
exec_policy: Arc::new(RwLock::new(ExecPolicy::empty())),
|
||||
session_source: SessionSource::Exec,
|
||||
};
|
||||
|
||||
@@ -3030,8 +3100,8 @@ mod tests {
|
||||
assert_eq!(expected, got);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn includes_timed_out_message() {
|
||||
#[tokio::test]
|
||||
async fn includes_timed_out_message() {
|
||||
let exec = ExecToolCallOutput {
|
||||
exit_code: 0,
|
||||
stdout: StreamOutput::new(String::new()),
|
||||
@@ -3040,7 +3110,7 @@ mod tests {
|
||||
duration: StdDuration::from_secs(1),
|
||||
timed_out: true,
|
||||
};
|
||||
let (_, turn_context) = make_session_and_context();
|
||||
let (_, turn_context) = make_session_and_context().await;
|
||||
|
||||
let out = format_exec_output_str(&exec, turn_context.truncation_policy);
|
||||
|
||||
@@ -3113,6 +3183,14 @@ mod tests {
|
||||
})
|
||||
}
|
||||
|
||||
async fn build_test_config(codex_home: &Path) -> Config {
|
||||
ConfigBuilder::default()
|
||||
.codex_home(codex_home.to_path_buf())
|
||||
.build()
|
||||
.await
|
||||
.expect("load default test config")
|
||||
}
|
||||
|
||||
fn otel_manager(
|
||||
conversation_id: ConversationId,
|
||||
config: &Config,
|
||||
@@ -3132,20 +3210,16 @@ mod tests {
|
||||
)
|
||||
}
|
||||
|
||||
pub(crate) fn make_session_and_context() -> (Session, TurnContext) {
|
||||
pub(crate) async fn make_session_and_context() -> (Session, TurnContext) {
|
||||
let (tx_event, _rx_event) = async_channel::unbounded();
|
||||
let codex_home = tempfile::tempdir().expect("create temp dir");
|
||||
let config = Config::load_from_base_config_with_overrides(
|
||||
ConfigToml::default(),
|
||||
ConfigOverrides::default(),
|
||||
codex_home.path().to_path_buf(),
|
||||
)
|
||||
.expect("load default test config");
|
||||
let config = build_test_config(codex_home.path()).await;
|
||||
let config = Arc::new(config);
|
||||
let conversation_id = ConversationId::default();
|
||||
let auth_manager =
|
||||
AuthManager::from_auth_for_testing(CodexAuth::from_api_key("Test API Key"));
|
||||
let models_manager = Arc::new(ModelsManager::new(auth_manager.clone()));
|
||||
let exec_policy = ExecPolicyManager::default();
|
||||
let model = ModelsManager::get_model_offline(config.model.as_deref());
|
||||
let session_configuration = SessionConfiguration {
|
||||
provider: config.model_provider.clone(),
|
||||
@@ -3160,7 +3234,6 @@ mod tests {
|
||||
sandbox_policy: config.sandbox_policy.clone(),
|
||||
cwd: config.cwd.clone(),
|
||||
original_config_do_not_use: Arc::clone(&config),
|
||||
exec_policy: Arc::new(RwLock::new(ExecPolicy::empty())),
|
||||
session_source: SessionSource::Exec,
|
||||
};
|
||||
let per_turn_config = Session::build_per_turn_config(&session_configuration);
|
||||
@@ -3186,9 +3259,10 @@ mod tests {
|
||||
rollout: Mutex::new(None),
|
||||
user_shell: Arc::new(default_user_shell()),
|
||||
show_raw_agent_reasoning: config.show_raw_agent_reasoning,
|
||||
exec_policy,
|
||||
auth_manager: auth_manager.clone(),
|
||||
otel_manager: otel_manager.clone(),
|
||||
models_manager,
|
||||
models_manager: Arc::clone(&models_manager),
|
||||
tool_approvals: Mutex::new(ApprovalStore::default()),
|
||||
skills_manager,
|
||||
};
|
||||
@@ -3219,24 +3293,20 @@ mod tests {
|
||||
|
||||
// Like make_session_and_context, but returns Arc<Session> and the event receiver
|
||||
// so tests can assert on emitted events.
|
||||
pub(crate) fn make_session_and_context_with_rx() -> (
|
||||
pub(crate) async fn make_session_and_context_with_rx() -> (
|
||||
Arc<Session>,
|
||||
Arc<TurnContext>,
|
||||
async_channel::Receiver<Event>,
|
||||
) {
|
||||
let (tx_event, rx_event) = async_channel::unbounded();
|
||||
let codex_home = tempfile::tempdir().expect("create temp dir");
let config = Config::load_from_base_config_with_overrides(
ConfigToml::default(),
ConfigOverrides::default(),
codex_home.path().to_path_buf(),
)
.expect("load default test config");
let config = build_test_config(codex_home.path()).await;
let config = Arc::new(config);
let conversation_id = ConversationId::default();
let auth_manager =
AuthManager::from_auth_for_testing(CodexAuth::from_api_key("Test API Key"));
let models_manager = Arc::new(ModelsManager::new(auth_manager.clone()));
let exec_policy = ExecPolicyManager::default();
let model = ModelsManager::get_model_offline(config.model.as_deref());
let session_configuration = SessionConfiguration {
provider: config.model_provider.clone(),
@@ -3251,7 +3321,6 @@ mod tests {
sandbox_policy: config.sandbox_policy.clone(),
cwd: config.cwd.clone(),
original_config_do_not_use: Arc::clone(&config),
exec_policy: Arc::new(RwLock::new(ExecPolicy::empty())),
session_source: SessionSource::Exec,
};
let per_turn_config = Session::build_per_turn_config(&session_configuration);
@@ -3277,9 +3346,10 @@ mod tests {
rollout: Mutex::new(None),
user_shell: Arc::new(default_user_shell()),
show_raw_agent_reasoning: config.show_raw_agent_reasoning,
exec_policy,
auth_manager: Arc::clone(&auth_manager),
otel_manager: otel_manager.clone(),
models_manager,
models_manager: Arc::clone(&models_manager),
tool_approvals: Mutex::new(ApprovalStore::default()),
skills_manager,
};
@@ -3310,7 +3380,7 @@ mod tests {

#[tokio::test]
async fn record_model_warning_appends_user_message() {
let (mut session, turn_context) = make_session_and_context();
let (mut session, turn_context) = make_session_and_context().await;
let mut features = Features::with_defaults();
features.enable(Feature::ModelWarnings);
session.features = features;
@@ -3369,7 +3439,7 @@ mod tests {
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
#[test_log::test]
async fn abort_regular_task_emits_turn_aborted_only() {
let (sess, tc, rx) = make_session_and_context_with_rx();
let (sess, tc, rx) = make_session_and_context_with_rx().await;
let input = vec![UserInput::Text {
text: "hello".to_string(),
}];
@@ -3398,7 +3468,7 @@ mod tests {

#[tokio::test]
async fn abort_gracefuly_emits_turn_aborted_only() {
let (sess, tc, rx) = make_session_and_context_with_rx();
let (sess, tc, rx) = make_session_and_context_with_rx().await;
let input = vec![UserInput::Text {
text: "hello".to_string(),
}];
@@ -3424,7 +3494,7 @@ mod tests {

#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn abort_review_task_emits_exited_then_aborted_and_records_history() {
let (sess, tc, rx) = make_session_and_context_with_rx();
let (sess, tc, rx) = make_session_and_context_with_rx().await;
let input = vec![UserInput::Text {
text: "start review".to_string(),
}];
@@ -3472,7 +3542,7 @@ mod tests {

#[tokio::test]
async fn fatal_tool_error_stops_turn_and_reports_error() {
let (session, turn_context, _rx) = make_session_and_context_with_rx();
let (session, turn_context, _rx) = make_session_and_context_with_rx().await;
let tools = {
session
.services
@@ -3635,7 +3705,7 @@ mod tests {
use crate::turn_diff_tracker::TurnDiffTracker;
use std::collections::HashMap;

let (session, mut turn_context_raw) = make_session_and_context();
let (session, mut turn_context_raw) = make_session_and_context().await;
// Ensure policy is NOT OnRequest so the early rejection path triggers
turn_context_raw.approval_policy = AskForApproval::OnFailure;
let session = Arc::new(session);
@@ -3766,7 +3836,7 @@ mod tests {
use crate::sandboxing::SandboxPermissions;
use crate::turn_diff_tracker::TurnDiffTracker;

let (session, mut turn_context_raw) = make_session_and_context();
let (session, mut turn_context_raw) = make_session_and_context().await;
turn_context_raw.approval_policy = AskForApproval::OnFailure;
let session = Arc::new(session);
let turn_context = Arc::new(turn_context_raw);
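The thread running through these hunks: the session test helpers become async, so every call site simply gains an `.await`. A minimal sketch of the migrated call shape, using only names from the hunks above:

    let (session, turn_context) = make_session_and_context().await;
    let (sess, tc, rx) = make_session_and_context_with_rx().await;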
@@ -25,7 +25,7 @@ use crate::codex::Session;
use crate::codex::TurnContext;
use crate::config::Config;
use crate::error::CodexErr;
use crate::openai_models::models_manager::ModelsManager;
use crate::models_manager::manager::ModelsManager;
use codex_protocol::protocol::InitialHistory;

/// Start an interactive sub-Codex conversation and return IO channels.
@@ -118,7 +118,11 @@ pub(crate) async fn run_codex_conversation_one_shot(
.await?;

// Send the initial input to kick off the one-shot turn.
io.submit(Op::UserInput { items: input }).await?;
io.submit(Op::UserInput {
items: input,
final_output_json_schema: None,
})
.await?;

// Bridge events so we can observe completion and shut down automatically.
let (tx_bridge, rx_bridge) = async_channel::bounded(SUBMISSION_CHANNEL_CAPACITY);
@@ -184,6 +188,10 @@ async fn forward_events(
id: _,
msg: EventMsg::AgentMessageDelta(_) | EventMsg::AgentReasoningDelta(_),
} => {}
Event {
id: _,
msg: EventMsg::TokenCount(_),
} => {}
Event {
id: _,
msg: EventMsg::SessionConfigured(_),
@@ -366,7 +374,7 @@ mod tests {
rx_event: rx_events,
});

let (session, ctx, _rx_evt) = crate::codex::make_session_and_context_with_rx();
let (session, ctx, _rx_evt) = crate::codex::make_session_and_context_with_rx().await;

let (tx_out, rx_out) = bounded(1);
tx_out

@@ -21,8 +21,11 @@ pub fn requires_initial_appoval(
match policy {
AskForApproval::Never | AskForApproval::OnFailure => false,
AskForApproval::OnRequest => {
// In DangerFullAccess, only prompt if the command looks dangerous.
if matches!(sandbox_policy, SandboxPolicy::DangerFullAccess) {
// In DangerFullAccess or ExternalSandbox, only prompt if the command looks dangerous.
if matches!(
sandbox_policy,
SandboxPolicy::DangerFullAccess | SandboxPolicy::ExternalSandbox { .. }
) {
return command_might_be_dangerous(command);
}

@@ -83,6 +86,7 @@ fn is_dangerous_to_call_with_exec(command: &[String]) -> bool {
#[cfg(test)]
mod tests {
use super::*;
use codex_protocol::protocol::NetworkAccess;

fn vec_str(items: &[&str]) -> Vec<String> {
items.iter().map(std::string::ToString::to_string).collect()
@@ -150,4 +154,23 @@ mod tests {
fn rm_f_is_dangerous() {
assert!(command_might_be_dangerous(&vec_str(&["rm", "-f", "/"])));
}

#[test]
fn external_sandbox_only_prompts_for_dangerous_commands() {
let external_policy = SandboxPolicy::ExternalSandbox {
network_access: NetworkAccess::Restricted,
};
assert!(!requires_initial_appoval(
AskForApproval::OnRequest,
&external_policy,
&vec_str(&["ls"]),
SandboxPermissions::UseDefault,
));
assert!(requires_initial_appoval(
AskForApproval::OnRequest,
&external_policy,
&vec_str(&["rm", "-rf", "/"]),
SandboxPermissions::UseDefault,
));
}
}
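The new `ExternalSandbox { .. }` arm reuses the existing DangerFullAccess rule: under `AskForApproval::OnRequest`, only commands flagged by `command_might_be_dangerous` trigger an approval prompt. A simplified sketch of that decision (hypothetical helper; the real `requires_initial_appoval` also takes `SandboxPermissions`):

    fn prompts_before_running(policy: AskForApproval, sandbox: &SandboxPolicy, cmd: &[String]) -> bool {
        match policy {
            AskForApproval::Never | AskForApproval::OnFailure => false,
            AskForApproval::OnRequest => match sandbox {
                // Full-access and externally sandboxed sessions prompt only for risky commands.
                SandboxPolicy::DangerFullAccess | SandboxPolicy::ExternalSandbox { .. } => {
                    command_might_be_dangerous(cmd)
                }
                // Assumption: other sandbox modes keep their pre-existing prompting rules.
                _ => true,
            },
            _ => true,
        }
    }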
@@ -86,6 +86,11 @@ async fn run_compact_task_inner(
model: turn_context.client.get_model(),
effort: turn_context.client.get_reasoning_effort(),
summary: turn_context.client.get_reasoning_summary(),
base_instructions: turn_context.base_instructions.clone(),
user_instructions: turn_context.user_instructions.clone(),
developer_instructions: turn_context.developer_instructions.clone(),
final_output_json_schema: turn_context.final_output_json_schema.clone(),
truncation_policy: Some(turn_context.truncation_policy.into()),
});
sess.persist_rollout_items(&[rollout_item]).await;


@@ -4,19 +4,25 @@ use std::sync::Arc;
use thiserror::Error;

#[derive(Debug, Error, PartialEq, Eq)]
#[error("{message}")]
pub struct ConstraintError {
pub message: String,
pub enum ConstraintError {
#[error("value `{candidate}` is not in the allowed set {allowed}")]
InvalidValue { candidate: String, allowed: String },

#[error("field `{field_name}` cannot be empty")]
EmptyField { field_name: String },
}

impl ConstraintError {
pub fn invalid_value(candidate: impl Into<String>, allowed: impl Into<String>) -> Self {
Self {
message: format!(
"value `{}` is not in the allowed set {}",
candidate.into(),
allowed.into()
),
Self::InvalidValue {
candidate: candidate.into(),
allowed: allowed.into(),
}
}

pub fn empty_field(field_name: impl Into<String>) -> Self {
Self::EmptyField {
field_name: field_name.into(),
}
}
}
@@ -57,6 +63,32 @@ impl<T: Send + Sync> Constrained<T> {
}
}

pub fn allow_only(value: T) -> Self
where
T: PartialEq + Send + Sync + fmt::Debug + Clone + 'static,
{
#[expect(clippy::expect_used)]
Self::new(value.clone(), move |candidate| {
if *candidate == value {
Ok(())
} else {
Err(ConstraintError::invalid_value(
format!("{candidate:?}"),
format!("{value:?}"),
))
}
})
.expect("initial value should always be valid")
}

/// Allow any value of T, using T's Default as the initial value.
pub fn allow_any_from_default() -> Self
where
T: Default,
{
Self::allow_any(T::default())
}

pub fn allow_values(initial_value: T, allowed: Vec<T>) -> ConstraintResult<Self>
where
T: PartialEq + Send + Sync + fmt::Debug + 'static,
@@ -129,6 +161,12 @@ mod tests {
assert_eq!(constrained.value(), -10);
}

#[test]
fn constrained_allow_any_default_uses_default_value() {
let constrained = Constrained::<i32>::allow_any_from_default();
assert_eq!(constrained.value(), 0);
}

#[test]
fn constrained_new_rejects_invalid_initial_value() {
let result = Constrained::new(0, |value| {
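Taken together, the structured `ConstraintError` variants and the new constructors make violations testable at the call site. A hedged usage sketch built only from the methods shown in this diff (`allow_only`, `allow_any_from_default`, `set`, `value`):

    let mut port = Constrained::allow_only(8080_i32);
    assert!(port.set(8080).is_ok());
    // A disallowed value now surfaces as a typed variant rather than a bare string:
    assert!(matches!(port.set(9090), Err(ConstraintError::InvalidValue { .. })));

    // Default-based construction, as exercised by the new test above:
    let anything = Constrained::<i32>::allow_any_from_default();
    assert_eq!(anything.value(), 0);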
@@ -694,7 +694,6 @@ mod tests {
use codex_protocol::openai_models::ReasoningEffort;
use pretty_assertions::assert_eq;
use tempfile::tempdir;
use tokio::runtime::Builder;
use toml::Value as TomlValue;

#[test]
@@ -1455,22 +1454,16 @@ model_reasoning_effort = "high"
assert_eq!(contents, initial_expected);
}

#[test]
fn blocking_set_asynchronous_helpers_available() {
let rt = Builder::new_current_thread()
.enable_all()
.build()
.expect("runtime");
#[tokio::test]
async fn blocking_set_asynchronous_helpers_available() {
let tmp = tempdir().expect("tmpdir");
let codex_home = tmp.path().to_path_buf();

rt.block_on(async {
ConfigEditsBuilder::new(&codex_home)
.set_hide_full_access_warning(true)
.apply()
.await
.expect("persist");
});
ConfigEditsBuilder::new(&codex_home)
.set_hide_full_access_warning(true)
.apply()
.await
.expect("persist");

let raw = std::fs::read_to_string(codex_home.join(CONFIG_TOML_FILE)).expect("read config");
let notice = toml::from_str::<TomlValue>(&raw)
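The pattern in this hunk is worth naming: instead of hand-building a current-thread runtime and wrapping the body in `rt.block_on(...)`, the test is annotated `#[tokio::test]` and awaits directly. The general before/after shape (body elided):

    // Before: let rt = Builder::new_current_thread().enable_all().build()?;
    //         rt.block_on(async { ... });
    // After: the attribute macro owns the runtime.
    #[tokio::test]
    async fn some_config_edit_persists() {
        // ... await the builder and assert on the written config.toml ...
    }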
@@ -8,10 +8,14 @@ use crate::config::types::OtelConfig;
use crate::config::types::OtelConfigToml;
use crate::config::types::OtelExporterKind;
use crate::config::types::SandboxWorkspaceWrite;
use crate::config::types::ScrollInputMode;
use crate::config::types::ShellEnvironmentPolicy;
use crate::config::types::ShellEnvironmentPolicyToml;
use crate::config::types::Tui;
use crate::config::types::UriBasedFileOpener;
use crate::config_loader::ConfigLayerStack;
use crate::config_loader::ConfigRequirements;
use crate::config_loader::LoaderOverrides;
use crate::config_loader::load_config_layers_state;
use crate::features::Feature;
use crate::features::FeatureOverrides;
@@ -34,12 +38,12 @@ use codex_protocol::config_types::SandboxMode;
use codex_protocol::config_types::TrustLevel;
use codex_protocol::config_types::Verbosity;
use codex_protocol::openai_models::ReasoningEffort;
use codex_protocol::openai_models::ReasoningSummaryFormat;
use codex_rmcp_client::OAuthCredentialsStoreMode;
use codex_utils_absolute_path::AbsolutePathBuf;
use codex_utils_absolute_path::AbsolutePathBufGuard;
use dirs::home_dir;
use serde::Deserialize;
use serde::Serialize;
use similar::DiffableStr;
use std::collections::BTreeMap;
use std::collections::HashMap;
@@ -90,6 +94,10 @@ pub(crate) fn test_config() -> Config {
/// Application configuration loaded from disk and merged with overrides.
#[derive(Debug, Clone, PartialEq)]
pub struct Config {
/// Provenance for how this [`Config`] was derived (merged layers + enforced
/// requirements).
pub config_layer_stack: ConfigLayerStack,

/// Optional override of model selection.
pub model: Option<String>,

@@ -111,7 +119,7 @@ pub struct Config {
/// Approval policy for executing commands.
pub approval_policy: Constrained<AskForApproval>,

pub sandbox_policy: SandboxPolicy,
pub sandbox_policy: Constrained<SandboxPolicy>,

/// True if the user passed in an override or set a value in config.toml
/// for either of approval_policy or sandbox_mode.
@@ -176,6 +184,58 @@ pub struct Config {
/// Show startup tooltips in the TUI welcome screen.
pub show_tooltips: bool,

/// Override the events-per-wheel-tick factor for TUI2 scroll normalization.
///
/// This is the same `tui.scroll_events_per_tick` value from `config.toml`, plumbed through the
/// merged [`Config`] object (see [`Tui`]) so TUI2 can normalize scroll event density per
/// terminal.
pub tui_scroll_events_per_tick: Option<u16>,

/// Override the number of lines applied per wheel tick in TUI2.
///
/// This is the same `tui.scroll_wheel_lines` value from `config.toml` (see [`Tui`]). TUI2
/// applies it to wheel-like scroll streams. Trackpad-like scrolling uses a separate
/// `tui.scroll_trackpad_lines` setting.
pub tui_scroll_wheel_lines: Option<u16>,

/// Override the number of lines per tick-equivalent used for trackpad scrolling in TUI2.
///
/// This is the same `tui.scroll_trackpad_lines` value from `config.toml` (see [`Tui`]).
pub tui_scroll_trackpad_lines: Option<u16>,

/// Trackpad acceleration: approximate number of events required to gain +1x speed in TUI2.
///
/// This is the same `tui.scroll_trackpad_accel_events` value from `config.toml` (see [`Tui`]).
pub tui_scroll_trackpad_accel_events: Option<u16>,

/// Trackpad acceleration: maximum multiplier applied to trackpad-like streams in TUI2.
///
/// This is the same `tui.scroll_trackpad_accel_max` value from `config.toml` (see [`Tui`]).
pub tui_scroll_trackpad_accel_max: Option<u16>,

/// Control how TUI2 interprets mouse scroll input (wheel vs trackpad).
///
/// This is the same `tui.scroll_mode` value from `config.toml` (see [`Tui`]).
pub tui_scroll_mode: ScrollInputMode,

/// Override the wheel tick detection threshold (ms) for TUI2 auto scroll mode.
///
/// This is the same `tui.scroll_wheel_tick_detect_max_ms` value from `config.toml` (see
/// [`Tui`]).
pub tui_scroll_wheel_tick_detect_max_ms: Option<u64>,

/// Override the wheel-like end-of-stream threshold (ms) for TUI2 auto scroll mode.
///
/// This is the same `tui.scroll_wheel_like_max_duration_ms` value from `config.toml` (see
/// [`Tui`]).
pub tui_scroll_wheel_like_max_duration_ms: Option<u64>,

/// Invert mouse scroll direction for TUI2.
///
/// This is the same `tui.scroll_invert` value from `config.toml` (see [`Tui`]) and is applied
/// consistently to both mouse wheels and trackpads.
pub tui_scroll_invert: bool,

/// The directory that should be treated as the current working directory
/// for the session. All relative paths inside the business-logic layer are
/// resolved against this path.
@@ -242,9 +302,6 @@ pub struct Config {
/// Optional override to force-enable reasoning summaries for the configured model.
pub model_supports_reasoning_summaries: Option<bool>,

/// Optional override to force reasoning summary format for the configured model.
pub model_reasoning_summary_format: Option<ReasoningSummaryFormat>,

/// Optional verbosity control for GPT-5 models (Responses API `text.verbosity`).
pub model_verbosity: Option<Verbosity>,

@@ -267,10 +324,6 @@ pub struct Config {
/// If set to `true`, used only the experimental unified exec tool.
pub use_experimental_unified_exec_tool: bool,

/// If set to `true`, use the experimental official Rust MCP client.
/// https://github.com/modelcontextprotocol/rust-sdk
pub use_experimental_use_rmcp_client: bool,

/// Settings for ghost snapshots (used for undo).
pub ghost_snapshot: GhostSnapshotConfig,

@@ -304,41 +357,131 @@ pub struct Config {
pub otel: crate::config::types::OtelConfig,
}

impl Config {
pub async fn load_with_cli_overrides(
cli_overrides: Vec<(String, TomlValue)>,
overrides: ConfigOverrides,
) -> std::io::Result<Self> {
let codex_home = find_codex_home()?;
#[derive(Debug, Clone, Default)]
pub struct ConfigBuilder {
codex_home: Option<PathBuf>,
cli_overrides: Option<Vec<(String, TomlValue)>>,
harness_overrides: Option<ConfigOverrides>,
loader_overrides: Option<LoaderOverrides>,
thread_agnostic: bool,
}

let root_value = load_resolved_config(
&codex_home,
impl ConfigBuilder {
pub fn codex_home(mut self, codex_home: PathBuf) -> Self {
self.codex_home = Some(codex_home);
self
}

/// Load a "thread-agnostic" config stack, which intentionally ignores any
/// in-repo `.codex/` config layers (because there is no cwd/project context).
pub fn thread_agnostic(mut self) -> Self {
self.thread_agnostic = true;
self
}

pub fn cli_overrides(mut self, cli_overrides: Vec<(String, TomlValue)>) -> Self {
self.cli_overrides = Some(cli_overrides);
self
}

pub fn harness_overrides(mut self, harness_overrides: ConfigOverrides) -> Self {
self.harness_overrides = Some(harness_overrides);
self
}

pub fn loader_overrides(mut self, loader_overrides: LoaderOverrides) -> Self {
self.loader_overrides = Some(loader_overrides);
self
}

pub async fn build(self) -> std::io::Result<Config> {
let Self {
codex_home,
cli_overrides,
crate::config_loader::LoaderOverrides::default(),
harness_overrides,
loader_overrides,
thread_agnostic,
} = self;
let codex_home = codex_home.map_or_else(find_codex_home, std::io::Result::Ok)?;
let cli_overrides = cli_overrides.unwrap_or_default();
let harness_overrides = harness_overrides.unwrap_or_default();
let loader_overrides = loader_overrides.unwrap_or_default();
let cwd = if thread_agnostic {
None
} else {
Some(match harness_overrides.cwd.as_deref() {
Some(path) => AbsolutePathBuf::try_from(path)?,
None => AbsolutePathBuf::current_dir()?,
})
};
let config_layer_stack =
load_config_layers_state(&codex_home, cwd, &cli_overrides, loader_overrides).await?;
let merged_toml = config_layer_stack.effective_config();

// Note that each layer in ConfigLayerStack should have resolved
// relative paths to absolute paths based on the parent folder of the
// respective config file, so we should be safe to deserialize without
// AbsolutePathBufGuard here.
let config_toml: ConfigToml = merged_toml
.try_into()
.map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidData, e))?;
Config::load_config_with_layer_stack(
config_toml,
harness_overrides,
codex_home,
config_layer_stack,
)
.await?;

let cfg = deserialize_config_toml_with_base(root_value, &codex_home).map_err(|e| {
tracing::error!("Failed to deserialize overridden config: {e}");
e
})?;

Self::load_from_base_config_with_overrides(cfg, overrides, codex_home)
}
}

impl Config {
/// This is the preferred way to create an instance of [Config].
pub async fn load_with_cli_overrides(
cli_overrides: Vec<(String, TomlValue)>,
) -> std::io::Result<Self> {
ConfigBuilder::default()
.cli_overrides(cli_overrides)
.build()
.await
}

/// This is a secondary way of creating [Config], which is appropriate when
/// the harness is meant to be used with a specific configuration that
/// ignores user settings. For example, the `codex exec` subcommand is
/// designed to use [AskForApproval::Never] exclusively.
///
/// Further, [ConfigOverrides] contains some options that are not supported
/// in [ConfigToml], such as `cwd` and `codex_linux_sandbox_exe`.
pub async fn load_with_cli_overrides_and_harness_overrides(
cli_overrides: Vec<(String, TomlValue)>,
harness_overrides: ConfigOverrides,
) -> std::io::Result<Self> {
ConfigBuilder::default()
.cli_overrides(cli_overrides)
.harness_overrides(harness_overrides)
.build()
.await
}
}
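`ConfigBuilder` consolidates the previous loader entry points behind one fluent API. A usage sketch based on the builder methods defined above (the path and override values are illustrative):

    let config = ConfigBuilder::default()
        .codex_home(PathBuf::from("/tmp/codex_home")) // illustrative location
        .cli_overrides(vec![(
            "model".to_string(),
            TomlValue::String("gpt-5".to_string()), // illustrative override
        )])
        .harness_overrides(ConfigOverrides::default())
        .build()
        .await?;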
/// DEPRECATED: Use [Config::load_with_cli_overrides()] instead because working
/// with [ConfigToml] directly means that [ConfigRequirements] have not been
/// applied yet, which risks failing to enforce required constraints.
pub async fn load_config_as_toml_with_cli_overrides(
codex_home: &Path,
cwd: &AbsolutePathBuf,
cli_overrides: Vec<(String, TomlValue)>,
) -> std::io::Result<ConfigToml> {
let root_value = load_resolved_config(
let config_layer_stack = load_config_layers_state(
codex_home,
cli_overrides,
crate::config_loader::LoaderOverrides::default(),
Some(cwd.clone()),
&cli_overrides,
LoaderOverrides::default(),
)
.await?;

let cfg = deserialize_config_toml_with_base(root_value, codex_home).map_err(|e| {
let merged_toml = config_layer_stack.effective_config();
let cfg = deserialize_config_toml_with_base(merged_toml, codex_home).map_err(|e| {
tracing::error!("Failed to deserialize overridden config: {e}");
e
})?;
@@ -346,15 +489,6 @@ pub async fn load_config_as_toml_with_cli_overrides(
Ok(cfg)
}

async fn load_resolved_config(
codex_home: &Path,
cli_overrides: Vec<(String, TomlValue)>,
overrides: crate::config_loader::LoaderOverrides,
) -> std::io::Result<TomlValue> {
let layers = load_config_layers_state(codex_home, &cli_overrides, overrides).await?;
Ok(layers.effective_config())
}

fn deserialize_config_toml_with_base(
root_value: TomlValue,
config_base_dir: &Path,
@@ -370,13 +504,22 @@ fn deserialize_config_toml_with_base(
pub async fn load_global_mcp_servers(
codex_home: &Path,
) -> std::io::Result<BTreeMap<String, McpServerConfig>> {
let root_value = load_resolved_config(
codex_home,
Vec::new(),
crate::config_loader::LoaderOverrides::default(),
)
.await?;
let Some(servers_value) = root_value.get("mcp_servers") else {
// In general, Config::load_with_cli_overrides() should be used to load the
// full config with requirements.toml applied, but in this case, we need
// access to the raw TOML in order to warn the user about deprecated fields.
//
// Note that a more precise way to do this would be to audit the individual
// config layers for deprecated fields rather than reporting on the merged
// result.
let cli_overrides = Vec::<(String, TomlValue)>::new();
// There is no cwd/project context for this query, so this will not include
// MCP servers defined in in-repo .codex/ folders.
let cwd: Option<AbsolutePathBuf> = None;
let config_layer_stack =
load_config_layers_state(codex_home, cwd, &cli_overrides, LoaderOverrides::default())
.await?;
let merged_toml = config_layer_stack.effective_config();
let Some(servers_value) = merged_toml.get("mcp_servers") else {
return Ok(BTreeMap::new());
};
@@ -535,7 +678,7 @@ pub fn set_default_oss_provider(codex_home: &Path, provider: &str) -> std::io::R
}

/// Base config deserialized from ~/.codex/config.toml.
#[derive(Deserialize, Debug, Clone, Default, PartialEq)]
#[derive(Serialize, Deserialize, Debug, Clone, Default, PartialEq)]
pub struct ConfigToml {
/// Optional override of model selection.
pub model: Option<String>,
@@ -651,9 +794,6 @@ pub struct ConfigToml {
/// Override to force-enable reasoning summaries for the configured model.
pub model_supports_reasoning_summaries: Option<bool>,

/// Override to force reasoning summary format for the configured model.
pub model_reasoning_summary_format: Option<ReasoningSummaryFormat>,

/// Base URL for requests to ChatGPT (as opposed to the OpenAI API).
pub chatgpt_base_url: Option<String>,

@@ -670,6 +810,11 @@ pub struct ConfigToml {
#[serde(default)]
pub ghost_snapshot: Option<GhostSnapshotToml>,

/// Markers used to detect the project root when searching parent
/// directories for `.codex` folders. Defaults to [".git"] when unset.
#[serde(default)]
pub project_root_markers: Option<Vec<String>>,

/// When `true`, checks for Codex updates on startup and surfaces update prompts.
/// Set to `false` only if your Codex updates are centrally managed.
/// Defaults to `true`.
@@ -694,7 +839,6 @@ pub struct ConfigToml {
pub experimental_instructions_file: Option<AbsolutePathBuf>,
pub experimental_compact_prompt_file: Option<AbsolutePathBuf>,
pub experimental_use_unified_exec_tool: Option<bool>,
pub experimental_use_rmcp_client: Option<bool>,
pub experimental_use_freeform_apply_patch: Option<bool>,
/// Preferred OSS provider for local models, e.g. "lmstudio" or "ollama".
pub oss_provider: Option<String>,
@@ -725,7 +869,7 @@ impl From<ConfigToml> for UserSavedConfig {
}
}

#[derive(Deserialize, Debug, Clone, PartialEq, Eq)]
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)]
pub struct ProjectConfig {
pub trust_level: Option<TrustLevel>,
}
@@ -740,7 +884,7 @@ impl ProjectConfig {
}
}

#[derive(Deserialize, Debug, Clone, Default, PartialEq)]
#[derive(Serialize, Deserialize, Debug, Clone, Default, PartialEq)]
pub struct ToolsToml {
#[serde(default, alias = "web_search_request")]
pub web_search: Option<bool>,
@@ -759,7 +903,7 @@ impl From<ToolsToml> for Tools {
}
}

#[derive(Deserialize, Debug, Clone, Default, PartialEq, Eq)]
#[derive(Serialize, Deserialize, Debug, Clone, Default, PartialEq, Eq)]
pub struct GhostSnapshotToml {
/// Exclude untracked files larger than this many bytes from ghost snapshots.
#[serde(alias = "ignore_untracked_files_over_bytes")]
@@ -927,13 +1071,24 @@ pub fn resolve_oss_provider(
}

impl Config {
/// Meant to be used exclusively for tests: `load_with_overrides()` should
/// be used in all other cases.
pub fn load_from_base_config_with_overrides(
#[cfg(test)]
fn load_from_base_config_with_overrides(
cfg: ConfigToml,
overrides: ConfigOverrides,
codex_home: PathBuf,
) -> std::io::Result<Self> {
// Note this ignores requirements.toml enforcement for tests.
let config_layer_stack = ConfigLayerStack::default();
Self::load_config_with_layer_stack(cfg, overrides, codex_home, config_layer_stack)
}

fn load_config_with_layer_stack(
cfg: ConfigToml,
overrides: ConfigOverrides,
codex_home: PathBuf,
config_layer_stack: ConfigLayerStack,
) -> std::io::Result<Self> {
let requirements = config_layer_stack.requirements().clone();
let user_instructions = Self::load_instructions(Some(&codex_home));

// Destructure ConfigOverrides fully to ensure all overrides are applied.
@@ -1038,7 +1193,8 @@ impl Config {
AskForApproval::default()
}
});
let approval_policy = Constrained::allow_any(approval_policy);
// TODO(dylan): We should be able to leverage ConfigLayerStack so that
// we can reliably check this at every config level.
let did_user_set_custom_approval_policy_or_sandbox_mode = approval_policy_override
.is_some()
|| config_profile.approval_policy.is_some()
@@ -1099,7 +1255,6 @@ impl Config {
let include_apply_patch_tool_flag = features.enabled(Feature::ApplyPatchFreeform);
let tools_web_search_request = features.enabled(Feature::WebSearchRequest);
let use_experimental_unified_exec_tool = features.enabled(Feature::UnifiedExec);
let use_experimental_use_rmcp_client = features.enabled(Feature::RmcpClient);

let forced_chatgpt_workspace_id =
cfg.forced_chatgpt_workspace_id.as_ref().and_then(|value| {
@@ -1155,6 +1310,20 @@ impl Config {

let check_for_update_on_startup = cfg.check_for_update_on_startup.unwrap_or(true);

// Ensure that every field of ConfigRequirements is applied to the final
// Config.
let ConfigRequirements {
approval_policy: mut constrained_approval_policy,
sandbox_policy: mut constrained_sandbox_policy,
} = requirements;

constrained_approval_policy
.set(approval_policy)
.map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidInput, format!("{e}")))?;
constrained_sandbox_policy
.set(sandbox_policy)
.map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidInput, format!("{e}")))?;

let config = Self {
model,
review_model,
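Because `ConfigRequirements` is destructured and applied via `set`, a managed layer can now veto the user-resolved policies instead of being silently merged over. A sketch of the failure mode this enables, assuming a requirement pinned to `Never`:

    let mut required = Constrained::allow_only(AskForApproval::Never);
    assert!(required.set(AskForApproval::Never).is_ok());
    // A looser policy from the user/CLI is rejected and becomes the InvalidInput error above:
    assert!(required.set(AskForApproval::OnRequest).is_err());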
@@ -1163,8 +1332,8 @@ impl Config {
model_provider_id,
model_provider,
cwd: resolved_cwd,
approval_policy,
sandbox_policy,
approval_policy: constrained_approval_policy,
sandbox_policy: constrained_sandbox_policy,
did_user_set_custom_approval_policy_or_sandbox_mode,
forced_auto_mode_downgraded_on_windows,
shell_environment_policy,
@@ -1197,6 +1366,7 @@ impl Config {
.collect(),
tool_output_token_limit: cfg.tool_output_token_limit,
codex_home,
config_layer_stack,
history,
file_opener: cfg.file_opener.unwrap_or(UriBasedFileOpener::VsCode),
codex_linux_sandbox_exe,
@@ -1214,7 +1384,6 @@ impl Config {
.or(cfg.model_reasoning_summary)
.unwrap_or_default(),
model_supports_reasoning_summaries: cfg.model_supports_reasoning_summaries,
model_reasoning_summary_format: cfg.model_reasoning_summary_format.clone(),
model_verbosity: config_profile.model_verbosity.or(cfg.model_verbosity),
chatgpt_base_url: config_profile
.chatgpt_base_url
@@ -1225,7 +1394,6 @@ impl Config {
include_apply_patch_tool: include_apply_patch_tool_flag,
tools_web_search_request,
use_experimental_unified_exec_tool,
use_experimental_use_rmcp_client,
ghost_snapshot,
features,
active_profile: active_profile_name,
@@ -1241,6 +1409,27 @@ impl Config {
.unwrap_or_default(),
animations: cfg.tui.as_ref().map(|t| t.animations).unwrap_or(true),
show_tooltips: cfg.tui.as_ref().map(|t| t.show_tooltips).unwrap_or(true),
tui_scroll_events_per_tick: cfg.tui.as_ref().and_then(|t| t.scroll_events_per_tick),
tui_scroll_wheel_lines: cfg.tui.as_ref().and_then(|t| t.scroll_wheel_lines),
tui_scroll_trackpad_lines: cfg.tui.as_ref().and_then(|t| t.scroll_trackpad_lines),
tui_scroll_trackpad_accel_events: cfg
.tui
.as_ref()
.and_then(|t| t.scroll_trackpad_accel_events),
tui_scroll_trackpad_accel_max: cfg
.tui
.as_ref()
.and_then(|t| t.scroll_trackpad_accel_max),
tui_scroll_mode: cfg.tui.as_ref().map(|t| t.scroll_mode).unwrap_or_default(),
tui_scroll_wheel_tick_detect_max_ms: cfg
.tui
.as_ref()
.and_then(|t| t.scroll_wheel_tick_detect_max_ms),
tui_scroll_wheel_like_max_duration_ms: cfg
.tui
.as_ref()
.and_then(|t| t.scroll_wheel_like_max_duration_ms),
tui_scroll_invert: cfg.tui.as_ref().map(|t| t.scroll_invert).unwrap_or(false),
otel: {
let t: OtelConfigToml = cfg.otel.unwrap_or_default();
let log_user_prompt = t.log_user_prompt.unwrap_or(false);
@@ -1413,8 +1602,23 @@ persistence = "none"
.expect("TUI config without notifications should succeed");
let tui = parsed.tui.expect("config should include tui section");

assert_eq!(tui.notifications, Notifications::Enabled(true));
assert!(tui.show_tooltips);
assert_eq!(
tui,
Tui {
notifications: Notifications::Enabled(true),
animations: true,
show_tooltips: true,
scroll_events_per_tick: None,
scroll_wheel_lines: None,
scroll_trackpad_lines: None,
scroll_trackpad_accel_events: None,
scroll_trackpad_accel_max: None,
scroll_mode: ScrollInputMode::Auto,
scroll_wheel_tick_detect_max_ms: None,
scroll_wheel_like_max_duration_ms: None,
scroll_invert: false,
}
);
}
#[test]
@@ -1586,12 +1790,12 @@ trust_level = "trusted"
config.forced_auto_mode_downgraded_on_windows,
"expected workspace-write request to be downgraded on Windows"
);
match config.sandbox_policy {
SandboxPolicy::ReadOnly => {}
match config.sandbox_policy.get() {
&SandboxPolicy::ReadOnly => {}
other => panic!("expected read-only policy on Windows, got {other:?}"),
}
} else {
match config.sandbox_policy {
match config.sandbox_policy.get() {
SandboxPolicy::WorkspaceWrite { writable_roots, .. } => {
assert_eq!(
writable_roots
@@ -1723,8 +1927,8 @@ trust_level = "trusted"
)?;

assert!(matches!(
config.sandbox_policy,
SandboxPolicy::DangerFullAccess
config.sandbox_policy.get(),
&SandboxPolicy::DangerFullAccess
));
assert!(config.did_user_set_custom_approval_policy_or_sandbox_mode);

@@ -1760,11 +1964,14 @@ trust_level = "trusted"
)?;

if cfg!(target_os = "windows") {
assert!(matches!(config.sandbox_policy, SandboxPolicy::ReadOnly));
assert!(matches!(
config.sandbox_policy.get(),
SandboxPolicy::ReadOnly
));
assert!(config.forced_auto_mode_downgraded_on_windows);
} else {
assert!(matches!(
config.sandbox_policy,
config.sandbox_policy.get(),
SandboxPolicy::WorkspaceWrite { .. }
));
assert!(!config.forced_auto_mode_downgraded_on_windows);
@@ -1800,7 +2007,6 @@ trust_level = "trusted"
let codex_home = TempDir::new()?;
let cfg = ConfigToml {
experimental_use_unified_exec_tool: Some(true),
experimental_use_rmcp_client: Some(true),
experimental_use_freeform_apply_patch: Some(true),
..Default::default()
};
@@ -1813,12 +2019,10 @@ trust_level = "trusted"

assert!(config.features.enabled(Feature::ApplyPatchFreeform));
assert!(config.features.enabled(Feature::UnifiedExec));
assert!(config.features.enabled(Feature::RmcpClient));

assert!(config.include_apply_patch_tool);

assert!(config.use_experimental_unified_exec_tool);
assert!(config.use_experimental_use_rmcp_client);

Ok(())
}
@@ -1854,18 +2058,24 @@ trust_level = "trusted"
std::fs::write(&config_path, "mcp_oauth_credentials_store = \"file\"\n")?;
std::fs::write(&managed_path, "mcp_oauth_credentials_store = \"keyring\"\n")?;

let overrides = crate::config_loader::LoaderOverrides {
let overrides = LoaderOverrides {
managed_config_path: Some(managed_path.clone()),
#[cfg(target_os = "macos")]
managed_preferences_base64: None,
macos_managed_config_requirements_base64: None,
};

let root_value = load_resolved_config(codex_home.path(), Vec::new(), overrides).await?;
let cfg =
deserialize_config_toml_with_base(root_value, codex_home.path()).map_err(|e| {
tracing::error!("Failed to deserialize overridden config: {e}");
e
})?;
let cwd = AbsolutePathBuf::try_from(codex_home.path())?;
let config_layer_stack =
load_config_layers_state(codex_home.path(), Some(cwd), &Vec::new(), overrides).await?;
let cfg = deserialize_config_toml_with_base(
config_layer_stack.effective_config(),
codex_home.path(),
)
.map_err(|e| {
tracing::error!("Failed to deserialize overridden config: {e}");
e
})?;
assert_eq!(
cfg.mcp_oauth_credentials_store,
Some(OAuthCredentialsStoreMode::Keyring),
@@ -1884,6 +2094,43 @@ trust_level = "trusted"
Ok(())
}
#[tokio::test]
async fn config_builder_thread_agnostic_ignores_project_layers() -> anyhow::Result<()> {
let tmp = TempDir::new()?;
let codex_home = tmp.path().join("codex_home");
std::fs::create_dir_all(&codex_home)?;
std::fs::write(codex_home.join(CONFIG_TOML_FILE), "model = \"from-user\"\n")?;

let project = tmp.path().join("project");
std::fs::create_dir_all(project.join(".codex"))?;
std::fs::write(
project.join(".codex").join(CONFIG_TOML_FILE),
"model = \"from-project\"\n",
)?;

let harness_overrides = ConfigOverrides {
cwd: Some(project),
..Default::default()
};

let with_project_layers = ConfigBuilder::default()
.codex_home(codex_home.clone())
.harness_overrides(harness_overrides.clone())
.build()
.await?;
assert_eq!(with_project_layers.model.as_deref(), Some("from-project"));

let thread_agnostic = ConfigBuilder::default()
.codex_home(codex_home)
.harness_overrides(harness_overrides)
.thread_agnostic()
.build()
.await?;
assert_eq!(thread_agnostic.model.as_deref(), Some("from-user"));

Ok(())
}

#[tokio::test]
async fn load_global_mcp_servers_returns_empty_if_missing() -> anyhow::Result<()> {
let codex_home = TempDir::new()?;
@@ -1969,24 +2216,30 @@ trust_level = "trusted"
)?;
std::fs::write(&managed_path, "model = \"managed_config\"\n")?;

let overrides = crate::config_loader::LoaderOverrides {
let overrides = LoaderOverrides {
managed_config_path: Some(managed_path),
#[cfg(target_os = "macos")]
managed_preferences_base64: None,
macos_managed_config_requirements_base64: None,
};

let root_value = load_resolved_config(
let cwd = AbsolutePathBuf::try_from(codex_home.path())?;
let config_layer_stack = load_config_layers_state(
codex_home.path(),
vec![("model".to_string(), TomlValue::String("cli".to_string()))],
Some(cwd),
&[("model".to_string(), TomlValue::String("cli".to_string()))],
overrides,
)
.await?;

let cfg =
deserialize_config_toml_with_base(root_value, codex_home.path()).map_err(|e| {
tracing::error!("Failed to deserialize overridden config: {e}");
e
})?;
let cfg = deserialize_config_toml_with_base(
config_layer_stack.effective_config(),
codex_home.path(),
)
.map_err(|e| {
tracing::error!("Failed to deserialize overridden config: {e}");
e
})?;

assert_eq!(cfg.model.as_deref(), Some("managed_config"));
Ok(())
@@ -2955,7 +3208,7 @@ model_verbosity = "high"
model_provider_id: "openai".to_string(),
model_provider: fixture.openai_provider.clone(),
approval_policy: Constrained::allow_any(AskForApproval::Never),
sandbox_policy: SandboxPolicy::new_read_only_policy(),
sandbox_policy: Constrained::allow_any(SandboxPolicy::new_read_only_policy()),
did_user_set_custom_approval_policy_or_sandbox_mode: true,
forced_auto_mode_downgraded_on_windows: false,
shell_environment_policy: ShellEnvironmentPolicy::default(),
@@ -2970,6 +3223,7 @@ model_verbosity = "high"
project_doc_fallback_filenames: Vec::new(),
tool_output_token_limit: None,
codex_home: fixture.codex_home(),
config_layer_stack: Default::default(),
history: History::default(),
file_opener: UriBasedFileOpener::VsCode,
codex_linux_sandbox_exe: None,
@@ -2978,7 +3232,6 @@ model_verbosity = "high"
model_reasoning_effort: Some(ReasoningEffort::High),
model_reasoning_summary: ReasoningSummary::Detailed,
model_supports_reasoning_summaries: None,
model_reasoning_summary_format: None,
model_verbosity: None,
chatgpt_base_url: "https://chatgpt.com/backend-api/".to_string(),
base_instructions: None,
@@ -2989,7 +3242,6 @@ model_verbosity = "high"
include_apply_patch_tool: false,
tools_web_search_request: false,
use_experimental_unified_exec_tool: false,
use_experimental_use_rmcp_client: false,
ghost_snapshot: GhostSnapshotConfig::default(),
features: Features::with_defaults(),
active_profile: Some("o3".to_string()),
@@ -3001,6 +3253,15 @@ model_verbosity = "high"
tui_notifications: Default::default(),
animations: true,
show_tooltips: true,
tui_scroll_events_per_tick: None,
tui_scroll_wheel_lines: None,
tui_scroll_trackpad_lines: None,
tui_scroll_trackpad_accel_events: None,
tui_scroll_trackpad_accel_max: None,
tui_scroll_mode: ScrollInputMode::Auto,
tui_scroll_wheel_tick_detect_max_ms: None,
tui_scroll_wheel_like_max_duration_ms: None,
tui_scroll_invert: false,
otel: OtelConfig::default(),
},
o3_profile_config
@@ -3030,7 +3291,7 @@ model_verbosity = "high"
model_provider_id: "openai-chat-completions".to_string(),
model_provider: fixture.openai_chat_completions_provider.clone(),
approval_policy: Constrained::allow_any(AskForApproval::UnlessTrusted),
sandbox_policy: SandboxPolicy::new_read_only_policy(),
sandbox_policy: Constrained::allow_any(SandboxPolicy::new_read_only_policy()),
did_user_set_custom_approval_policy_or_sandbox_mode: true,
forced_auto_mode_downgraded_on_windows: false,
shell_environment_policy: ShellEnvironmentPolicy::default(),
@@ -3045,6 +3306,7 @@ model_verbosity = "high"
project_doc_fallback_filenames: Vec::new(),
tool_output_token_limit: None,
codex_home: fixture.codex_home(),
config_layer_stack: Default::default(),
history: History::default(),
file_opener: UriBasedFileOpener::VsCode,
codex_linux_sandbox_exe: None,
@@ -3053,7 +3315,6 @@ model_verbosity = "high"
model_reasoning_effort: None,
model_reasoning_summary: ReasoningSummary::default(),
model_supports_reasoning_summaries: None,
model_reasoning_summary_format: None,
model_verbosity: None,
chatgpt_base_url: "https://chatgpt.com/backend-api/".to_string(),
base_instructions: None,
@@ -3064,7 +3325,6 @@ model_verbosity = "high"
include_apply_patch_tool: false,
tools_web_search_request: false,
use_experimental_unified_exec_tool: false,
use_experimental_use_rmcp_client: false,
ghost_snapshot: GhostSnapshotConfig::default(),
features: Features::with_defaults(),
active_profile: Some("gpt3".to_string()),
@@ -3076,6 +3336,15 @@ model_verbosity = "high"
tui_notifications: Default::default(),
animations: true,
show_tooltips: true,
tui_scroll_events_per_tick: None,
tui_scroll_wheel_lines: None,
tui_scroll_trackpad_lines: None,
tui_scroll_trackpad_accel_events: None,
tui_scroll_trackpad_accel_max: None,
tui_scroll_mode: ScrollInputMode::Auto,
tui_scroll_wheel_tick_detect_max_ms: None,
tui_scroll_wheel_like_max_duration_ms: None,
tui_scroll_invert: false,
otel: OtelConfig::default(),
};
@@ -3120,7 +3389,7 @@ model_verbosity = "high"
model_provider_id: "openai".to_string(),
model_provider: fixture.openai_provider.clone(),
approval_policy: Constrained::allow_any(AskForApproval::OnFailure),
sandbox_policy: SandboxPolicy::new_read_only_policy(),
sandbox_policy: Constrained::allow_any(SandboxPolicy::new_read_only_policy()),
did_user_set_custom_approval_policy_or_sandbox_mode: true,
forced_auto_mode_downgraded_on_windows: false,
shell_environment_policy: ShellEnvironmentPolicy::default(),
@@ -3135,6 +3404,7 @@ model_verbosity = "high"
project_doc_fallback_filenames: Vec::new(),
tool_output_token_limit: None,
codex_home: fixture.codex_home(),
config_layer_stack: Default::default(),
history: History::default(),
file_opener: UriBasedFileOpener::VsCode,
codex_linux_sandbox_exe: None,
@@ -3143,7 +3413,6 @@ model_verbosity = "high"
model_reasoning_effort: None,
model_reasoning_summary: ReasoningSummary::default(),
model_supports_reasoning_summaries: None,
model_reasoning_summary_format: None,
model_verbosity: None,
chatgpt_base_url: "https://chatgpt.com/backend-api/".to_string(),
base_instructions: None,
@@ -3154,7 +3423,6 @@ model_verbosity = "high"
include_apply_patch_tool: false,
tools_web_search_request: false,
use_experimental_unified_exec_tool: false,
use_experimental_use_rmcp_client: false,
ghost_snapshot: GhostSnapshotConfig::default(),
features: Features::with_defaults(),
active_profile: Some("zdr".to_string()),
@@ -3166,6 +3434,15 @@ model_verbosity = "high"
tui_notifications: Default::default(),
animations: true,
show_tooltips: true,
tui_scroll_events_per_tick: None,
tui_scroll_wheel_lines: None,
tui_scroll_trackpad_lines: None,
tui_scroll_trackpad_accel_events: None,
tui_scroll_trackpad_accel_max: None,
tui_scroll_mode: ScrollInputMode::Auto,
tui_scroll_wheel_tick_detect_max_ms: None,
tui_scroll_wheel_like_max_duration_ms: None,
tui_scroll_invert: false,
otel: OtelConfig::default(),
};

@@ -3196,7 +3473,7 @@ model_verbosity = "high"
model_provider_id: "openai".to_string(),
model_provider: fixture.openai_provider.clone(),
approval_policy: Constrained::allow_any(AskForApproval::OnFailure),
sandbox_policy: SandboxPolicy::new_read_only_policy(),
sandbox_policy: Constrained::allow_any(SandboxPolicy::new_read_only_policy()),
did_user_set_custom_approval_policy_or_sandbox_mode: true,
forced_auto_mode_downgraded_on_windows: false,
shell_environment_policy: ShellEnvironmentPolicy::default(),
@@ -3211,6 +3488,7 @@ model_verbosity = "high"
project_doc_fallback_filenames: Vec::new(),
tool_output_token_limit: None,
codex_home: fixture.codex_home(),
config_layer_stack: Default::default(),
history: History::default(),
file_opener: UriBasedFileOpener::VsCode,
codex_linux_sandbox_exe: None,
@@ -3219,7 +3497,6 @@ model_verbosity = "high"
model_reasoning_effort: Some(ReasoningEffort::High),
model_reasoning_summary: ReasoningSummary::Detailed,
model_supports_reasoning_summaries: None,
model_reasoning_summary_format: None,
model_verbosity: Some(Verbosity::High),
chatgpt_base_url: "https://chatgpt.com/backend-api/".to_string(),
base_instructions: None,
@@ -3230,7 +3507,6 @@ model_verbosity = "high"
include_apply_patch_tool: false,
tools_web_search_request: false,
use_experimental_unified_exec_tool: false,
use_experimental_use_rmcp_client: false,
ghost_snapshot: GhostSnapshotConfig::default(),
features: Features::with_defaults(),
active_profile: Some("gpt5".to_string()),
@@ -3242,6 +3518,15 @@ model_verbosity = "high"
tui_notifications: Default::default(),
animations: true,
show_tooltips: true,
tui_scroll_events_per_tick: None,
tui_scroll_wheel_lines: None,
tui_scroll_trackpad_lines: None,
tui_scroll_trackpad_accel_events: None,
tui_scroll_trackpad_accel_max: None,
tui_scroll_mode: ScrollInputMode::Auto,
tui_scroll_wheel_tick_detect_max_ms: None,
tui_scroll_wheel_like_max_duration_ms: None,
tui_scroll_invert: false,
otel: OtelConfig::default(),
};

@@ -3541,12 +3826,15 @@ trust_level = "untrusted"
// Verify that untrusted projects still get WorkspaceWrite sandbox (or ReadOnly on Windows)
if cfg!(target_os = "windows") {
assert!(
matches!(config.sandbox_policy, SandboxPolicy::ReadOnly),
matches!(config.sandbox_policy.get(), SandboxPolicy::ReadOnly),
"Expected ReadOnly on Windows"
);
} else {
assert!(
matches!(config.sandbox_policy, SandboxPolicy::WorkspaceWrite { .. }),
matches!(
config.sandbox_policy.get(),
SandboxPolicy::WorkspaceWrite { .. }
),
"Expected WorkspaceWrite sandbox for untrusted project"
);
}
@@ -1,5 +1,6 @@
use codex_utils_absolute_path::AbsolutePathBuf;
use serde::Deserialize;
use serde::Serialize;

use crate::protocol::AskForApproval;
use codex_protocol::config_types::ReasoningSummary;
@@ -9,7 +10,7 @@ use codex_protocol::openai_models::ReasoningEffort;

/// Collection of common configuration options that a user can define as a unit
/// in `config.toml`.
#[derive(Debug, Clone, Default, PartialEq, Deserialize)]
#[derive(Debug, Clone, Default, PartialEq, Serialize, Deserialize)]
pub struct ConfigProfile {
pub model: Option<String>,
/// The key in the `model_providers` map identifying the
@@ -25,7 +26,6 @@ pub struct ConfigProfile {
pub experimental_compact_prompt_file: Option<AbsolutePathBuf>,
pub include_apply_patch_tool: Option<bool>,
pub experimental_use_unified_exec_tool: Option<bool>,
pub experimental_use_rmcp_client: Option<bool>,
pub experimental_use_freeform_apply_patch: Option<bool>,
pub tools_web_search: Option<bool>,
pub tools_view_image: Option<bool>,
@@ -20,7 +20,9 @@ use codex_app_server_protocol::ConfigWriteResponse;
use codex_app_server_protocol::MergeStrategy;
use codex_app_server_protocol::OverriddenMetadata;
use codex_app_server_protocol::WriteStatus;
use codex_utils_absolute_path::AbsolutePathBuf;
use serde_json::Value as JsonValue;
use std::borrow::Cow;
use std::path::Path;
use std::path::PathBuf;
use thiserror::Error;
@@ -130,7 +132,7 @@ impl ConfigService {
params: ConfigReadParams,
) -> Result<ConfigReadResponse, ConfigServiceError> {
let layers = self
.load_layers_state()
.load_thread_agnostic_config()
.await
.map_err(|err| ConfigServiceError::io("failed to read configuration layers", err))?;

@@ -146,7 +148,13 @@ impl ConfigService {
Ok(ConfigReadResponse {
config,
origins: layers.origins(),
layers: params.include_layers.then(|| layers.layers_high_to_low()),
layers: params.include_layers.then(|| {
layers
.layers_high_to_low()
.iter()
.map(|layer| layer.as_layer())
.collect()
}),
})
}

@@ -177,7 +185,7 @@ impl ConfigService {
&self,
) -> Result<codex_app_server_protocol::UserSavedConfig, ConfigServiceError> {
let layers = self
.load_layers_state()
.load_thread_agnostic_config()
.await
.map_err(|err| ConfigServiceError::io("failed to load configuration", err))?;

@@ -194,11 +202,14 @@ impl ConfigService {
expected_version: Option<String>,
edits: Vec<(String, JsonValue, MergeStrategy)>,
) -> Result<ConfigWriteResponse, ConfigServiceError> {
let allowed_path = self.codex_home.join(CONFIG_TOML_FILE);
let provided_path = file_path
.as_ref()
.map(PathBuf::from)
.unwrap_or_else(|| allowed_path.clone());
let allowed_path =
AbsolutePathBuf::resolve_path_against_base(CONFIG_TOML_FILE, &self.codex_home)
.map_err(|err| ConfigServiceError::io("failed to resolve user config path", err))?;
let provided_path = match file_path {
Some(path) => AbsolutePathBuf::from_absolute_path(PathBuf::from(path))
.map_err(|err| ConfigServiceError::io("failed to resolve user config path", err))?,
None => allowed_path.clone(),
};

if !paths_match(&allowed_path, &provided_path) {
return Err(ConfigServiceError::write(
@@ -208,12 +219,16 @@ impl ConfigService {
}

let layers = self
.load_layers_state()
.load_thread_agnostic_config()
.await
.map_err(|err| ConfigServiceError::io("failed to load configuration", err))?;
let user_layer = match layers.get_user_layer() {
Some(layer) => Cow::Borrowed(layer),
None => Cow::Owned(create_empty_user_layer(&allowed_path).await?),
};

if let Some(expected) = expected_version.as_deref()
&& expected != layers.user.version
&& expected != user_layer.version
{
return Err(ConfigServiceError::write(
ConfigWriteErrorCode::ConfigVersionConflict,
@@ -221,7 +236,7 @@ impl ConfigService {
));
}

let mut user_config = layers.user.config.clone();
let mut user_config = user_layer.config.clone();
let mut parsed_segments = Vec::new();
let mut config_edits = Vec::new();
@@ -273,7 +288,7 @@ impl ConfigService {
)
})?;

let updated_layers = layers.with_user_config(user_config.clone());
let updated_layers = layers.with_user_config(&provided_path, user_config.clone());
let effective = updated_layers.effective_config();
validate_config(&effective).map_err(|err| {
ConfigServiceError::write(
@@ -296,23 +311,31 @@ impl ConfigService {
.map(|_| WriteStatus::OkOverridden)
.unwrap_or(WriteStatus::Ok);

let file_path = provided_path
.canonicalize()
.unwrap_or(provided_path.clone())
.display()
.to_string();

Ok(ConfigWriteResponse {
status,
version: updated_layers.user.version.clone(),
file_path,
version: updated_layers
.get_user_layer()
.ok_or_else(|| {
ConfigServiceError::write(
ConfigWriteErrorCode::UserLayerNotFound,
"user layer not found in updated layers",
)
})?
.version
.clone(),
file_path: provided_path,
overridden_metadata: overridden,
})
}

async fn load_layers_state(&self) -> std::io::Result<ConfigLayerStack> {
/// Loads a "thread-agnostic" config, which means the config layers do not
/// include any in-repo .codex/ folders because there is no cwd/project root
/// associated with this query.
async fn load_thread_agnostic_config(&self) -> std::io::Result<ConfigLayerStack> {
let cwd: Option<AbsolutePathBuf> = None;
load_config_layers_state(
&self.codex_home,
cwd,
&self.cli_overrides,
self.loader_overrides.clone(),
)
@@ -320,6 +343,32 @@ impl ConfigService {
}
}

async fn create_empty_user_layer(
config_toml: &AbsolutePathBuf,
) -> Result<ConfigLayerEntry, ConfigServiceError> {
let toml_value = match tokio::fs::read_to_string(config_toml).await {
Ok(contents) => toml::from_str(&contents).map_err(|e| {
ConfigServiceError::toml("failed to parse existing user config.toml", e)
})?,
Err(e) => {
if e.kind() == std::io::ErrorKind::NotFound {
tokio::fs::write(config_toml, "").await.map_err(|e| {
ConfigServiceError::io("failed to create empty user config.toml", e)
})?;
TomlValue::Table(toml::map::Map::new())
} else {
return Err(ConfigServiceError::io("failed to read user config.toml", e));
}
}
};
Ok(ConfigLayerEntry::new(
ConfigLayerSource::User {
file: config_toml.clone(),
},
toml_value,
))
}

fn parse_value(value: JsonValue) -> Result<Option<TomlValue>, String> {
if value.is_null() {
return Ok(None);
@@ -470,15 +519,15 @@ fn validate_config(value: &TomlValue) -> Result<(), toml::de::Error> {
Ok(())
}

fn paths_match(expected: &Path, provided: &Path) -> bool {
fn paths_match(expected: impl AsRef<Path>, provided: impl AsRef<Path>) -> bool {
if let (Ok(expanded_expected), Ok(expanded_provided)) = (
path_utils::normalize_for_path_comparison(expected),
path_utils::normalize_for_path_comparison(provided),
path_utils::normalize_for_path_comparison(&expected),
path_utils::normalize_for_path_comparison(&provided),
) {
return expanded_expected == expanded_provided;
expanded_expected == expanded_provided
} else {
expected.as_ref() == provided.as_ref()
}

expected == provided
}
fn value_at_path<'a>(root: &'a TomlValue, segments: &[String]) -> Option<&'a TomlValue> {
|
||||
@@ -501,10 +550,29 @@ fn value_at_path<'a>(root: &'a TomlValue, segments: &[String]) -> Option<&'a Tom
|
||||
|
||||
fn override_message(layer: &ConfigLayerSource) -> String {
|
||||
match layer {
|
||||
ConfigLayerSource::Mdm { .. } => "Overridden by managed policy (mdm)".to_string(),
|
||||
ConfigLayerSource::System { .. } => "Overridden by managed config (system)".to_string(),
|
||||
ConfigLayerSource::Mdm { domain, key: _ } => {
|
||||
format!("Overridden by managed policy (MDM): {domain}")
|
||||
}
|
||||
ConfigLayerSource::System { file } => {
|
||||
format!("Overridden by managed config (system): {}", file.display())
|
||||
}
|
||||
ConfigLayerSource::Project { dot_codex_folder } => format!(
|
||||
"Overridden by project config: {}/{CONFIG_TOML_FILE}",
|
||||
dot_codex_folder.display(),
|
||||
),
|
||||
ConfigLayerSource::SessionFlags => "Overridden by session flags".to_string(),
|
||||
ConfigLayerSource::User { .. } => "Overridden by user config".to_string(),
|
||||
ConfigLayerSource::User { file } => {
|
||||
format!("Overridden by user config: {}", file.display())
|
||||
}
|
||||
ConfigLayerSource::LegacyManagedConfigTomlFromFile { file } => {
|
||||
format!(
|
||||
"Overridden by legacy managed_config.toml: {}",
|
||||
file.display()
|
||||
)
|
||||
}
|
||||
ConfigLayerSource::LegacyManagedConfigTomlFromMdm => {
|
||||
"Overridden by legacy managed configuration from MDM".to_string()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -513,7 +581,10 @@ fn compute_override_metadata(
|
||||
effective: &TomlValue,
|
||||
segments: &[String],
|
||||
) -> Option<OverriddenMetadata> {
|
||||
let user_value = value_at_path(&layers.user.config, segments);
|
||||
let user_value = match layers.get_user_layer() {
|
||||
Some(user_layer) => value_at_path(&user_layer.config, segments),
|
||||
None => return None,
|
||||
};
|
||||
let effective_value = value_at_path(effective, segments);
|
||||
|
||||
if user_value.is_some() && user_value == effective_value {
|
||||
@@ -524,8 +595,7 @@ fn compute_override_metadata(
|
||||
return None;
|
||||
}
|
||||
|
||||
let effective_layer = find_effective_layer(layers, segments);
|
||||
let overriding_layer = effective_layer.unwrap_or_else(|| layers.user.metadata());
|
||||
let overriding_layer = find_effective_layer(layers, segments)?;
|
||||
let message = override_message(&overriding_layer.name);
|
||||
|
||||
Some(OverriddenMetadata {
|
||||
@@ -554,23 +624,13 @@ fn find_effective_layer(
|
||||
layers: &ConfigLayerStack,
|
||||
segments: &[String],
|
||||
) -> Option<ConfigLayerMetadata> {
|
||||
let check =
|
||||
|state: &ConfigLayerEntry| value_at_path(&state.config, segments).map(|_| state.metadata());
|
||||
for layer in layers.layers_high_to_low() {
|
||||
if let Some(meta) = value_at_path(&layer.config, segments).map(|_| layer.metadata()) {
|
||||
return Some(meta);
|
||||
}
|
||||
}
|
||||
|
||||
if let Some(mdm) = &layers.mdm
|
||||
&& let Some(meta) = check(mdm)
|
||||
{
|
||||
return Some(meta);
|
||||
}
|
||||
if let Some(system) = &layers.system
|
||||
&& let Some(meta) = check(system)
|
||||
{
|
||||
return Some(meta);
|
||||
}
|
||||
if let Some(meta) = check(&layers.session_flags) {
|
||||
return Some(meta);
|
||||
}
|
||||
check(&layers.user)
|
||||
None
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
@@ -695,6 +755,7 @@ remote_compaction = true
|
||||
managed_config_path: Some(managed_path.clone()),
|
||||
#[cfg(target_os = "macos")]
|
||||
managed_preferences_base64: None,
|
||||
macos_managed_config_requirements_base64: None,
|
||||
},
|
||||
);
|
||||
|
||||
@@ -713,20 +774,46 @@ remote_compaction = true
|
||||
.get("approval_policy")
|
||||
.expect("origin")
|
||||
.name,
|
||||
ConfigLayerSource::System {
|
||||
file: managed_file.clone(),
|
||||
}
|
||||
ConfigLayerSource::LegacyManagedConfigTomlFromFile {
|
||||
file: managed_file.clone()
|
||||
},
|
||||
);
|
||||
let layers = response.layers.expect("layers present");
|
||||
assert_eq!(
|
||||
layers.first().unwrap().name,
|
||||
ConfigLayerSource::System { file: managed_file }
|
||||
);
|
||||
assert_eq!(layers.get(1).unwrap().name, ConfigLayerSource::SessionFlags);
|
||||
assert_eq!(
|
||||
layers.last().unwrap().name,
|
||||
ConfigLayerSource::User { file: user_file }
|
||||
);
|
||||
if cfg!(unix) {
|
||||
let system_file = AbsolutePathBuf::from_absolute_path(
|
||||
crate::config_loader::SYSTEM_CONFIG_TOML_FILE_UNIX,
|
||||
)
|
||||
.expect("system file");
|
||||
assert_eq!(layers.len(), 3, "expected three layers on unix");
|
||||
assert_eq!(
|
||||
layers.first().unwrap().name,
|
||||
ConfigLayerSource::LegacyManagedConfigTomlFromFile {
|
||||
file: managed_file.clone()
|
||||
}
|
||||
);
|
||||
assert_eq!(
|
||||
layers.get(1).unwrap().name,
|
||||
ConfigLayerSource::User {
|
||||
file: user_file.clone()
|
||||
}
|
||||
);
|
||||
assert_eq!(
|
||||
layers.get(2).unwrap().name,
|
||||
ConfigLayerSource::System { file: system_file }
|
||||
);
|
||||
} else {
|
||||
assert_eq!(layers.len(), 2, "expected two layers");
|
||||
assert_eq!(
|
||||
layers.first().unwrap().name,
|
||||
ConfigLayerSource::LegacyManagedConfigTomlFromFile {
|
||||
file: managed_file.clone()
|
||||
}
|
||||
);
|
||||
assert_eq!(
|
||||
layers.get(1).unwrap().name,
|
||||
ConfigLayerSource::User { file: user_file }
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
@@ -749,6 +836,7 @@ remote_compaction = true
|
||||
managed_config_path: Some(managed_path.clone()),
|
||||
#[cfg(target_os = "macos")]
|
||||
managed_preferences_base64: None,
|
||||
macos_managed_config_requirements_base64: None,
|
||||
},
|
||||
);
|
||||
|
||||
@@ -779,7 +867,9 @@ remote_compaction = true
|
||||
.get("approval_policy")
|
||||
.expect("origin")
|
||||
.name,
|
||||
ConfigLayerSource::System { file: managed_file }
|
||||
ConfigLayerSource::LegacyManagedConfigTomlFromFile {
|
||||
file: managed_file.clone()
|
||||
}
|
||||
);
|
||||
assert_eq!(result.status, WriteStatus::Ok);
|
||||
assert!(result.overridden_metadata.is_none());
|
||||
@@ -849,6 +939,7 @@ remote_compaction = true
|
||||
managed_config_path: Some(managed_path.clone()),
|
||||
#[cfg(target_os = "macos")]
|
||||
managed_preferences_base64: None,
|
||||
macos_managed_config_requirements_base64: None,
|
||||
},
|
||||
);
|
||||
|
||||
@@ -896,6 +987,7 @@ remote_compaction = true
|
||||
managed_config_path: Some(managed_path.clone()),
|
||||
#[cfg(target_os = "macos")]
|
||||
managed_preferences_base64: None,
|
||||
macos_managed_config_requirements_base64: None,
|
||||
},
|
||||
);
|
||||
|
||||
@@ -909,14 +1001,14 @@ remote_compaction = true
|
||||
assert_eq!(response.config.model.as_deref(), Some("system"));
|
||||
assert_eq!(
|
||||
response.origins.get("model").expect("origin").name,
|
||||
ConfigLayerSource::System {
|
||||
file: managed_file.clone(),
|
||||
}
|
||||
ConfigLayerSource::LegacyManagedConfigTomlFromFile {
|
||||
file: managed_file.clone()
|
||||
},
|
||||
);
|
||||
let layers = response.layers.expect("layers");
|
||||
assert_eq!(
|
||||
layers.first().unwrap().name,
|
||||
ConfigLayerSource::System { file: managed_file }
|
||||
ConfigLayerSource::LegacyManagedConfigTomlFromFile { file: managed_file }
|
||||
);
|
||||
assert_eq!(layers.get(1).unwrap().name, ConfigLayerSource::SessionFlags);
|
||||
assert_eq!(
|
||||
@@ -941,6 +1033,7 @@ remote_compaction = true
|
||||
managed_config_path: Some(managed_path.clone()),
|
||||
#[cfg(target_os = "macos")]
|
||||
managed_preferences_base64: None,
|
||||
macos_managed_config_requirements_base64: None,
|
||||
},
|
||||
);
|
||||
|
||||
@@ -959,7 +1052,7 @@ remote_compaction = true
|
||||
let overridden = result.overridden_metadata.expect("overridden metadata");
|
||||
assert_eq!(
|
||||
overridden.overriding_layer.name,
|
||||
ConfigLayerSource::System { file: managed_file }
|
||||
ConfigLayerSource::LegacyManagedConfigTomlFromFile { file: managed_file }
|
||||
);
|
||||
assert_eq!(overridden.effective_value, serde_json::json!("never"));
|
||||
}
|
||||
|
||||
@@ -221,7 +221,7 @@ mod option_duration_secs {
}
}

#[derive(Deserialize, Debug, Copy, Clone, PartialEq)]
#[derive(Serialize, Deserialize, Debug, Copy, Clone, PartialEq)]
pub enum UriBasedFileOpener {
#[serde(rename = "vscode")]
VsCode,
@@ -253,7 +253,7 @@ impl UriBasedFileOpener {
}

/// Settings that govern if and what will be written to `~/.codex/history.jsonl`.
#[derive(Deserialize, Debug, Clone, PartialEq, Default)]
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default)]
pub struct History {
/// If true, history entries will not be written to disk.
pub persistence: HistoryPersistence,
@@ -263,7 +263,7 @@ pub struct History {
pub max_bytes: Option<usize>,
}

#[derive(Deserialize, Debug, Copy, Clone, PartialEq, Default)]
#[derive(Serialize, Deserialize, Debug, Copy, Clone, PartialEq, Default)]
#[serde(rename_all = "kebab-case")]
pub enum HistoryPersistence {
/// Save all history entries to disk.
@@ -275,7 +275,7 @@ pub enum HistoryPersistence {

// ===== OTEL configuration =====

#[derive(Deserialize, Debug, Clone, PartialEq)]
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)]
#[serde(rename_all = "kebab-case")]
pub enum OtelHttpProtocol {
/// Binary payload
@@ -284,7 +284,7 @@ pub enum OtelHttpProtocol {
Json,
}

#[derive(Deserialize, Debug, Clone, PartialEq, Default)]
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default)]
#[serde(rename_all = "kebab-case")]
pub struct OtelTlsConfig {
pub ca_certificate: Option<AbsolutePathBuf>,
@@ -293,7 +293,7 @@ pub struct OtelTlsConfig {
}

/// Which OTEL exporter to use.
#[derive(Deserialize, Debug, Clone, PartialEq)]
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)]
#[serde(rename_all = "kebab-case")]
pub enum OtelExporterKind {
None,
@@ -315,7 +315,7 @@ pub enum OtelExporterKind {
}

/// OTEL settings loaded from config.toml. Fields are optional so we can apply defaults.
#[derive(Deserialize, Debug, Clone, PartialEq, Default)]
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default)]
pub struct OtelConfigToml {
/// Log user prompt in traces
pub log_user_prompt: Option<bool>,
@@ -350,7 +350,7 @@ impl Default for OtelConfig {
}
}

#[derive(Debug, Clone, PartialEq, Eq, Deserialize)]
#[derive(Serialize, Debug, Clone, PartialEq, Eq, Deserialize)]
#[serde(untagged)]
pub enum Notifications {
Enabled(bool),
@@ -363,8 +363,30 @@ impl Default for Notifications {
}
}

/// How TUI2 should interpret mouse scroll events.
///
/// Terminals generally encode both mouse wheels and trackpads as the same "scroll up/down" mouse
/// button events, without a magnitude. This setting controls whether Codex uses a heuristic to
/// infer wheel vs trackpad per stream, or forces a specific behavior.
#[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq, Eq)]
#[serde(rename_all = "snake_case")]
pub enum ScrollInputMode {
/// Infer wheel vs trackpad behavior per scroll stream.
Auto,
/// Always treat scroll events as mouse-wheel input (fixed lines per tick).
Wheel,
/// Always treat scroll events as trackpad input (fractional accumulation).
Trackpad,
}

impl Default for ScrollInputMode {
fn default() -> Self {
Self::Auto
}
}

/// Collection of settings that are specific to the TUI.
#[derive(Deserialize, Debug, Clone, PartialEq, Default)]
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default)]
pub struct Tui {
/// Enable desktop notifications from the TUI when the terminal is unfocused.
/// Defaults to `true`.
@@ -380,6 +402,109 @@ pub struct Tui {
/// Defaults to `true`.
#[serde(default = "default_true")]
pub show_tooltips: bool,

/// Override the *wheel* event density used to normalize TUI2 scrolling.
///
/// Terminals generally deliver both mouse wheels and trackpads as discrete `scroll up/down`
/// mouse events with direction but no magnitude. Unfortunately, the *number* of raw events
/// per physical wheel notch varies by terminal (commonly 1, 3, or 9+). TUI2 uses this value
/// to normalize that raw event density into consistent "wheel tick" behavior.
///
/// Wheel math (conceptually):
///
/// - A single event contributes `1 / scroll_events_per_tick` tick-equivalents.
/// - Wheel-like streams then scale that by `scroll_wheel_lines` so one physical notch scrolls
///   a fixed number of lines.
///
/// Trackpad math is intentionally *not* fully tied to this value: in trackpad-like mode, TUI2
/// uses `min(scroll_events_per_tick, 3)` as the divisor so terminals with dense wheel ticks
/// (e.g. 9 events per notch) do not make trackpads feel artificially slow.
///
/// Defaults are derived per terminal from [`crate::terminal::TerminalInfo`] when TUI2 starts.
/// See `codex-rs/tui2/docs/scroll_input_model.md` for the probe data and rationale.
pub scroll_events_per_tick: Option<u16>,

/// Override how many transcript lines one physical *wheel notch* should scroll in TUI2.
///
/// This is the "classic feel" knob. Defaults to 3.
///
/// Wheel-like per-event contribution is `scroll_wheel_lines / scroll_events_per_tick`. For
/// example, in a terminal that emits 9 events per notch, the default `3 / 9` yields 1/3 of a
/// line per event and totals 3 lines once the full notch burst arrives.
///
/// See `codex-rs/tui2/docs/scroll_input_model.md` for details on the stream model and the
/// wheel/trackpad heuristic.
pub scroll_wheel_lines: Option<u16>,

/// Override baseline trackpad scroll sensitivity in TUI2.
///
/// Trackpads do not have discrete notches, but terminals still emit discrete `scroll up/down`
/// events. In trackpad-like mode, TUI2 accumulates fractional scroll and only applies whole
/// lines to the viewport.
///
/// Trackpad per-event contribution is:
///
/// - `scroll_trackpad_lines / min(scroll_events_per_tick, 3)`
///
/// (plus optional bounded acceleration; see `scroll_trackpad_accel_*`). The `min(..., 3)`
/// divisor is deliberate: `scroll_events_per_tick` is calibrated from *wheel* behavior and
/// can be much larger than trackpad event density, which would otherwise make trackpads feel
/// too slow in dense-wheel terminals.
///
/// Defaults to 1, meaning one tick-equivalent maps to one transcript line.
pub scroll_trackpad_lines: Option<u16>,

/// Trackpad acceleration: approximate number of events required to gain +1x speed in TUI2.
///
/// This keeps small swipes precise while allowing large/faster swipes to cover more content.
/// Defaults are chosen to address terminals where trackpad event density is comparatively low.
///
/// Concretely, TUI2 computes an acceleration multiplier for trackpad-like streams:
///
/// - `multiplier = clamp(1 + abs(events) / scroll_trackpad_accel_events, 1..scroll_trackpad_accel_max)`
///
/// The multiplier is applied to the stream’s computed line delta (including any carried
/// fractional remainder).
pub scroll_trackpad_accel_events: Option<u16>,

/// Trackpad acceleration: maximum multiplier applied to trackpad-like streams.
///
/// Set to 1 to effectively disable trackpad acceleration.
///
/// See [`Tui::scroll_trackpad_accel_events`] for the exact multiplier formula.
pub scroll_trackpad_accel_max: Option<u16>,

/// Select how TUI2 interprets mouse scroll input.
///
/// - `auto` (default): infer wheel vs trackpad per scroll stream.
/// - `wheel`: always use wheel behavior (fixed lines per wheel notch).
/// - `trackpad`: always use trackpad behavior (fractional accumulation; wheel may feel slow).
#[serde(default)]
pub scroll_mode: ScrollInputMode,

/// Auto-mode threshold: maximum time (ms) for the first tick-worth of events to arrive.
///
/// In `scroll_mode = "auto"`, TUI2 starts a stream as trackpad-like (to avoid overshoot) and
/// promotes it to wheel-like if `scroll_events_per_tick` events arrive "quickly enough". This
/// threshold controls what "quickly enough" means.
///
/// Most users should leave this unset; it is primarily for terminals that emit wheel ticks
/// batched over longer time spans.
pub scroll_wheel_tick_detect_max_ms: Option<u64>,

/// Auto-mode fallback: maximum duration (ms) that a very small stream is still treated as wheel-like.
///
/// This is only used when `scroll_events_per_tick` is effectively 1 (one event per wheel
/// notch). In that case, we cannot observe a "tick completion time", so TUI2 treats a
/// short-lived, small stream (<= 2 events) as wheel-like to preserve classic wheel behavior.
pub scroll_wheel_like_max_duration_ms: Option<u64>,

/// Invert mouse scroll direction in TUI2.
///
/// This flips the scroll sign after terminal detection. It is applied consistently to both
/// wheel and trackpad input.
#[serde(default)]
pub scroll_invert: bool,
}
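The scroll model documented above reduces to a little arithmetic. A hedged sketch follows (standalone Rust, not the TUI2 implementation; the function names are hypothetical, and the numbers in `main` are illustrative defaults taken from the field docs):

```rust
/// Illustrative only: per-event line contribution for a wheel-like stream,
/// following `scroll_wheel_lines / scroll_events_per_tick` from the docs above.
fn wheel_lines_per_event(scroll_wheel_lines: u16, scroll_events_per_tick: u16) -> f32 {
    f32::from(scroll_wheel_lines) / f32::from(scroll_events_per_tick.max(1))
}

/// Illustrative only: per-event contribution for a trackpad-like stream,
/// using the deliberate `min(scroll_events_per_tick, 3)` divisor.
fn trackpad_lines_per_event(scroll_trackpad_lines: u16, scroll_events_per_tick: u16) -> f32 {
    f32::from(scroll_trackpad_lines) / f32::from(scroll_events_per_tick.clamp(1, 3))
}

/// Illustrative only: bounded trackpad acceleration,
/// `clamp(1 + abs(events) / accel_events, 1..accel_max)`.
fn trackpad_accel_multiplier(events: i32, accel_events: u16, accel_max: u16) -> f32 {
    let raw = 1.0 + (events.unsigned_abs() as f32) / f32::from(accel_events.max(1));
    raw.clamp(1.0, f32::from(accel_max.max(1)))
}

fn main() {
    // A terminal emitting 9 events per wheel notch with the default 3 lines
    // per notch contributes 1/3 line per event: 3 lines per full notch.
    assert_eq!(wheel_lines_per_event(3, 9), 1.0 / 3.0);
    // Trackpad mode divides by at most 3, so dense-wheel terminals do not
    // make trackpads feel artificially slow.
    assert_eq!(trackpad_lines_per_event(1, 9), 1.0 / 3.0);
    // A 40-event swipe with accel_events = 20, capped at 4x, yields 3x.
    assert_eq!(trackpad_accel_multiplier(40, 20, 4), 3.0);
}
```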
const fn default_true() -> bool {
@@ -389,7 +514,7 @@ const fn default_true() -> bool {
/// Settings for notices we display to users via the tui and app-server clients
/// (primarily the Codex IDE extension). NOTE: these are different from
/// notifications - notices are warnings, NUX screens, acknowledgements, etc.
#[derive(Deserialize, Debug, Clone, PartialEq, Default)]
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default)]
pub struct Notice {
/// Tracks whether the user has acknowledged the full access warning prompt.
pub hide_full_access_warning: Option<bool>,
@@ -412,7 +537,7 @@ impl Notice {
pub(crate) const TABLE_KEY: &'static str = "notice";
}

#[derive(Deserialize, Debug, Clone, PartialEq, Default)]
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default)]
pub struct SandboxWorkspaceWrite {
#[serde(default)]
pub writable_roots: Vec<AbsolutePathBuf>,
@@ -435,7 +560,7 @@ impl From<SandboxWorkspaceWrite> for codex_app_server_protocol::SandboxSettings
}
}

#[derive(Deserialize, Debug, Clone, PartialEq, Default)]
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default)]
#[serde(rename_all = "kebab-case")]
pub enum ShellEnvironmentPolicyInherit {
/// "Core" environment variables for the platform. On UNIX, this would
@@ -452,7 +577,7 @@ pub enum ShellEnvironmentPolicyInherit {

/// Policy for building the `env` when spawning a process via either the
/// `shell` or `local_shell` tool.
#[derive(Deserialize, Debug, Clone, PartialEq, Default)]
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default)]
pub struct ShellEnvironmentPolicyToml {
pub inherit: Option<ShellEnvironmentPolicyInherit>,

@@ -474,17 +599,17 @@ pub type EnvironmentVariablePattern = WildMatchPattern<'*', '?'>;
/// Deriving the `env` based on this policy works as follows:
/// 1. Create an initial map based on the `inherit` policy.
/// 2. If `ignore_default_excludes` is false, filter the map using the default
///    exclude pattern(s), which are: `"*KEY*"` and `"*TOKEN*"`.
///    exclude pattern(s), which are: `"*KEY*"`, `"*SECRET*"`, and `"*TOKEN*"`.
/// 3. If `exclude` is not empty, filter the map using the provided patterns.
/// 4. Insert any entries from `r#set` into the map.
/// 5. If non-empty, filter the map using the `include_only` patterns.
#[derive(Debug, Clone, PartialEq, Default)]
#[derive(Debug, Clone, PartialEq)]
pub struct ShellEnvironmentPolicy {
/// Starting point when building the environment.
pub inherit: ShellEnvironmentPolicyInherit,

/// True to skip the check to exclude default environment variables that
/// contain "KEY" or "TOKEN" in their name.
/// contain "KEY", "SECRET", or "TOKEN" in their name. Defaults to true.
pub ignore_default_excludes: bool,

/// Environment variable names to exclude from the environment.
@@ -504,7 +629,7 @@ impl From<ShellEnvironmentPolicyToml> for ShellEnvironmentPolicy {
fn from(toml: ShellEnvironmentPolicyToml) -> Self {
// Default to inheriting the full environment when not specified.
let inherit = toml.inherit.unwrap_or(ShellEnvironmentPolicyInherit::All);
let ignore_default_excludes = toml.ignore_default_excludes.unwrap_or(false);
let ignore_default_excludes = toml.ignore_default_excludes.unwrap_or(true);
let exclude = toml
.exclude
.unwrap_or_default()
@@ -531,6 +656,19 @@ impl From<ShellEnvironmentPolicyToml> for ShellEnvironmentPolicy {
}
}

impl Default for ShellEnvironmentPolicy {
fn default() -> Self {
Self {
inherit: ShellEnvironmentPolicyInherit::All,
ignore_default_excludes: true,
exclude: Vec::new(),
r#set: HashMap::new(),
include_only: Vec::new(),
use_profile: false,
}
}
}
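The five derivation steps in the `ShellEnvironmentPolicy` docs above can be read as one small function. A hedged sketch: plain case-insensitive substring checks stand in for the real `WildMatchPattern` globs, and the step-1 `inherit` map is passed in rather than built here.

```rust
use std::collections::HashMap;

// Hedged sketch of the five env-derivation steps; not the shipped code.
fn derive_env(
    mut env: HashMap<String, String>, // step 1: map built from the `inherit` policy
    ignore_default_excludes: bool,
    exclude: &[&str],
    set: &HashMap<String, String>,
    include_only: &[&str],
) -> HashMap<String, String> {
    let name_matches = |name: &str, needle: &str| name.to_uppercase().contains(needle);
    // Step 2: default excludes for names containing KEY, SECRET, or TOKEN.
    if !ignore_default_excludes {
        env.retain(|k, _| !["KEY", "SECRET", "TOKEN"].iter().any(|p| name_matches(k, p)));
    }
    // Step 3: caller-provided exclude patterns.
    env.retain(|k, _| !exclude.iter().any(|p| name_matches(k, p)));
    // Step 4: forced `set` entries override whatever survived.
    env.extend(set.iter().map(|(k, v)| (k.clone(), v.clone())));
    // Step 5: if `include_only` is non-empty, keep only matching names.
    if !include_only.is_empty() {
        env.retain(|k, _| include_only.iter().any(|p| name_matches(k, p)));
    }
    env
}

fn main() {
    let base = HashMap::from([
        ("PATH".to_string(), "/usr/bin".to_string()),
        ("API_KEY".to_string(), "hunter2".to_string()),
    ]);
    // With default excludes active, the secret-looking name is dropped.
    let derived = derive_env(base, false, &[], &HashMap::new(), &[]);
    assert!(derived.contains_key("PATH"));
    assert!(!derived.contains_key("API_KEY"));
}
```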
#[cfg(test)]
mod tests {
use super::*;

@@ -10,7 +10,7 @@ This module is the canonical place to **load and describe Codex configuration la

Exported from `codex_core::config_loader`:

- `load_config_layers_state(codex_home, cli_overrides, overrides) -> ConfigLayerStack`
- `load_config_layers_state(codex_home, cwd_opt, cli_overrides, overrides) -> ConfigLayerStack`
- `ConfigLayerStack`
  - `effective_config() -> toml::Value`
  - `origins() -> HashMap<String, ConfigLayerMetadata>`
@@ -37,11 +37,14 @@ Most callers want the effective config plus metadata:

```rust
use codex_core::config_loader::{load_config_layers_state, LoaderOverrides};
use codex_utils_absolute_path::AbsolutePathBuf;
use toml::Value as TomlValue;

let cli_overrides: Vec<(String, TomlValue)> = Vec::new();
let cwd = AbsolutePathBuf::current_dir()?;
let layers = load_config_layers_state(
    &codex_home,
    Some(cwd),
    &cli_overrides,
    LoaderOverrides::default(),
).await?;
```
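A minimal sketch of what a caller might do next, using only the exports listed above (`effective_config`, `origins`); the `Debug` formatting of `ConfigLayerMetadata` is an assumption:

```rust
// Hedged sketch, continuing from `layers` above.
let effective: TomlValue = layers.effective_config();
if let Some(model) = effective.get("model") {
    println!("model = {model}");
}
for (key, origin) in layers.origins() {
    // Assumes ConfigLayerMetadata implements Debug.
    println!("{key} <- {origin:?}");
}
```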
287
codex-rs/core/src/config_loader/config_requirements.rs
Normal file
@@ -0,0 +1,287 @@
use codex_protocol::config_types::SandboxMode;
use codex_protocol::protocol::AskForApproval;
use codex_protocol::protocol::SandboxPolicy;
use serde::Deserialize;

use crate::config::Constrained;
use crate::config::ConstraintError;

/// Normalized version of [`ConfigRequirementsToml`] after deserialization and
/// normalization.
#[derive(Debug, Clone, PartialEq)]
pub struct ConfigRequirements {
pub approval_policy: Constrained<AskForApproval>,
pub sandbox_policy: Constrained<SandboxPolicy>,
}

impl Default for ConfigRequirements {
fn default() -> Self {
Self {
approval_policy: Constrained::allow_any_from_default(),
sandbox_policy: Constrained::allow_any(SandboxPolicy::ReadOnly),
}
}
}

/// Base config deserialized from /etc/codex/requirements.toml or MDM.
#[derive(Deserialize, Debug, Clone, Default, PartialEq)]
pub struct ConfigRequirementsToml {
pub allowed_approval_policies: Option<Vec<AskForApproval>>,
pub allowed_sandbox_modes: Option<Vec<SandboxModeRequirement>>,
}

/// Currently, `external-sandbox` is not supported in config.toml, but it is
/// supported through programmatic use.
#[derive(Deserialize, Debug, Clone, Copy, PartialEq)]
pub enum SandboxModeRequirement {
#[serde(rename = "read-only")]
ReadOnly,

#[serde(rename = "workspace-write")]
WorkspaceWrite,

#[serde(rename = "danger-full-access")]
DangerFullAccess,

#[serde(rename = "external-sandbox")]
ExternalSandbox,
}

impl From<SandboxMode> for SandboxModeRequirement {
fn from(mode: SandboxMode) -> Self {
match mode {
SandboxMode::ReadOnly => SandboxModeRequirement::ReadOnly,
SandboxMode::WorkspaceWrite => SandboxModeRequirement::WorkspaceWrite,
SandboxMode::DangerFullAccess => SandboxModeRequirement::DangerFullAccess,
}
}
}

impl ConfigRequirementsToml {
/// For every field in `other` that is `Some`, if the corresponding field in
/// `self` is `None`, copy the value from `other` into `self`.
pub fn merge_unset_fields(&mut self, mut other: ConfigRequirementsToml) {
macro_rules! fill_missing_take {
($base:expr, $other:expr, { $($field:ident),+ $(,)? }) => {
$(
if $base.$field.is_none() {
if let Some(value) = $other.$field.take() {
$base.$field = Some(value);
}
}
)+
};
}

fill_missing_take!(self, other, { allowed_approval_policies, allowed_sandbox_modes });
}
}
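Note that `merge_unset_fields` is first-wins: a field already set on `self` is never replaced, which is what lets higher-precedence requirement sources be applied first. A minimal sketch of the semantics, using the types defined above:

```rust
// Hedged sketch of first-wins merging (see also the tests below).
let mut merged = ConfigRequirementsToml {
    allowed_approval_policies: Some(vec![AskForApproval::Never]),
    allowed_sandbox_modes: None,
};
merged.merge_unset_fields(ConfigRequirementsToml {
    allowed_approval_policies: Some(vec![AskForApproval::OnRequest]), // ignored: already set
    allowed_sandbox_modes: Some(vec![SandboxModeRequirement::ReadOnly]), // fills the unset field
});
assert_eq!(merged.allowed_approval_policies, Some(vec![AskForApproval::Never]));
assert_eq!(merged.allowed_sandbox_modes, Some(vec![SandboxModeRequirement::ReadOnly]));
```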
impl TryFrom<ConfigRequirementsToml> for ConfigRequirements {
type Error = ConstraintError;

fn try_from(toml: ConfigRequirementsToml) -> Result<Self, Self::Error> {
let ConfigRequirementsToml {
allowed_approval_policies,
allowed_sandbox_modes,
} = toml;
let approval_policy: Constrained<AskForApproval> = match allowed_approval_policies {
Some(policies) => {
if let Some(first) = policies.first() {
Constrained::allow_values(*first, policies)?
} else {
return Err(ConstraintError::empty_field("allowed_approval_policies"));
}
}
None => Constrained::allow_any_from_default(),
};

// TODO(gt): `ConfigRequirementsToml` should let the author specify the
// default `SandboxPolicy`? Should do this for `AskForApproval` too?
//
// Currently, we force ReadOnly as the default policy because two of
// the other variants (WorkspaceWrite, ExternalSandbox) require
// additional parameters. Ultimately, we should expand the config
// format to allow specifying those parameters.
let default_sandbox_policy = SandboxPolicy::ReadOnly;
let sandbox_policy: Constrained<SandboxPolicy> = match allowed_sandbox_modes {
Some(modes) => {
if !modes.contains(&SandboxModeRequirement::ReadOnly) {
return Err(ConstraintError::invalid_value(
"allowed_sandbox_modes",
"must include 'read-only' to allow any SandboxPolicy",
));
};

Constrained::new(default_sandbox_policy, move |candidate| {
let mode = match candidate {
SandboxPolicy::ReadOnly => SandboxModeRequirement::ReadOnly,
SandboxPolicy::WorkspaceWrite { .. } => {
SandboxModeRequirement::WorkspaceWrite
}
SandboxPolicy::DangerFullAccess => SandboxModeRequirement::DangerFullAccess,
SandboxPolicy::ExternalSandbox { .. } => {
SandboxModeRequirement::ExternalSandbox
}
};
if modes.contains(&mode) {
Ok(())
} else {
Err(ConstraintError::invalid_value(
format!("{candidate:?}"),
format!("{modes:?}"),
))
}
})?
}
None => Constrained::allow_any(default_sandbox_policy),
};
Ok(ConfigRequirements {
approval_policy,
sandbox_policy,
})
}
}

#[cfg(test)]
mod tests {
use super::*;
use anyhow::Result;
use codex_protocol::protocol::NetworkAccess;
use codex_utils_absolute_path::AbsolutePathBuf;
use pretty_assertions::assert_eq;
use toml::from_str;

#[test]
fn merge_unset_fields_only_fills_missing_values() -> Result<()> {
let source: ConfigRequirementsToml = from_str(
r#"
allowed_approval_policies = ["on-request"]
"#,
)?;

let mut empty_target: ConfigRequirementsToml = from_str(
r#"
# intentionally left unset
"#,
)?;
empty_target.merge_unset_fields(source.clone());
assert_eq!(
empty_target.allowed_approval_policies,
Some(vec![AskForApproval::OnRequest])
);

let mut populated_target: ConfigRequirementsToml = from_str(
r#"
allowed_approval_policies = ["never"]
"#,
)?;
populated_target.merge_unset_fields(source);
assert_eq!(
populated_target.allowed_approval_policies,
Some(vec![AskForApproval::Never])
);
Ok(())
}

#[test]
fn deserialize_allowed_approval_policies() -> Result<()> {
let toml_str = r#"
allowed_approval_policies = ["untrusted", "on-request"]
"#;
let config: ConfigRequirementsToml = from_str(toml_str)?;
let requirements = ConfigRequirements::try_from(config)?;

assert_eq!(
requirements.approval_policy.value(),
AskForApproval::UnlessTrusted,
"currently, there is no way to specify the default value for approval policy in the toml, so it picks the first allowed value"
);
assert!(
requirements
.approval_policy
.can_set(&AskForApproval::UnlessTrusted)
.is_ok()
);
assert_eq!(
requirements
.approval_policy
.can_set(&AskForApproval::OnFailure),
Err(ConstraintError::InvalidValue {
candidate: "OnFailure".into(),
allowed: "[UnlessTrusted, OnRequest]".into(),
})
);
assert!(
requirements
.approval_policy
.can_set(&AskForApproval::OnRequest)
.is_ok()
);
assert_eq!(
requirements.approval_policy.can_set(&AskForApproval::Never),
Err(ConstraintError::InvalidValue {
candidate: "Never".into(),
allowed: "[UnlessTrusted, OnRequest]".into(),
})
);
assert!(
requirements
.sandbox_policy
.can_set(&SandboxPolicy::ReadOnly)
.is_ok()
);

Ok(())
}

#[test]
fn deserialize_allowed_sandbox_modes() -> Result<()> {
let toml_str = r#"
allowed_sandbox_modes = ["read-only", "workspace-write"]
"#;
let config: ConfigRequirementsToml = from_str(toml_str)?;
let requirements = ConfigRequirements::try_from(config)?;

let root = if cfg!(windows) { "C:\\repo" } else { "/repo" };
assert!(
requirements
.sandbox_policy
.can_set(&SandboxPolicy::ReadOnly)
.is_ok()
);
assert!(
requirements
.sandbox_policy
.can_set(&SandboxPolicy::WorkspaceWrite {
writable_roots: vec![AbsolutePathBuf::from_absolute_path(root)?],
network_access: false,
exclude_tmpdir_env_var: false,
exclude_slash_tmp: false,
})
.is_ok()
);
assert_eq!(
requirements
.sandbox_policy
.can_set(&SandboxPolicy::DangerFullAccess),
Err(ConstraintError::InvalidValue {
candidate: "DangerFullAccess".into(),
allowed: "[ReadOnly, WorkspaceWrite]".into(),
})
);
assert_eq!(
requirements
.sandbox_policy
.can_set(&SandboxPolicy::ExternalSandbox {
network_access: NetworkAccess::Restricted,
}),
Err(ConstraintError::InvalidValue {
candidate: "ExternalSandbox { network_access: Restricted }".into(),
allowed: "[ReadOnly, WorkspaceWrite]".into(),
})
);

Ok(())
}
}
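The tests above show the only two keys this format recognizes. For reference, a hedged sketch of a complete requirements file an admin might install at `/etc/codex/requirements.toml`, round-tripped through the types above (the chosen values are illustrative, not recommendations; `?` assumes an `anyhow::Result` context like the tests use):

```rust
// Hedged sketch: the full shape of /etc/codex/requirements.toml.
let contents = r#"
allowed_approval_policies = ["untrusted", "on-request"]
allowed_sandbox_modes = ["read-only", "workspace-write"]
"#;
let parsed: ConfigRequirementsToml = toml::from_str(contents)?;
let requirements = ConfigRequirements::try_from(parsed)?;
assert!(requirements.approval_policy.can_set(&AskForApproval::OnRequest).is_ok());
assert!(requirements.sandbox_policy.can_set(&SandboxPolicy::ReadOnly).is_ok());
```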
@@ -1,7 +1,7 @@
use super::LoaderOverrides;
#[cfg(target_os = "macos")]
use super::macos::load_managed_admin_config_layer;
use super::overrides::default_empty_table;
use crate::config::CONFIG_TOML_FILE;
use codex_utils_absolute_path::AbsolutePathBuf;
use std::io;
use std::path::Path;
use std::path::PathBuf;
@@ -11,11 +11,18 @@ use toml::Value as TomlValue;
#[cfg(unix)]
const CODEX_MANAGED_CONFIG_SYSTEM_PATH: &str = "/etc/codex/managed_config.toml";

#[derive(Debug, Clone)]
pub(super) struct MangedConfigFromFile {
pub managed_config: TomlValue,
pub file: AbsolutePathBuf,
}

#[derive(Debug, Clone)]
pub(super) struct LoadedConfigLayers {
pub base: TomlValue,
pub managed_config: Option<TomlValue>,
pub managed_preferences: Option<TomlValue>,
/// If present, data read from a file such as `/etc/codex/managed_config.toml`.
pub managed_config: Option<MangedConfigFromFile>,
/// If present, data read from managed preferences (macOS only).
pub managed_config_from_mdm: Option<TomlValue>,
}

pub(super) async fn load_config_layers_internal(
@@ -26,56 +33,61 @@ pub(super) async fn load_config_layers_internal(
let LoaderOverrides {
managed_config_path,
managed_preferences_base64,
..
} = overrides;

#[cfg(not(target_os = "macos"))]
let LoaderOverrides {
managed_config_path,
..
} = overrides;

let managed_config_path =
managed_config_path.unwrap_or_else(|| managed_config_default_path(codex_home));
let managed_config_path = AbsolutePathBuf::from_absolute_path(
managed_config_path.unwrap_or_else(|| managed_config_default_path(codex_home)),
)?;

let user_config_path = codex_home.join(CONFIG_TOML_FILE);
let user_config = read_config_from_path(&user_config_path, true).await?;
let managed_config = read_config_from_path(&managed_config_path, false).await?;
let managed_config = read_config_from_path(&managed_config_path, false)
.await?
.map(|managed_config| MangedConfigFromFile {
managed_config,
file: managed_config_path.clone(),
});

#[cfg(target_os = "macos")]
let managed_preferences =
load_managed_admin_config_layer(managed_preferences_base64.as_deref()).await?;

#[cfg(not(target_os = "macos"))]
let managed_preferences = load_managed_admin_config_layer(None).await?;
let managed_preferences = None;

Ok(LoadedConfigLayers {
base: user_config.unwrap_or_else(default_empty_table),
managed_config,
managed_preferences,
managed_config_from_mdm: managed_preferences,
})
}

pub(super) async fn read_config_from_path(
path: &Path,
path: impl AsRef<Path>,
log_missing_as_info: bool,
) -> io::Result<Option<TomlValue>> {
match fs::read_to_string(path).await {
match fs::read_to_string(path.as_ref()).await {
Ok(contents) => match toml::from_str::<TomlValue>(&contents) {
Ok(value) => Ok(Some(value)),
Err(err) => {
tracing::error!("Failed to parse {}: {err}", path.display());
tracing::error!("Failed to parse {}: {err}", path.as_ref().display());
Err(io::Error::new(io::ErrorKind::InvalidData, err))
}
},
Err(err) if err.kind() == io::ErrorKind::NotFound => {
if log_missing_as_info {
tracing::info!("{} not found, using defaults", path.display());
tracing::info!("{} not found, using defaults", path.as_ref().display());
} else {
tracing::debug!("{} not found", path.display());
tracing::debug!("{} not found", path.as_ref().display());
}
Ok(None)
}
Err(err) => {
tracing::error!("Failed to read {}: {err}", path.display());
tracing::error!("Failed to read {}: {err}", path.as_ref().display());
Err(err)
}
}
@@ -1,118 +1,146 @@
use super::config_requirements::ConfigRequirementsToml;
use base64::Engine;
use base64::prelude::BASE64_STANDARD;
use core_foundation::base::TCFType;
use core_foundation::string::CFString;
use core_foundation::string::CFStringRef;
use std::ffi::c_void;
use std::io;
use tokio::task;
use toml::Value as TomlValue;

#[cfg(target_os = "macos")]
mod native {
use super::*;
use base64::Engine;
use base64::prelude::BASE64_STANDARD;
use core_foundation::base::TCFType;
use core_foundation::string::CFString;
use core_foundation::string::CFStringRef;
use std::ffi::c_void;
use tokio::task;
const MANAGED_PREFERENCES_APPLICATION_ID: &str = "com.openai.codex";
const MANAGED_PREFERENCES_CONFIG_KEY: &str = "config_toml_base64";
const MANAGED_PREFERENCES_REQUIREMENTS_KEY: &str = "requirements_toml_base64";

pub(crate) async fn load_managed_admin_config_layer(
override_base64: Option<&str>,
) -> io::Result<Option<TomlValue>> {
if let Some(encoded) = override_base64 {
let trimmed = encoded.trim();
return if trimmed.is_empty() {
Ok(None)
} else {
parse_managed_preferences_base64(trimmed).map(Some)
};
}

const LOAD_ERROR: &str = "Failed to load managed preferences configuration";

match task::spawn_blocking(load_managed_admin_config).await {
Ok(result) => result,
Err(join_err) => {
if join_err.is_cancelled() {
tracing::error!("Managed preferences load task was cancelled");
} else {
tracing::error!("Managed preferences load task failed: {join_err}");
}
Err(io::Error::other(LOAD_ERROR))
}
}
}

pub(super) fn load_managed_admin_config() -> io::Result<Option<TomlValue>> {
#[link(name = "CoreFoundation", kind = "framework")]
unsafe extern "C" {
fn CFPreferencesCopyAppValue(
key: CFStringRef,
application_id: CFStringRef,
) -> *mut c_void;
}

const MANAGED_PREFERENCES_APPLICATION_ID: &str = "com.openai.codex";
const MANAGED_PREFERENCES_CONFIG_KEY: &str = "config_toml_base64";

let application_id = CFString::new(MANAGED_PREFERENCES_APPLICATION_ID);
let key = CFString::new(MANAGED_PREFERENCES_CONFIG_KEY);

let value_ref = unsafe {
CFPreferencesCopyAppValue(
key.as_concrete_TypeRef(),
application_id.as_concrete_TypeRef(),
)
};

if value_ref.is_null() {
tracing::debug!(
"Managed preferences for {} key {} not found",
MANAGED_PREFERENCES_APPLICATION_ID,
MANAGED_PREFERENCES_CONFIG_KEY
);
return Ok(None);
}

let value = unsafe { CFString::wrap_under_create_rule(value_ref as _) };
let contents = value.to_string();
let trimmed = contents.trim();

parse_managed_preferences_base64(trimmed).map(Some)
}

pub(super) fn parse_managed_preferences_base64(encoded: &str) -> io::Result<TomlValue> {
let decoded = BASE64_STANDARD.decode(encoded.as_bytes()).map_err(|err| {
tracing::error!("Failed to decode managed preferences as base64: {err}");
io::Error::new(io::ErrorKind::InvalidData, err)
})?;

let decoded_str = String::from_utf8(decoded).map_err(|err| {
tracing::error!("Managed preferences base64 contents were not valid UTF-8: {err}");
io::Error::new(io::ErrorKind::InvalidData, err)
})?;

match toml::from_str::<TomlValue>(&decoded_str) {
Ok(TomlValue::Table(parsed)) => Ok(TomlValue::Table(parsed)),
Ok(other) => {
tracing::error!(
"Managed preferences TOML must have a table at the root, found {other:?}",
);
Err(io::Error::new(
io::ErrorKind::InvalidData,
"managed preferences root must be a table",
))
}
Err(err) => {
tracing::error!("Failed to parse managed preferences TOML: {err}");
Err(io::Error::new(io::ErrorKind::InvalidData, err))
}
}
}
}

#[cfg(target_os = "macos")]
pub(crate) use native::load_managed_admin_config_layer;

#[cfg(not(target_os = "macos"))]
pub(crate) async fn load_managed_admin_config_layer(
_override_base64: Option<&str>,
override_base64: Option<&str>,
) -> io::Result<Option<TomlValue>> {
Ok(None)
if let Some(encoded) = override_base64 {
let trimmed = encoded.trim();
return if trimmed.is_empty() {
Ok(None)
} else {
parse_managed_config_base64(trimmed).map(Some)
};
}

match task::spawn_blocking(load_managed_admin_config).await {
Ok(result) => result,
Err(join_err) => {
if join_err.is_cancelled() {
tracing::error!("Managed config load task was cancelled");
} else {
tracing::error!("Managed config load task failed: {join_err}");
}
Err(io::Error::other("Failed to load managed config"))
}
}
}

fn load_managed_admin_config() -> io::Result<Option<TomlValue>> {
load_managed_preference(MANAGED_PREFERENCES_CONFIG_KEY)?
.as_deref()
.map(str::trim)
.map(parse_managed_config_base64)
.transpose()
}

pub(crate) async fn load_managed_admin_requirements_toml(
target: &mut ConfigRequirementsToml,
override_base64: Option<&str>,
) -> io::Result<()> {
if let Some(encoded) = override_base64 {
let trimmed = encoded.trim();
if !trimmed.is_empty() {
target.merge_unset_fields(parse_managed_requirements_base64(trimmed)?);
}
return Ok(());
}

match task::spawn_blocking(load_managed_admin_requirements).await {
Ok(result) => {
if let Some(requirements) = result? {
target.merge_unset_fields(requirements);
}
Ok(())
}
Err(join_err) => {
if join_err.is_cancelled() {
tracing::error!("Managed requirements load task was cancelled");
} else {
tracing::error!("Managed requirements load task failed: {join_err}");
}
Err(io::Error::other("Failed to load managed requirements"))
}
}
}

fn load_managed_admin_requirements() -> io::Result<Option<ConfigRequirementsToml>> {
load_managed_preference(MANAGED_PREFERENCES_REQUIREMENTS_KEY)?
.as_deref()
.map(str::trim)
.map(parse_managed_requirements_base64)
.transpose()
}

fn load_managed_preference(key_name: &str) -> io::Result<Option<String>> {
#[link(name = "CoreFoundation", kind = "framework")]
unsafe extern "C" {
fn CFPreferencesCopyAppValue(key: CFStringRef, application_id: CFStringRef) -> *mut c_void;
}

let value_ref = unsafe {
CFPreferencesCopyAppValue(
CFString::new(key_name).as_concrete_TypeRef(),
CFString::new(MANAGED_PREFERENCES_APPLICATION_ID).as_concrete_TypeRef(),
)
};

if value_ref.is_null() {
tracing::debug!(
"Managed preferences for {MANAGED_PREFERENCES_APPLICATION_ID} key {key_name} not found",
);
return Ok(None);
}

let value = unsafe { CFString::wrap_under_create_rule(value_ref as _) }.to_string();
Ok(Some(value))
}

fn parse_managed_config_base64(encoded: &str) -> io::Result<TomlValue> {
match toml::from_str::<TomlValue>(&decode_managed_preferences_base64(encoded)?) {
Ok(TomlValue::Table(parsed)) => Ok(TomlValue::Table(parsed)),
Ok(other) => {
tracing::error!("Managed config TOML must have a table at the root, found {other:?}",);
Err(io::Error::new(
io::ErrorKind::InvalidData,
"managed config root must be a table",
))
}
Err(err) => {
tracing::error!("Failed to parse managed config TOML: {err}");
Err(io::Error::new(io::ErrorKind::InvalidData, err))
}
}
}

fn parse_managed_requirements_base64(encoded: &str) -> io::Result<ConfigRequirementsToml> {
toml::from_str::<ConfigRequirementsToml>(&decode_managed_preferences_base64(encoded)?).map_err(
|err| {
tracing::error!("Failed to parse managed requirements TOML: {err}");
io::Error::new(io::ErrorKind::InvalidData, err)
},
)
}

fn decode_managed_preferences_base64(encoded: &str) -> io::Result<String> {
String::from_utf8(BASE64_STANDARD.decode(encoded.as_bytes()).map_err(|err| {
tracing::error!("Failed to decode managed value as base64: {err}",);
io::Error::new(io::ErrorKind::InvalidData, err)
})?)
.map_err(|err| {
tracing::error!("Managed value base64 contents were not valid UTF-8: {err}",);
io::Error::new(io::ErrorKind::InvalidData, err)
})
}
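The functions above only decode; producing the managed-preferences payload is the mirror image. A hedged sketch of the encode side an MDM profile author might use (the key and domain names are the constants defined above; this is not shipped tooling):

```rust
use base64::Engine;
use base64::prelude::BASE64_STANDARD;

fn main() {
    // An MDM profile would carry this string under the
    // `requirements_toml_base64` key of the `com.openai.codex` domain.
    let requirements = r#"allowed_approval_policies = ["on-request"]"#;
    let encoded = BASE64_STANDARD.encode(requirements.as_bytes());
    println!("{encoded}");
}
```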
@@ -1,5 +1,7 @@
|
||||
mod config_requirements;
|
||||
mod fingerprint;
|
||||
mod layer_io;
|
||||
#[cfg(target_os = "macos")]
|
||||
mod macos;
|
||||
mod merge;
|
||||
mod overrides;
|
||||
@@ -9,74 +11,597 @@ mod state;
|
||||
mod tests;
|
||||
|
||||
use crate::config::CONFIG_TOML_FILE;
|
||||
use crate::config::ConfigToml;
|
||||
use crate::config_loader::config_requirements::ConfigRequirementsToml;
|
||||
use crate::config_loader::layer_io::LoadedConfigLayers;
|
||||
use codex_app_server_protocol::ConfigLayerSource;
|
||||
use codex_protocol::config_types::SandboxMode;
|
||||
use codex_protocol::protocol::AskForApproval;
|
||||
use codex_utils_absolute_path::AbsolutePathBuf;
|
||||
use codex_utils_absolute_path::AbsolutePathBufGuard;
|
||||
use serde::Deserialize;
|
||||
use std::io;
|
||||
use std::path::Path;
|
||||
use toml::Value as TomlValue;
|
||||
|
||||
pub use config_requirements::ConfigRequirements;
|
||||
pub use merge::merge_toml_values;
|
||||
pub use state::ConfigLayerEntry;
|
||||
pub use state::ConfigLayerStack;
|
||||
pub use state::ConfigLayerStackOrdering;
|
||||
pub use state::LoaderOverrides;
|
||||
|
||||
const MDM_PREFERENCES_DOMAIN: &str = "com.openai.codex";
|
||||
const MDM_PREFERENCES_KEY: &str = "config_toml_base64";
|
||||
/// On Unix systems, load requirements from this file path, if present.
|
||||
const DEFAULT_REQUIREMENTS_TOML_FILE_UNIX: &str = "/etc/codex/requirements.toml";
|
||||
|
||||
/// Configuration layering pipeline (top overrides bottom):
|
||||
/// On Unix systems, load default settings from this file path, if present.
|
||||
/// Note that /etc/codex/ is treated as a "config folder," so subfolders such
|
||||
/// as skills/ and rules/ will also be honored.
|
||||
pub const SYSTEM_CONFIG_TOML_FILE_UNIX: &str = "/etc/codex/config.toml";
|
||||
|
||||
const DEFAULT_PROJECT_ROOT_MARKERS: &[&str] = &[".git"];
|
||||
|
||||
/// To build up the set of admin-enforced constraints, we build up from multiple
|
||||
/// configuration layers in the following order, but a constraint defined in an
|
||||
/// earlier layer cannot be overridden by a later layer:
|
||||
///
|
||||
/// +-------------------------+
|
||||
/// | Managed preferences (*) |
|
||||
/// +-------------------------+
|
||||
/// ^
|
||||
/// |
|
||||
/// +-------------------------+
|
||||
/// | managed_config.toml |
|
||||
/// +-------------------------+
|
||||
/// ^
|
||||
/// |
|
||||
/// +-------------------------+
|
||||
/// | config.toml (base) |
|
||||
/// +-------------------------+
|
||||
/// - admin: managed preferences (*)
|
||||
/// - system `/etc/codex/requirements.toml`
|
||||
///
|
||||
/// For backwards compatibility, we also load from
|
||||
/// `/etc/codex/managed_config.toml` and map it to
|
||||
/// `/etc/codex/requirements.toml`.
|
||||
///
|
||||
/// Configuration is built up from multiple layers in the following order:
|
||||
///
|
||||
/// - admin: managed preferences (*)
|
||||
/// - system `/etc/codex/config.toml`
|
||||
/// - user `${CODEX_HOME}/config.toml`
|
||||
/// - cwd `${PWD}/config.toml`
|
||||
/// - tree parent directories up to root looking for `./.codex/config.toml`
|
||||
/// - repo `$(git rev-parse --show-toplevel)/.codex/config.toml`
|
||||
/// - runtime e.g., --config flags, model selector in UI
|
||||
///
|
||||
/// (*) Only available on macOS via managed device profiles.
|
||||
///
|
||||
/// See https://developers.openai.com/codex/security for details.
|
||||
///
|
||||
/// When loading the config stack for a thread, there should be a `cwd`
|
||||
/// associated with it such that `cwd` should be `Some(...)`. Only for
|
||||
/// thread-agnostic config loading (e.g., for the app server's `/config`
|
||||
/// endpoint) should `cwd` be `None`.
|
||||
pub async fn load_config_layers_state(
|
||||
codex_home: &Path,
|
||||
cwd: Option<AbsolutePathBuf>,
|
||||
cli_overrides: &[(String, TomlValue)],
|
||||
overrides: LoaderOverrides,
|
||||
) -> io::Result<ConfigLayerStack> {
|
||||
let managed_config_path = overrides
|
||||
.managed_config_path
|
||||
.clone()
|
||||
.unwrap_or_else(|| layer_io::managed_config_default_path(codex_home));
|
||||
let mut config_requirements_toml = ConfigRequirementsToml::default();
|
||||
|
||||
let layers = layer_io::load_config_layers_internal(codex_home, overrides).await?;
|
||||
let cli_overrides_layer = overrides::build_cli_overrides_layer(cli_overrides);
|
||||
let user_file = AbsolutePathBuf::from_absolute_path(codex_home.join(CONFIG_TOML_FILE))?;
|
||||
#[cfg(target_os = "macos")]
|
||||
macos::load_managed_admin_requirements_toml(
|
||||
&mut config_requirements_toml,
|
||||
overrides
|
||||
.macos_managed_config_requirements_base64
|
||||
.as_deref(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
let system = match layers.managed_config {
|
||||
Some(cfg) => {
|
||||
let system_file = AbsolutePathBuf::from_absolute_path(managed_config_path.clone())?;
|
||||
Some(ConfigLayerEntry::new(
|
||||
ConfigLayerSource::System { file: system_file },
|
||||
cfg,
|
||||
))
|
||||
}
|
||||
None => None,
|
||||
// Honor /etc/codex/requirements.toml.
|
||||
if cfg!(unix) {
|
||||
load_requirements_toml(
|
||||
&mut config_requirements_toml,
|
||||
DEFAULT_REQUIREMENTS_TOML_FILE_UNIX,
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
|
||||
// Make a best-effort to support the legacy `managed_config.toml` as a
|
||||
// requirements specification.
|
||||
let loaded_config_layers = layer_io::load_config_layers_internal(codex_home, overrides).await?;
|
||||
load_requirements_from_legacy_scheme(
|
||||
&mut config_requirements_toml,
|
||||
loaded_config_layers.clone(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
let mut layers = Vec::<ConfigLayerEntry>::new();
|
||||
|
||||
// Include an entry for the "system" config folder, loading its config.toml,
|
||||
// if it exists.
|
||||
let system_config_toml_file = if cfg!(unix) {
|
||||
Some(AbsolutePathBuf::from_absolute_path(
|
||||
SYSTEM_CONFIG_TOML_FILE_UNIX,
|
||||
)?)
|
||||
} else {
|
||||
// TODO(gt): Determine the path to load on Windows.
|
||||
None
|
||||
};
|
||||
if let Some(system_config_toml_file) = system_config_toml_file {
|
||||
let system_layer =
|
||||
load_config_toml_for_required_layer(&system_config_toml_file, |config_toml| {
|
||||
ConfigLayerEntry::new(
|
||||
ConfigLayerSource::System {
|
||||
file: system_config_toml_file.clone(),
|
||||
},
|
||||
config_toml,
|
||||
)
|
||||
})
|
||||
.await?;
|
||||
layers.push(system_layer);
|
||||
}
|
||||
|
||||
Ok(ConfigLayerStack {
|
||||
user: ConfigLayerEntry::new(ConfigLayerSource::User { file: user_file }, layers.base),
|
||||
session_flags: ConfigLayerEntry::new(ConfigLayerSource::SessionFlags, cli_overrides_layer),
|
||||
system,
|
||||
mdm: layers.managed_preferences.map(|cfg| {
|
||||
ConfigLayerEntry::new(
|
||||
ConfigLayerSource::Mdm {
|
||||
domain: MDM_PREFERENCES_DOMAIN.to_string(),
|
||||
key: MDM_PREFERENCES_KEY.to_string(),
|
||||
},
|
||||
cfg,
|
||||
)
|
||||
}),
|
||||
// Add a layer for $CODEX_HOME/config.toml if it exists. Note if the file
|
||||
// exists, but is malformed, then this error should be propagated to the
|
||||
// user.
|
||||
let user_file = AbsolutePathBuf::resolve_path_against_base(CONFIG_TOML_FILE, codex_home)?;
|
||||
let user_layer = load_config_toml_for_required_layer(&user_file, |config_toml| {
|
||||
ConfigLayerEntry::new(
|
||||
ConfigLayerSource::User {
|
||||
file: user_file.clone(),
|
||||
},
|
||||
config_toml,
|
||||
)
|
||||
})
|
||||
.await?;
|
||||
layers.push(user_layer);
|
||||
|
||||
if let Some(cwd) = cwd {
|
||||
let mut merged_so_far = TomlValue::Table(toml::map::Map::new());
|
||||
for layer in &layers {
|
||||
merge_toml_values(&mut merged_so_far, &layer.config);
|
||||
}
|
||||
let project_root_markers = project_root_markers_from_config(&merged_so_far)?
|
||||
.unwrap_or_else(default_project_root_markers);
|
||||
|
||||
let project_root = find_project_root(&cwd, &project_root_markers).await?;
|
||||
let project_layers = load_project_layers(&cwd, &project_root).await?;
|
||||
layers.extend(project_layers);
|
||||
}
|
||||
|
||||
// Add a layer for runtime overrides from the CLI or UI, if any exist.
|
||||
if !cli_overrides.is_empty() {
|
||||
let cli_overrides_layer = overrides::build_cli_overrides_layer(cli_overrides);
|
||||
layers.push(ConfigLayerEntry::new(
|
||||
ConfigLayerSource::SessionFlags,
|
||||
cli_overrides_layer,
|
||||
));
|
||||
}
|
||||
|
||||
// Make a best-effort to support the legacy `managed_config.toml` as a
|
||||
// config layer on top of everything else. For fields in
|
||||
// `managed_config.toml` that do not have an equivalent in
|
||||
// `ConfigRequirements`, note users can still override these values on a
|
||||
// per-turn basis in the TUI and VS Code.
|
||||
    let LoadedConfigLayers {
        managed_config,
        managed_config_from_mdm,
    } = loaded_config_layers;
    if let Some(config) = managed_config {
        let managed_parent = config.file.as_path().parent().ok_or_else(|| {
            io::Error::new(
                io::ErrorKind::InvalidData,
                format!(
                    "Managed config file {} has no parent directory",
                    config.file.as_path().display()
                ),
            )
        })?;
        let managed_config =
            resolve_relative_paths_in_config_toml(config.managed_config, managed_parent)?;
        layers.push(ConfigLayerEntry::new(
            ConfigLayerSource::LegacyManagedConfigTomlFromFile { file: config.file },
            managed_config,
        ));
    }
    if let Some(config) = managed_config_from_mdm {
        layers.push(ConfigLayerEntry::new(
            ConfigLayerSource::LegacyManagedConfigTomlFromMdm,
            config,
        ));
    }

    ConfigLayerStack::new(layers, config_requirements_toml.try_into()?)
}

/// Attempts to load a config.toml file from `config_toml`.
/// - If the file exists and is valid TOML, passes the parsed `toml::Value` to
///   `create_entry` and returns the resulting layer entry.
/// - If the file does not exist, uses an empty `Table` with `create_entry` and
///   returns the resulting layer entry.
/// - If there is an error reading the file or parsing the TOML, returns an
///   error.
async fn load_config_toml_for_required_layer(
    config_toml: impl AsRef<Path>,
    create_entry: impl FnOnce(TomlValue) -> ConfigLayerEntry,
) -> io::Result<ConfigLayerEntry> {
    let toml_file = config_toml.as_ref();
    let toml_value = match tokio::fs::read_to_string(toml_file).await {
        Ok(contents) => {
            let config: TomlValue = toml::from_str(&contents).map_err(|e| {
                io::Error::new(
                    io::ErrorKind::InvalidData,
                    format!("Error parsing config file {}: {e}", toml_file.display()),
                )
            })?;
            let config_parent = toml_file.parent().ok_or_else(|| {
                io::Error::new(
                    io::ErrorKind::InvalidData,
                    format!(
                        "Config file {} has no parent directory",
                        toml_file.display()
                    ),
                )
            })?;
            resolve_relative_paths_in_config_toml(config, config_parent)
        }
        Err(e) => {
            if e.kind() == io::ErrorKind::NotFound {
                Ok(TomlValue::Table(toml::map::Map::new()))
            } else {
                Err(io::Error::new(
                    e.kind(),
                    format!("Failed to read config file {}: {e}", toml_file.display()),
                ))
            }
        }
    }?;

    Ok(create_entry(toml_value))
}
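
// A minimal illustrative sketch of the missing-file case documented above,
// assuming `tempfile` and the tokio test runtime used elsewhere in this crate;
// the module name is hypothetical.
#[cfg(test)]
mod load_config_toml_for_required_layer_examples {
    use super::*;
    use tempfile::tempdir;

    #[tokio::test]
    async fn missing_file_yields_empty_table() -> io::Result<()> {
        let tmp = tempdir()?;
        // No config.toml is created, so the loader falls back to an empty table.
        let missing = tmp.path().join(CONFIG_TOML_FILE);
        let entry = load_config_toml_for_required_layer(&missing, |config_toml| {
            ConfigLayerEntry::new(ConfigLayerSource::SessionFlags, config_toml)
        })
        .await?;
        assert_eq!(entry.config, TomlValue::Table(toml::map::Map::new()));
        Ok(())
    }
}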

/// If available, apply requirements from `/etc/codex/requirements.toml` to
/// `config_requirements_toml` by filling in any unset fields.
async fn load_requirements_toml(
    config_requirements_toml: &mut ConfigRequirementsToml,
    requirements_toml_file: impl AsRef<Path>,
) -> io::Result<()> {
    match tokio::fs::read_to_string(&requirements_toml_file).await {
        Ok(contents) => {
            let requirements_config: ConfigRequirementsToml =
                toml::from_str(&contents).map_err(|e| {
                    io::Error::new(
                        io::ErrorKind::InvalidData,
                        format!(
                            "Error parsing requirements file {}: {e}",
                            requirements_toml_file.as_ref().display(),
                        ),
                    )
                })?;
            config_requirements_toml.merge_unset_fields(requirements_config);
        }
        Err(e) => {
            if e.kind() != io::ErrorKind::NotFound {
                return Err(io::Error::new(
                    e.kind(),
                    format!(
                        "Failed to read requirements file {}: {e}",
                        requirements_toml_file.as_ref().display(),
                    ),
                ));
            }
        }
    }

    Ok(())
}

async fn load_requirements_from_legacy_scheme(
    config_requirements_toml: &mut ConfigRequirementsToml,
    loaded_config_layers: LoadedConfigLayers,
) -> io::Result<()> {
    // In this implementation, earlier layers cannot be overwritten by later
    // layers, so list managed_config_from_mdm first because it has the highest
    // precedence.
    let LoadedConfigLayers {
        managed_config,
        managed_config_from_mdm,
    } = loaded_config_layers;
    for config in [
        managed_config_from_mdm,
        managed_config.map(|c| c.managed_config),
    ]
    .into_iter()
    .flatten()
    {
        let legacy_config: LegacyManagedConfigToml =
            config.try_into().map_err(|err: toml::de::Error| {
                io::Error::new(
                    io::ErrorKind::InvalidData,
                    format!("Failed to parse config requirements as TOML: {err}"),
                )
            })?;

        let new_requirements_toml = ConfigRequirementsToml::from(legacy_config);
        config_requirements_toml.merge_unset_fields(new_requirements_toml);
    }

    Ok(())
}

/// Reads `project_root_markers` from the [toml::Value] produced by merging
/// `config.toml` from the config layers in the stack preceding
/// [ConfigLayerSource::Project].
///
/// Invariants:
/// - If `project_root_markers` is not specified, returns `Ok(None)`.
/// - If `project_root_markers` is specified, returns `Ok(Some(markers))` where
///   `markers` is a `Vec<String>` (including `Ok(Some(Vec::new()))` for an
///   empty array, which indicates that root detection should be disabled).
/// - Returns an error if `project_root_markers` is specified but is not an
///   array of strings.
fn project_root_markers_from_config(config: &TomlValue) -> io::Result<Option<Vec<String>>> {
    let Some(table) = config.as_table() else {
        return Ok(None);
    };
    let Some(markers_value) = table.get("project_root_markers") else {
        return Ok(None);
    };
    let TomlValue::Array(entries) = markers_value else {
        return Err(io::Error::new(
            io::ErrorKind::InvalidData,
            "project_root_markers must be an array of strings",
        ));
    };
    if entries.is_empty() {
        return Ok(Some(Vec::new()));
    }
    let mut markers = Vec::new();
    for entry in entries {
        let Some(marker) = entry.as_str() else {
            return Err(io::Error::new(
                io::ErrorKind::InvalidData,
                "project_root_markers must be an array of strings",
            ));
        };
        markers.push(marker.to_string());
    }
    Ok(Some(markers))
}
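
// A minimal illustrative sketch of the invariants documented on
// `project_root_markers_from_config`; the module name is hypothetical.
#[cfg(test)]
mod project_root_markers_examples {
    use super::*;

    #[test]
    fn markers_follow_documented_invariants() -> anyhow::Result<()> {
        // Unspecified key: defer to the defaults.
        let cfg: TomlValue = toml::from_str(r#"model = "gpt-1000""#)?;
        assert_eq!(project_root_markers_from_config(&cfg)?, None);

        // Explicit markers round-trip as strings.
        let cfg: TomlValue = toml::from_str(r#"project_root_markers = [".hg"]"#)?;
        assert_eq!(
            project_root_markers_from_config(&cfg)?,
            Some(vec![".hg".to_string()])
        );

        // An empty array disables root detection.
        let cfg: TomlValue = toml::from_str("project_root_markers = []")?;
        assert_eq!(project_root_markers_from_config(&cfg)?, Some(Vec::new()));

        // Anything other than an array of strings is an error.
        let cfg: TomlValue = toml::from_str("project_root_markers = true")?;
        assert!(project_root_markers_from_config(&cfg).is_err());
        Ok(())
    }
}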

fn default_project_root_markers() -> Vec<String> {
    DEFAULT_PROJECT_ROOT_MARKERS
        .iter()
        .map(ToString::to_string)
        .collect()
}

/// Takes a `toml::Value` parsed from a config.toml file and walks through it,
/// resolving any `AbsolutePathBuf` fields against `base_dir`, returning a new
/// `toml::Value` with the same shape but with paths resolved.
///
/// This ensures that multiple config layers can be merged together correctly
/// even if they were loaded from different directories.
fn resolve_relative_paths_in_config_toml(
    value_from_config_toml: TomlValue,
    base_dir: &Path,
) -> io::Result<TomlValue> {
    // Use the serialize/deserialize round-trip to convert the
    // `toml::Value` into a `ConfigToml` whose `AbsolutePathBuf` fields are
    // resolved against `base_dir`.
    let _guard = AbsolutePathBufGuard::new(base_dir);
    let Ok(resolved) = value_from_config_toml.clone().try_into::<ConfigToml>() else {
        return Ok(value_from_config_toml);
    };
    drop(_guard);

    let resolved_value = TomlValue::try_from(resolved).map_err(|e| {
        io::Error::new(
            io::ErrorKind::InvalidData,
            format!("Failed to serialize resolved config: {e}"),
        )
    })?;

    Ok(copy_shape_from_original(
        &value_from_config_toml,
        &resolved_value,
    ))
}

/// Ensure that every field in `original` is present in the returned
/// `toml::Value`, taking the value from `resolved` where possible. This ensures
/// the fields that we "removed" during the serialize/deserialize round-trip in
/// `resolve_relative_paths_in_config_toml` are preserved, out of an abundance
/// of caution.
fn copy_shape_from_original(original: &TomlValue, resolved: &TomlValue) -> TomlValue {
    match (original, resolved) {
        (TomlValue::Table(original_table), TomlValue::Table(resolved_table)) => {
            let mut table = toml::map::Map::new();
            for (key, original_value) in original_table {
                let resolved_value = resolved_table.get(key).unwrap_or(original_value);
                table.insert(
                    key.clone(),
                    copy_shape_from_original(original_value, resolved_value),
                );
            }
            TomlValue::Table(table)
        }
        (TomlValue::Array(original_array), TomlValue::Array(resolved_array)) => {
            let mut items = Vec::new();
            for (index, original_value) in original_array.iter().enumerate() {
                let resolved_value = resolved_array.get(index).unwrap_or(original_value);
                items.push(copy_shape_from_original(original_value, resolved_value));
            }
            TomlValue::Array(items)
        }
        (_, resolved_value) => resolved_value.clone(),
    }
}

async fn find_project_root(
    cwd: &AbsolutePathBuf,
    project_root_markers: &[String],
) -> io::Result<AbsolutePathBuf> {
    if project_root_markers.is_empty() {
        return Ok(cwd.clone());
    }

    for ancestor in cwd.as_path().ancestors() {
        for marker in project_root_markers {
            let marker_path = ancestor.join(marker);
            if tokio::fs::metadata(&marker_path).await.is_ok() {
                return AbsolutePathBuf::from_absolute_path(ancestor);
            }
        }
    }
    Ok(cwd.clone())
}
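
// A minimal illustrative sketch of the marker walk above, assuming `tempfile`
// as in the other tests in this crate; the module name is hypothetical.
#[cfg(test)]
mod find_project_root_examples {
    use super::*;
    use tempfile::tempdir;

    #[tokio::test]
    async fn marker_in_ancestor_selects_that_ancestor() -> io::Result<()> {
        let tmp = tempdir()?;
        let root = tmp.path().join("repo");
        let nested = root.join("a").join("b");
        tokio::fs::create_dir_all(&nested).await?;
        tokio::fs::write(root.join(".git"), "gitdir: here").await?;

        // The nearest ancestor containing a marker wins.
        let cwd = AbsolutePathBuf::from_absolute_path(&nested)?;
        let found = find_project_root(&cwd, &[".git".to_string()]).await?;
        assert_eq!(found.as_path(), root.as_path());

        // An empty marker list disables detection and returns `cwd` unchanged.
        let found = find_project_root(&cwd, &[]).await?;
        assert_eq!(found, cwd);
        Ok(())
    }
}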

/// Return the appropriate list of layers (each with
/// [ConfigLayerSource::Project] as the source) between `cwd` and
/// `project_root`, inclusive. The list is ordered in _increasing_ precedence,
/// starting from folders closest to `project_root` (which is the lowest
/// precedence) to those closest to `cwd` (which is the highest precedence).
async fn load_project_layers(
    cwd: &AbsolutePathBuf,
    project_root: &AbsolutePathBuf,
) -> io::Result<Vec<ConfigLayerEntry>> {
    let mut dirs = cwd
        .as_path()
        .ancestors()
        .scan(false, |done, a| {
            if *done {
                None
            } else {
                if a == project_root.as_path() {
                    *done = true;
                }
                Some(a)
            }
        })
        .collect::<Vec<_>>();
    dirs.reverse();

    let mut layers = Vec::new();
    for dir in dirs {
        let dot_codex = dir.join(".codex");
        if !tokio::fs::metadata(&dot_codex)
            .await
            .map(|meta| meta.is_dir())
            .unwrap_or(false)
        {
            continue;
        }

        let dot_codex_abs = AbsolutePathBuf::from_absolute_path(&dot_codex)?;
        let config_file = dot_codex_abs.join(CONFIG_TOML_FILE)?;
        match tokio::fs::read_to_string(&config_file).await {
            Ok(contents) => {
                let config: TomlValue = toml::from_str(&contents).map_err(|e| {
                    io::Error::new(
                        io::ErrorKind::InvalidData,
                        format!(
                            "Error parsing project config file {}: {e}",
                            config_file.as_path().display(),
                        ),
                    )
                })?;
                let config =
                    resolve_relative_paths_in_config_toml(config, dot_codex_abs.as_path())?;
                layers.push(ConfigLayerEntry::new(
                    ConfigLayerSource::Project {
                        dot_codex_folder: dot_codex_abs,
                    },
                    config,
                ));
            }
            Err(err) => {
                if err.kind() == io::ErrorKind::NotFound {
                    // If there is no config.toml file, record an empty entry
                    // for this project layer, as this may still have subfolders
                    // that are significant in the overall ConfigLayerStack.
                    layers.push(ConfigLayerEntry::new(
                        ConfigLayerSource::Project {
                            dot_codex_folder: dot_codex_abs,
                        },
                        TomlValue::Table(toml::map::Map::new()),
                    ));
                } else {
                    return Err(io::Error::new(
                        err.kind(),
                        format!(
                            "Failed to read project config file {}: {err}",
                            config_file.as_path().display(),
                        ),
                    ));
                }
            }
        }
    }

    Ok(layers)
}

/// The legacy mechanism for specifying admin-enforced configuration is to read
/// from a file like `/etc/codex/managed_config.toml` that has the same
/// structure as `config.toml` where fields like `approval_policy` can specify
/// exactly one value rather than a list of allowed values.
///
/// If present, re-interpret `managed_config.toml` as a `requirements.toml`
/// where each specified field is treated as a constraint allowing only that
/// value.
#[derive(Deserialize, Debug, Clone, Default, PartialEq)]
struct LegacyManagedConfigToml {
    approval_policy: Option<AskForApproval>,
    sandbox_mode: Option<SandboxMode>,
}

impl From<LegacyManagedConfigToml> for ConfigRequirementsToml {
    fn from(legacy: LegacyManagedConfigToml) -> Self {
        let mut config_requirements_toml = ConfigRequirementsToml::default();

        let LegacyManagedConfigToml {
            approval_policy,
            sandbox_mode,
        } = legacy;
        if let Some(approval_policy) = approval_policy {
            config_requirements_toml.allowed_approval_policies = Some(vec![approval_policy]);
        }
        if let Some(sandbox_mode) = sandbox_mode {
            config_requirements_toml.allowed_sandbox_modes = Some(vec![sandbox_mode.into()]);
        }
        config_requirements_toml
    }
}
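
// A minimal illustrative sketch of the legacy re-interpretation above: a single
// pinned value becomes a one-element allow list. The module name is hypothetical.
#[cfg(test)]
mod legacy_managed_config_examples {
    use super::*;

    #[test]
    fn pinned_value_becomes_singleton_allow_list() {
        let legacy = LegacyManagedConfigToml {
            approval_policy: Some(AskForApproval::Never),
            sandbox_mode: None,
        };
        let requirements = ConfigRequirementsToml::from(legacy);
        assert_eq!(
            requirements.allowed_approval_policies,
            Some(vec![AskForApproval::Never])
        );
        // Unset legacy fields stay unset so later layers can still fill them in.
        assert!(requirements.allowed_sandbox_modes.is_none());
    }
}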

// Cannot name this `mod tests` because of tests.rs in this folder.
#[cfg(test)]
mod unit_tests {
    use super::*;
    use tempfile::tempdir;

    #[test]
    fn ensure_resolve_relative_paths_in_config_toml_preserves_all_fields() -> anyhow::Result<()> {
        let tmp = tempdir()?;
        let base_dir = tmp.path();
        let contents = r#"
# This is a field recognized by config.toml that is an AbsolutePathBuf in
# the ConfigToml struct.
experimental_instructions_file = "./some_file.md"

# This is a field recognized by config.toml.
model = "gpt-1000"

# This is a field not recognized by config.toml.
foo = "xyzzy"
"#;
        let user_config: TomlValue = toml::from_str(contents)?;

        let normalized_toml_value = resolve_relative_paths_in_config_toml(user_config, base_dir)?;
        let mut expected_toml_value = toml::map::Map::new();
        expected_toml_value.insert(
            "experimental_instructions_file".to_string(),
            TomlValue::String(
                AbsolutePathBuf::resolve_path_against_base("./some_file.md", base_dir)?
                    .as_path()
                    .to_string_lossy()
                    .to_string(),
            ),
        );
        expected_toml_value.insert(
            "model".to_string(),
            TomlValue::String("gpt-1000".to_string()),
        );
        expected_toml_value.insert("foo".to_string(), TomlValue::String("xyzzy".to_string()));
        assert_eq!(normalized_toml_value, TomlValue::Table(expected_toml_value));
        Ok(())
    }
}

@@ -1,22 +1,28 @@
use crate::config_loader::ConfigRequirements;

use super::fingerprint::record_origins;
use super::fingerprint::version_for_toml;
use super::merge::merge_toml_values;
use codex_app_server_protocol::ConfigLayer;
use codex_app_server_protocol::ConfigLayerMetadata;
use codex_app_server_protocol::ConfigLayerSource;
use codex_utils_absolute_path::AbsolutePathBuf;
use serde_json::Value as JsonValue;
use std::collections::HashMap;
use std::path::PathBuf;
use toml::Value as TomlValue;

/// LoaderOverrides overrides managed configuration inputs (primarily for tests).
#[derive(Debug, Default, Clone)]
pub struct LoaderOverrides {
    pub managed_config_path: Option<PathBuf>,
    //TODO(gt): Add a macos_ prefix to this field and remove the target_os check.
    #[cfg(target_os = "macos")]
    pub managed_preferences_base64: Option<String>,
    pub macos_managed_config_requirements_base64: Option<String>,
}

#[derive(Debug, Clone)]
#[derive(Debug, Clone, PartialEq)]
pub struct ConfigLayerEntry {
    pub name: ConfigLayerSource,
    pub config: TomlValue,
@@ -47,34 +53,113 @@ impl ConfigLayerEntry {
            config: serde_json::to_value(&self.config).unwrap_or(JsonValue::Null),
        }
    }

    // Get the `.codex/` folder associated with this config layer, if any.
    pub fn config_folder(&self) -> Option<AbsolutePathBuf> {
        match &self.name {
            ConfigLayerSource::Mdm { .. } => None,
            ConfigLayerSource::System { file } => file.parent(),
            ConfigLayerSource::User { file } => file.parent(),
            ConfigLayerSource::Project { dot_codex_folder } => Some(dot_codex_folder.clone()),
            ConfigLayerSource::SessionFlags => None,
            ConfigLayerSource::LegacyManagedConfigTomlFromFile { .. } => None,
            ConfigLayerSource::LegacyManagedConfigTomlFromMdm => None,
        }
    }
}

#[derive(Debug, Clone)]
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum ConfigLayerStackOrdering {
    LowestPrecedenceFirst,
    HighestPrecedenceFirst,
}

#[derive(Debug, Clone, Default, PartialEq)]
pub struct ConfigLayerStack {
    pub user: ConfigLayerEntry,
    pub session_flags: ConfigLayerEntry,
    pub system: Option<ConfigLayerEntry>,
    pub mdm: Option<ConfigLayerEntry>,
    /// Layers are listed from lowest precedence (base) to highest (top), so
    /// later entries in the Vec override earlier ones.
    layers: Vec<ConfigLayerEntry>,

    /// Index into [layers] of the user config layer, if any.
    user_layer_index: Option<usize>,

    /// Constraints that must be enforced when deriving a [Config] from the
    /// layers.
    requirements: ConfigRequirements,
}

impl ConfigLayerStack {
    pub fn with_user_config(&self, user_config: TomlValue) -> Self {
        Self {
            user: ConfigLayerEntry::new(self.user.name.clone(), user_config),
            session_flags: self.session_flags.clone(),
            system: self.system.clone(),
            mdm: self.mdm.clone(),
    pub fn new(
        layers: Vec<ConfigLayerEntry>,
        requirements: ConfigRequirements,
    ) -> std::io::Result<Self> {
        let user_layer_index = verify_layer_ordering(&layers)?;
        Ok(Self {
            layers,
            user_layer_index,
            requirements,
        })
    }

    /// Returns the user config layer, if any.
    pub fn get_user_layer(&self) -> Option<&ConfigLayerEntry> {
        self.user_layer_index
            .and_then(|index| self.layers.get(index))
    }

    pub fn requirements(&self) -> &ConfigRequirements {
        &self.requirements
    }

    /// Creates a new [ConfigLayerStack] using the specified values to inject a
    /// "user layer" into the stack. If such a layer already exists, it is
    /// replaced; otherwise, it is inserted into the stack at the appropriate
    /// position based on precedence rules.
    pub fn with_user_config(&self, config_toml: &AbsolutePathBuf, user_config: TomlValue) -> Self {
        let user_layer = ConfigLayerEntry::new(
            ConfigLayerSource::User {
                file: config_toml.clone(),
            },
            user_config,
        );

        let mut layers = self.layers.clone();
        match self.user_layer_index {
            Some(index) => {
                layers[index] = user_layer;
                Self {
                    layers,
                    user_layer_index: self.user_layer_index,
                    requirements: self.requirements.clone(),
                }
            }
            None => {
                let user_layer_index = match layers
                    .iter()
                    .position(|layer| layer.name.precedence() > user_layer.name.precedence())
                {
                    Some(index) => {
                        layers.insert(index, user_layer);
                        index
                    }
                    None => {
                        layers.push(user_layer);
                        layers.len() - 1
                    }
                };
                Self {
                    layers,
                    user_layer_index: Some(user_layer_index),
                    requirements: self.requirements.clone(),
                }
            }
        }
    }

    pub fn effective_config(&self) -> TomlValue {
        let mut merged = self.user.config.clone();
        merge_toml_values(&mut merged, &self.session_flags.config);
        if let Some(system) = &self.system {
            merge_toml_values(&mut merged, &system.config);
        }
        if let Some(mdm) = &self.mdm {
            merge_toml_values(&mut merged, &mdm.config);
        let mut merged = TomlValue::Table(toml::map::Map::new());
        for layer in &self.layers {
            merge_toml_values(&mut merged, &layer.config);
        }
        merged
    }
@@ -83,38 +168,82 @@ impl ConfigLayerStack {
        let mut origins = HashMap::new();
        let mut path = Vec::new();

        record_origins(
            &self.user.config,
            &self.user.metadata(),
            &mut path,
            &mut origins,
        );
        record_origins(
            &self.session_flags.config,
            &self.session_flags.metadata(),
            &mut path,
            &mut origins,
        );
        if let Some(system) = &self.system {
            record_origins(&system.config, &system.metadata(), &mut path, &mut origins);
        }
        if let Some(mdm) = &self.mdm {
            record_origins(&mdm.config, &mdm.metadata(), &mut path, &mut origins);
        for layer in &self.layers {
            record_origins(&layer.config, &layer.metadata(), &mut path, &mut origins);
        }

        origins
    }

    pub fn layers_high_to_low(&self) -> Vec<ConfigLayer> {
        let mut layers = Vec::new();
        if let Some(mdm) = &self.mdm {
            layers.push(mdm.as_layer());
    /// Returns the highest-precedence to lowest-precedence layers, so
    /// `ConfigLayerSource::SessionFlags` would be first, if present.
    pub fn layers_high_to_low(&self) -> Vec<&ConfigLayerEntry> {
        self.get_layers(ConfigLayerStackOrdering::HighestPrecedenceFirst)
    }

|
||||
/// `ConfigLayerSource::SessionFlags` would be first, if present.
|
||||
    pub fn get_layers(&self, ordering: ConfigLayerStackOrdering) -> Vec<&ConfigLayerEntry> {
        match ordering {
            ConfigLayerStackOrdering::HighestPrecedenceFirst => self.layers.iter().rev().collect(),
            ConfigLayerStackOrdering::LowestPrecedenceFirst => self.layers.iter().collect(),
        }
        if let Some(system) = &self.system {
            layers.push(system.as_layer());
        }
        layers.push(self.session_flags.as_layer());
        layers.push(self.user.as_layer());
        layers
    }
}

/// Ensures precedence ordering of config layers is correct. Returns the index
/// of the user config layer, if any (at most one should exist).
fn verify_layer_ordering(layers: &[ConfigLayerEntry]) -> std::io::Result<Option<usize>> {
    if !layers.iter().map(|layer| &layer.name).is_sorted() {
        return Err(std::io::Error::new(
            std::io::ErrorKind::InvalidData,
            "config layers are not in correct precedence order",
        ));
    }

    // The previous check ensured `layers` is sorted by precedence, so now we
    // further verify that:
    // 1. There is at most one user config layer.
    // 2. Project layers are ordered from root to cwd.
    let mut user_layer_index: Option<usize> = None;
    let mut previous_project_dot_codex_folder: Option<&AbsolutePathBuf> = None;
    for (index, layer) in layers.iter().enumerate() {
        if matches!(layer.name, ConfigLayerSource::User { .. }) {
            if user_layer_index.is_some() {
                return Err(std::io::Error::new(
                    std::io::ErrorKind::InvalidData,
                    "multiple user config layers found",
                ));
            }
            user_layer_index = Some(index);
        }

        if let ConfigLayerSource::Project {
            dot_codex_folder: current_project_dot_codex_folder,
        } = &layer.name
        {
            if let Some(previous) = previous_project_dot_codex_folder {
                let Some(parent) = previous.as_path().parent() else {
                    return Err(std::io::Error::new(
                        std::io::ErrorKind::InvalidData,
                        "project layer has no parent directory",
                    ));
                };
                if previous == current_project_dot_codex_folder
                    || !current_project_dot_codex_folder
                        .as_path()
                        .ancestors()
                        .any(|ancestor| ancestor == parent)
                {
                    return Err(std::io::Error::new(
                        std::io::ErrorKind::InvalidData,
                        "project layers are not ordered from root to cwd",
                    ));
                }
            }
            previous_project_dot_codex_folder = Some(current_project_dot_codex_folder);
        }
    }

    Ok(user_layer_index)
}

@@ -1,6 +1,18 @@
use super::LoaderOverrides;
use super::load_config_layers_state;
use crate::config::CONFIG_TOML_FILE;
use crate::config::ConfigBuilder;
use crate::config::ConfigOverrides;
use crate::config_loader::ConfigLayerEntry;
use crate::config_loader::ConfigRequirements;
use crate::config_loader::config_requirements::ConfigRequirementsToml;
use crate::config_loader::fingerprint::version_for_toml;
use crate::config_loader::load_requirements_toml;
use codex_protocol::protocol::AskForApproval;
#[cfg(target_os = "macos")]
use codex_protocol::protocol::SandboxPolicy;
use codex_utils_absolute_path::AbsolutePathBuf;
use pretty_assertions::assert_eq;
use tempfile::tempdir;
use toml::Value as TomlValue;

@@ -33,11 +45,18 @@ extra = true
        managed_config_path: Some(managed_path),
        #[cfg(target_os = "macos")]
        managed_preferences_base64: None,
        macos_managed_config_requirements_base64: None,
    };

    let state = load_config_layers_state(tmp.path(), &[] as &[(String, TomlValue)], overrides)
        .await
        .expect("load config");
    let cwd = AbsolutePathBuf::try_from(tmp.path()).expect("cwd");
    let state = load_config_layers_state(
        tmp.path(),
        Some(cwd),
        &[] as &[(String, TomlValue)],
        overrides,
    )
    .await
    .expect("load config");
    let loaded = state.effective_config();
    let table = loaded.as_table().expect("top-level table expected");

@@ -57,23 +76,58 @@ extra = true
async fn returns_empty_when_all_layers_missing() {
    let tmp = tempdir().expect("tempdir");
    let managed_path = tmp.path().join("managed_config.toml");

    let overrides = LoaderOverrides {
        managed_config_path: Some(managed_path),
        #[cfg(target_os = "macos")]
        managed_preferences_base64: None,
        macos_managed_config_requirements_base64: None,
    };

    let layers = load_config_layers_state(tmp.path(), &[] as &[(String, TomlValue)], overrides)
        .await
        .expect("load layers");
    let base_table = layers.user.config.as_table().expect("base table expected");
    let cwd = AbsolutePathBuf::try_from(tmp.path()).expect("cwd");
    let layers = load_config_layers_state(
        tmp.path(),
        Some(cwd),
        &[] as &[(String, TomlValue)],
        overrides,
    )
    .await
    .expect("load layers");
    let user_layer = layers
        .get_user_layer()
        .expect("expected a user layer even when CODEX_HOME/config.toml does not exist");
    assert_eq!(
        &ConfigLayerEntry {
            name: super::ConfigLayerSource::User {
                file: AbsolutePathBuf::resolve_path_against_base(CONFIG_TOML_FILE, tmp.path())
                    .expect("resolve user config.toml path")
            },
            config: TomlValue::Table(toml::map::Map::new()),
            version: version_for_toml(&TomlValue::Table(toml::map::Map::new())),
        },
        user_layer,
    );
    assert_eq!(
        user_layer.config,
        TomlValue::Table(toml::map::Map::new()),
        "expected empty config for user layer when config.toml does not exist"
    );

    let binding = layers.effective_config();
    let base_table = binding.as_table().expect("base table expected");
    assert!(
        base_table.is_empty(),
        "expected empty base layer when configs missing"
    );
    assert!(
        layers.system.is_none(),
        "managed config layer should be absent when file missing"
    let num_system_layers = layers
        .layers_high_to_low()
        .iter()
        .filter(|layer| matches!(layer.name, super::ConfigLayerSource::System { .. }))
        .count();
    let expected_system_layers = if cfg!(unix) { 1 } else { 0 };
    assert_eq!(
        num_system_layers, expected_system_layers,
        "system layer should be present only on unix"
    );

    #[cfg(not(target_os = "macos"))]
@@ -92,12 +146,6 @@ async fn returns_empty_when_all_layers_missing() {
async fn managed_preferences_take_highest_precedence() {
    use base64::Engine;

    let managed_payload = r#"
[nested]
value = "managed"
flag = false
"#;
    let encoded = base64::prelude::BASE64_STANDARD.encode(managed_payload.as_bytes());
    let tmp = tempdir().expect("tempdir");
    let managed_path = tmp.path().join("managed_config.toml");

@@ -119,12 +167,28 @@ flag = true

    let overrides = LoaderOverrides {
        managed_config_path: Some(managed_path),
        managed_preferences_base64: Some(encoded),
        managed_preferences_base64: Some(
            base64::prelude::BASE64_STANDARD.encode(
                r#"
[nested]
value = "managed"
flag = false
"#
                .as_bytes(),
            ),
        ),
        macos_managed_config_requirements_base64: None,
    };

    let state = load_config_layers_state(tmp.path(), &[] as &[(String, TomlValue)], overrides)
        .await
        .expect("load config");
    let cwd = AbsolutePathBuf::try_from(tmp.path()).expect("cwd");
    let state = load_config_layers_state(
        tmp.path(),
        Some(cwd),
        &[] as &[(String, TomlValue)],
        overrides,
    )
    .await
    .expect("load config");
    let loaded = state.effective_config();
    let nested = loaded
        .get("nested")
@@ -136,3 +200,348 @@ flag = true
    );
    assert_eq!(nested.get("flag"), Some(&TomlValue::Boolean(false)));
}

#[cfg(target_os = "macos")]
#[tokio::test]
async fn managed_preferences_requirements_are_applied() -> anyhow::Result<()> {
    use base64::Engine;

    let tmp = tempdir()?;

    let state = load_config_layers_state(
        tmp.path(),
        Some(AbsolutePathBuf::try_from(tmp.path())?),
        &[] as &[(String, TomlValue)],
        LoaderOverrides {
            managed_config_path: Some(tmp.path().join("managed_config.toml")),
            managed_preferences_base64: Some(String::new()),
            macos_managed_config_requirements_base64: Some(
                base64::prelude::BASE64_STANDARD.encode(
                    r#"
allowed_approval_policies = ["never"]
allowed_sandbox_modes = ["read-only"]
"#
                    .as_bytes(),
                ),
            ),
        },
    )
    .await?;

    assert_eq!(
        state.requirements().approval_policy.value(),
        AskForApproval::Never
    );
    assert_eq!(
        *state.requirements().sandbox_policy.get(),
        SandboxPolicy::ReadOnly
    );
    assert!(
        state
            .requirements()
            .approval_policy
            .can_set(&AskForApproval::OnRequest)
            .is_err()
    );
    assert!(
        state
            .requirements()
            .sandbox_policy
            .can_set(&SandboxPolicy::WorkspaceWrite {
                writable_roots: Vec::new(),
                network_access: false,
                exclude_tmpdir_env_var: false,
                exclude_slash_tmp: false,
            })
            .is_err()
    );

    Ok(())
}

#[cfg(target_os = "macos")]
#[tokio::test]
async fn managed_preferences_requirements_take_precedence() -> anyhow::Result<()> {
    use base64::Engine;

    let tmp = tempdir()?;
    let managed_path = tmp.path().join("managed_config.toml");

    tokio::fs::write(&managed_path, "approval_policy = \"on-request\"\n").await?;

    let state = load_config_layers_state(
        tmp.path(),
        Some(AbsolutePathBuf::try_from(tmp.path())?),
        &[] as &[(String, TomlValue)],
        LoaderOverrides {
            managed_config_path: Some(managed_path),
            managed_preferences_base64: Some(String::new()),
            macos_managed_config_requirements_base64: Some(
                base64::prelude::BASE64_STANDARD.encode(
                    r#"
allowed_approval_policies = ["never"]
"#
                    .as_bytes(),
                ),
            ),
        },
    )
    .await?;

    assert_eq!(
        state.requirements().approval_policy.value(),
        AskForApproval::Never
    );
    assert!(
        state
            .requirements()
            .approval_policy
            .can_set(&AskForApproval::OnRequest)
            .is_err()
    );

    Ok(())
}

#[tokio::test(flavor = "current_thread")]
async fn load_requirements_toml_produces_expected_constraints() -> anyhow::Result<()> {
    let tmp = tempdir()?;
    let requirements_file = tmp.path().join("requirements.toml");
    tokio::fs::write(
        &requirements_file,
        r#"
allowed_approval_policies = ["never", "on-request"]
"#,
    )
    .await?;

    let mut config_requirements_toml = ConfigRequirementsToml::default();
    load_requirements_toml(&mut config_requirements_toml, &requirements_file).await?;

    assert_eq!(
        config_requirements_toml.allowed_approval_policies,
        Some(vec![AskForApproval::Never, AskForApproval::OnRequest])
    );

    let config_requirements: ConfigRequirements = config_requirements_toml.try_into()?;
    assert_eq!(
        config_requirements.approval_policy.value(),
        AskForApproval::Never
    );
    config_requirements
        .approval_policy
        .can_set(&AskForApproval::Never)?;
    assert!(
        config_requirements
            .approval_policy
            .can_set(&AskForApproval::OnFailure)
            .is_err()
    );
    Ok(())
}

#[tokio::test]
async fn project_layers_prefer_closest_cwd() -> std::io::Result<()> {
    let tmp = tempdir()?;
    let project_root = tmp.path().join("project");
    let nested = project_root.join("child");
    tokio::fs::create_dir_all(nested.join(".codex")).await?;
    tokio::fs::create_dir_all(project_root.join(".codex")).await?;
    tokio::fs::write(project_root.join(".git"), "gitdir: here").await?;

    tokio::fs::write(
        project_root.join(".codex").join(CONFIG_TOML_FILE),
        "foo = \"root\"\n",
    )
    .await?;
    tokio::fs::write(
        nested.join(".codex").join(CONFIG_TOML_FILE),
        "foo = \"child\"\n",
    )
    .await?;

    let codex_home = tmp.path().join("home");
    tokio::fs::create_dir_all(&codex_home).await?;
    let cwd = AbsolutePathBuf::from_absolute_path(&nested)?;
    let layers = load_config_layers_state(
        &codex_home,
        Some(cwd),
        &[] as &[(String, TomlValue)],
        LoaderOverrides::default(),
    )
    .await?;

    let project_layers: Vec<_> = layers
        .layers_high_to_low()
        .into_iter()
        .filter_map(|layer| match &layer.name {
            super::ConfigLayerSource::Project { dot_codex_folder } => Some(dot_codex_folder),
            _ => None,
        })
        .collect();
    assert_eq!(project_layers.len(), 2);
    assert_eq!(project_layers[0].as_path(), nested.join(".codex").as_path());
    assert_eq!(
        project_layers[1].as_path(),
        project_root.join(".codex").as_path()
    );

    let config = layers.effective_config();
    let foo = config
        .get("foo")
        .and_then(TomlValue::as_str)
        .expect("foo entry");
    assert_eq!(foo, "child");
    Ok(())
}

#[tokio::test]
async fn project_paths_resolve_relative_to_dot_codex_and_override_in_order() -> std::io::Result<()>
{
    let tmp = tempdir()?;
    let project_root = tmp.path().join("project");
    let nested = project_root.join("child");
    tokio::fs::create_dir_all(project_root.join(".codex")).await?;
    tokio::fs::create_dir_all(nested.join(".codex")).await?;
    tokio::fs::write(project_root.join(".git"), "gitdir: here").await?;

    let root_cfg = r#"
experimental_instructions_file = "root.txt"
"#;
    let nested_cfg = r#"
experimental_instructions_file = "child.txt"
"#;
    tokio::fs::write(project_root.join(".codex").join(CONFIG_TOML_FILE), root_cfg).await?;
    tokio::fs::write(nested.join(".codex").join(CONFIG_TOML_FILE), nested_cfg).await?;
    tokio::fs::write(
        project_root.join(".codex").join("root.txt"),
        "root instructions",
    )
    .await?;
    tokio::fs::write(
        nested.join(".codex").join("child.txt"),
        "child instructions",
    )
    .await?;

    let codex_home = tmp.path().join("home");
    tokio::fs::create_dir_all(&codex_home).await?;

    let config = ConfigBuilder::default()
        .codex_home(codex_home)
        .harness_overrides(ConfigOverrides {
            cwd: Some(nested.clone()),
            ..ConfigOverrides::default()
        })
        .build()
        .await?;

    assert_eq!(
        config.base_instructions.as_deref(),
        Some("child instructions")
    );

    Ok(())
}

#[tokio::test]
async fn project_layer_is_added_when_dot_codex_exists_without_config_toml() -> std::io::Result<()> {
    let tmp = tempdir()?;
    let project_root = tmp.path().join("project");
    let nested = project_root.join("child");
    tokio::fs::create_dir_all(&nested).await?;
    tokio::fs::create_dir_all(project_root.join(".codex")).await?;
    tokio::fs::write(project_root.join(".git"), "gitdir: here").await?;

    let codex_home = tmp.path().join("home");
    tokio::fs::create_dir_all(&codex_home).await?;
    let cwd = AbsolutePathBuf::from_absolute_path(&nested)?;
    let layers = load_config_layers_state(
        &codex_home,
        Some(cwd),
        &[] as &[(String, TomlValue)],
        LoaderOverrides::default(),
    )
    .await?;

    let project_layers: Vec<_> = layers
        .layers_high_to_low()
        .into_iter()
        .filter(|layer| matches!(layer.name, super::ConfigLayerSource::Project { .. }))
        .collect();
    assert_eq!(
        vec![&ConfigLayerEntry {
            name: super::ConfigLayerSource::Project {
                dot_codex_folder: AbsolutePathBuf::from_absolute_path(project_root.join(".codex"))?,
            },
            config: TomlValue::Table(toml::map::Map::new()),
            version: version_for_toml(&TomlValue::Table(toml::map::Map::new())),
        }],
        project_layers
    );

    Ok(())
}

#[tokio::test]
async fn project_root_markers_supports_alternate_markers() -> std::io::Result<()> {
    let tmp = tempdir()?;
    let project_root = tmp.path().join("project");
    let nested = project_root.join("child");
    tokio::fs::create_dir_all(project_root.join(".codex")).await?;
    tokio::fs::create_dir_all(nested.join(".codex")).await?;
    tokio::fs::write(project_root.join(".hg"), "hg").await?;
    tokio::fs::write(
        project_root.join(".codex").join(CONFIG_TOML_FILE),
        "foo = \"root\"\n",
    )
    .await?;
    tokio::fs::write(
        nested.join(".codex").join(CONFIG_TOML_FILE),
        "foo = \"child\"\n",
    )
    .await?;

    let codex_home = tmp.path().join("home");
    tokio::fs::create_dir_all(&codex_home).await?;
    tokio::fs::write(
        codex_home.join(CONFIG_TOML_FILE),
        r#"
project_root_markers = [".hg"]
"#,
    )
    .await?;

    let cwd = AbsolutePathBuf::from_absolute_path(&nested)?;
    let layers = load_config_layers_state(
        &codex_home,
        Some(cwd),
        &[] as &[(String, TomlValue)],
        LoaderOverrides::default(),
    )
    .await?;

    let project_layers: Vec<_> = layers
        .layers_high_to_low()
        .into_iter()
        .filter_map(|layer| match &layer.name {
            super::ConfigLayerSource::Project { dot_codex_folder } => Some(dot_codex_folder),
            _ => None,
        })
        .collect();
    assert_eq!(project_layers.len(), 2);
    assert_eq!(project_layers[0].as_path(), nested.join(".codex").as_path());
    assert_eq!(
        project_layers[1].as_path(),
        project_root.join(".codex").as_path()
    );

    let merged = layers.effective_config();
    let foo = merged
        .get("foo")
        .and_then(TomlValue::as_str)
        .expect("foo entry");
    assert_eq!(foo, "child");

    Ok(())
}

@@ -10,7 +10,7 @@ use crate::codex_conversation::CodexConversation;
use crate::config::Config;
use crate::error::CodexErr;
use crate::error::Result as CodexResult;
use crate::openai_models::models_manager::ModelsManager;
use crate::models_manager::manager::ModelsManager;
use crate::protocol::Event;
use crate::protocol::EventMsg;
use crate::protocol::SessionConfiguredEvent;
@@ -379,9 +379,9 @@ mod tests {
        assert_matches!(truncated2, InitialHistory::New);
    }

    #[test]
    fn ignores_session_prefix_messages_when_truncating() {
        let (session, turn_context) = make_session_and_context();
    #[tokio::test]
    async fn ignores_session_prefix_messages_when_truncating() {
        let (session, turn_context) = make_session_and_context().await;
        let mut items = session.build_initial_context(&turn_context);
        items.push(user_msg("feature request"));
        items.push(assistant_msg("ack"));

@@ -1,10 +1,6 @@
use codex_utils_absolute_path::AbsolutePathBuf;
use serde::Deserialize;
use serde::Serialize;
use strum_macros::Display as DeriveDisplay;

use crate::codex::TurnContext;
use crate::protocol::AskForApproval;
use crate::protocol::NetworkAccess;
use crate::protocol::SandboxPolicy;
use crate::shell::Shell;
use codex_protocol::config_types::SandboxMode;
@@ -12,15 +8,11 @@ use codex_protocol::models::ContentItem;
use codex_protocol::models::ResponseItem;
use codex_protocol::protocol::ENVIRONMENT_CONTEXT_CLOSE_TAG;
use codex_protocol::protocol::ENVIRONMENT_CONTEXT_OPEN_TAG;
use codex_utils_absolute_path::AbsolutePathBuf;
use serde::Deserialize;
use serde::Serialize;
use std::path::PathBuf;

#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, DeriveDisplay)]
#[serde(rename_all = "kebab-case")]
#[strum(serialize_all = "kebab-case")]
pub enum NetworkAccess {
    Restricted,
    Enabled,
}
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
#[serde(rename = "environment_context", rename_all = "snake_case")]
pub(crate) struct EnvironmentContext {
@@ -45,12 +37,14 @@ impl EnvironmentContext {
            sandbox_mode: match sandbox_policy {
                Some(SandboxPolicy::DangerFullAccess) => Some(SandboxMode::DangerFullAccess),
                Some(SandboxPolicy::ReadOnly) => Some(SandboxMode::ReadOnly),
                Some(SandboxPolicy::ExternalSandbox { .. }) => Some(SandboxMode::DangerFullAccess),
                Some(SandboxPolicy::WorkspaceWrite { .. }) => Some(SandboxMode::WorkspaceWrite),
                None => None,
            },
            network_access: match sandbox_policy {
                Some(SandboxPolicy::DangerFullAccess) => Some(NetworkAccess::Enabled),
                Some(SandboxPolicy::ReadOnly) => Some(NetworkAccess::Restricted),
                Some(SandboxPolicy::ExternalSandbox { network_access }) => Some(network_access),
                Some(SandboxPolicy::WorkspaceWrite { network_access, .. }) => {
                    if network_access {
                        Some(NetworkAccess::Enabled)
@@ -272,6 +266,48 @@ mod tests {
        assert_eq!(context.serialize_to_xml(), expected);
    }

    #[test]
    fn serialize_external_sandbox_environment_context() {
        let context = EnvironmentContext::new(
            None,
            Some(AskForApproval::OnRequest),
            Some(SandboxPolicy::ExternalSandbox {
                network_access: NetworkAccess::Enabled,
            }),
            fake_shell(),
        );

        let expected = r#"<environment_context>
<approval_policy>on-request</approval_policy>
<sandbox_mode>danger-full-access</sandbox_mode>
<network_access>enabled</network_access>
<shell>bash</shell>
</environment_context>"#;

        assert_eq!(context.serialize_to_xml(), expected);
    }

    #[test]
    fn serialize_external_sandbox_with_restricted_network_environment_context() {
        let context = EnvironmentContext::new(
            None,
            Some(AskForApproval::OnRequest),
            Some(SandboxPolicy::ExternalSandbox {
                network_access: NetworkAccess::Restricted,
            }),
            fake_shell(),
        );

        let expected = r#"<environment_context>
<approval_policy>on-request</approval_policy>
<sandbox_mode>danger-full-access</sandbox_mode>
<network_access>restricted</network_access>
<shell>bash</shell>
</environment_context>"#;

        assert_eq!(context.serialize_to_xml(), expected);
    }

    #[test]
    fn serialize_full_access_environment_context() {
        let context = EnvironmentContext::new(

@@ -135,7 +135,9 @@ pub async fn process_exec_tool_call(
    stdout_stream: Option<StdoutStream>,
) -> Result<ExecToolCallOutput> {
    let sandbox_type = match &sandbox_policy {
        SandboxPolicy::DangerFullAccess => SandboxType::None,
        SandboxPolicy::DangerFullAccess | SandboxPolicy::ExternalSandbox { .. } => {
            SandboxType::None
        }
        _ => get_platform_sandbox().unwrap_or(SandboxType::None),
    };
    tracing::debug!("Sandbox type: {sandbox_type:?}");
@@ -523,7 +525,10 @@ async fn exec(
) -> Result<RawExecToolCallOutput> {
    #[cfg(target_os = "windows")]
    if sandbox == SandboxType::WindowsRestrictedToken
        && !matches!(sandbox_policy, SandboxPolicy::DangerFullAccess)
        && !matches!(
            sandbox_policy,
            SandboxPolicy::DangerFullAccess | SandboxPolicy::ExternalSandbox { .. }
        )
    {
        return exec_windows_sandbox(params, sandbox_policy).await;
    }
