Mirror of https://github.com/openai/codex.git, synced 2026-02-05 16:33:42 +00:00.
Compare commits
40 Commits
| SHA1 |
|---|
| b3f6608e6b |
| 0e051644a9 |
| 40d14c0756 |
| af65666561 |
| 2ae1f81d84 |
| d363a0968e |
| bce030ddb5 |
| f4af6e389e |
| b315b22f7b |
| c9e149fd5c |
| bacdc004be |
| ab5972d447 |
| 767b66f407 |
| 830ab4ce20 |
| 3f73e2c892 |
| 1822ffe870 |
| 7e2165f394 |
| 8e5f38c0f0 |
| 1388e99674 |
| f56d1dc8fc |
| 9be310041b |
| 0fbcdd77c8 |
| 9bce050385 |
| 3f92ad4190 |
| 54ee302a06 |
| 44fa06ae36 |
| 856f97f449 |
| fe7a3f0c2b |
| c30ca0d5b6 |
| a8a6cbdd1c |
| e4257f432e |
| 2c793083f4 |
| e150798baf |
| 33a6cc66ab |
| 52d0ec4cd8 |
| 397279d46e |
| 30ca89424c |
| d909048a85 |
| 888c6dd9e7 |
| b5dd189067 |
.github/workflows/issue-deduplicator.yml (vendored, 1 change)
```
@@ -46,7 +46,6 @@ jobs:
        with:
          openai-api-key: ${{ secrets.CODEX_OPENAI_API_KEY }}
          allow-users: "*"
          model: gpt-5.1
          prompt: |
            You are an assistant that triages new GitHub issues by identifying potential duplicates.
```
.github/workflows/rust-release.yml (vendored, 22 changes)
```
@@ -371,8 +371,20 @@ jobs:
          path: |
            codex-rs/dist/${{ matrix.target }}/*

  shell-tool-mcp:
    name: shell-tool-mcp
    needs: tag-check
    uses: ./.github/workflows/shell-tool-mcp.yml
    with:
      release-tag: ${{ github.ref_name }}
      # We are not ready to publish yet.
      publish: false
    secrets: inherit

  release:
    needs: build
    needs:
      - build
      - shell-tool-mcp
    name: release
    runs-on: ubuntu-latest
    permissions:
@@ -395,6 +407,14 @@ jobs:
      - name: List
        run: ls -R dist/

      # This is a temporary fix: we should modify shell-tool-mcp.yml so these
      # files do not end up in dist/ in the first place.
      - name: Delete entries from dist/ that should not go in the release
        run: |
          rm -rf dist/shell-tool-mcp*

          ls -R dist/

      - name: Define release name
        id: release_name
        run: |
```
.github/workflows/shell-tool-mcp-ci.yml (vendored, new file, 48 lines)
@@ -0,0 +1,48 @@
```yaml
name: shell-tool-mcp CI

on:
  push:
    paths:
      - "shell-tool-mcp/**"
      - ".github/workflows/shell-tool-mcp-ci.yml"
      - "pnpm-lock.yaml"
      - "pnpm-workspace.yaml"
  pull_request:
    paths:
      - "shell-tool-mcp/**"
      - ".github/workflows/shell-tool-mcp-ci.yml"
      - "pnpm-lock.yaml"
      - "pnpm-workspace.yaml"

env:
  NODE_VERSION: 22

jobs:
  test:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repository
        uses: actions/checkout@v5

      - name: Setup pnpm
        uses: pnpm/action-setup@v4
        with:
          run_install: false

      - name: Setup Node.js
        uses: actions/setup-node@v5
        with:
          node-version: ${{ env.NODE_VERSION }}
          cache: "pnpm"

      - name: Install dependencies
        run: pnpm install --frozen-lockfile

      - name: Format check
        run: pnpm --filter @openai/codex-shell-tool-mcp run format

      - name: Run tests
        run: pnpm --filter @openai/codex-shell-tool-mcp test

      - name: Build
        run: pnpm --filter @openai/codex-shell-tool-mcp run build
```
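The same checks can be reproduced locally before pushing; a minimal sketch, assuming pnpm and Node 22 are installed and you are at the repository root:

```shell
# Reproduce the shell-tool-mcp CI steps locally (sketch; assumes pnpm + Node 22).
pnpm install --frozen-lockfile
pnpm --filter @openai/codex-shell-tool-mcp run format
pnpm --filter @openai/codex-shell-tool-mcp test
pnpm --filter @openai/codex-shell-tool-mcp run build
```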
.github/workflows/shell-tool-mcp.yml (vendored, new file, 412 lines)
@@ -0,0 +1,412 @@
```yaml
name: shell-tool-mcp

on:
  workflow_call:
    inputs:
      release-version:
        description: Version to publish (x.y.z or x.y.z-alpha.N). Defaults to GITHUB_REF_NAME when it starts with rust-v.
        required: false
        type: string
      release-tag:
        description: Tag name to use when downloading release artifacts (defaults to rust-v<version>).
        required: false
        type: string
      publish:
        description: Whether to publish to npm when the version is releasable.
        required: false
        default: true
        type: boolean

env:
  NODE_VERSION: 22

jobs:
  metadata:
    runs-on: ubuntu-latest
    outputs:
      version: ${{ steps.compute.outputs.version }}
      release_tag: ${{ steps.compute.outputs.release_tag }}
      should_publish: ${{ steps.compute.outputs.should_publish }}
      npm_tag: ${{ steps.compute.outputs.npm_tag }}
    steps:
      - name: Compute version and tags
        id: compute
        run: |
          set -euo pipefail

          version="${{ inputs.release-version }}"
          release_tag="${{ inputs.release-tag }}"

          if [[ -z "$version" ]]; then
            if [[ -n "$release_tag" && "$release_tag" =~ ^rust-v.+ ]]; then
              version="${release_tag#rust-v}"
            elif [[ "${GITHUB_REF_NAME:-}" =~ ^rust-v.+ ]]; then
              version="${GITHUB_REF_NAME#rust-v}"
              release_tag="${GITHUB_REF_NAME}"
            else
              echo "release-version is required when GITHUB_REF_NAME is not a rust-v tag."
              exit 1
            fi
          fi

          if [[ -z "$release_tag" ]]; then
            release_tag="rust-v${version}"
          fi

          npm_tag=""
          should_publish="false"
          if [[ "$version" =~ ^[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
            should_publish="true"
          elif [[ "$version" =~ ^[0-9]+\.[0-9]+\.[0-9]+-alpha\.[0-9]+$ ]]; then
            should_publish="true"
            npm_tag="alpha"
          fi

          echo "version=${version}" >> "$GITHUB_OUTPUT"
          echo "release_tag=${release_tag}" >> "$GITHUB_OUTPUT"
          echo "npm_tag=${npm_tag}" >> "$GITHUB_OUTPUT"
          echo "should_publish=${should_publish}" >> "$GITHUB_OUTPUT"
```
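The version-classification logic above publishes plain `x.y.z` releases under npm's default dist-tag and `x.y.z-alpha.N` prereleases under `alpha`; anything else skips publishing. A standalone sketch of the same regexes, runnable in any bash:

```shell
# Sketch of the metadata job's version classification (same regexes as above).
classify() {
  local version="$1"
  if [[ "$version" =~ ^[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
    echo "$version -> publish (default dist-tag)"
  elif [[ "$version" =~ ^[0-9]+\.[0-9]+\.[0-9]+-alpha\.[0-9]+$ ]]; then
    echo "$version -> publish (--tag alpha)"
  else
    echo "$version -> not releasable, publish skipped"
  fi
}
classify 1.2.3          # publish (default dist-tag)
classify 1.2.3-alpha.4  # publish (--tag alpha)
classify 1.2.3-rc.1     # not releasable, publish skipped
```

The workflow continues with the build jobs: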
```yaml
  rust-binaries:
    name: Build Rust - ${{ matrix.target }}
    needs: metadata
    runs-on: ${{ matrix.runner }}
    timeout-minutes: 30
    defaults:
      run:
        working-directory: codex-rs
    strategy:
      fail-fast: false
      matrix:
        include:
          - runner: macos-15-xlarge
            target: aarch64-apple-darwin
          - runner: macos-15-xlarge
            target: x86_64-apple-darwin
          - runner: ubuntu-24.04
            target: x86_64-unknown-linux-musl
            install_musl: true
          - runner: ubuntu-24.04-arm
            target: aarch64-unknown-linux-musl
            install_musl: true
    steps:
      - name: Checkout repository
        uses: actions/checkout@v5

      - uses: dtolnay/rust-toolchain@1.90
        with:
          targets: ${{ matrix.target }}

      - if: ${{ matrix.install_musl }}
        name: Install musl build dependencies
        run: |
          sudo apt-get update
          sudo apt-get install -y musl-tools pkg-config

      - name: Build exec server binaries
        run: cargo build --release --target ${{ matrix.target }} --bin codex-exec-mcp-server --bin codex-execve-wrapper

      - name: Stage exec server binaries
        run: |
          dest="${GITHUB_WORKSPACE}/artifacts/vendor/${{ matrix.target }}"
          mkdir -p "$dest"
          cp "target/${{ matrix.target }}/release/codex-exec-mcp-server" "$dest/"
          cp "target/${{ matrix.target }}/release/codex-execve-wrapper" "$dest/"

      - uses: actions/upload-artifact@v4
        with:
          name: shell-tool-mcp-rust-${{ matrix.target }}
          path: artifacts/**
          if-no-files-found: error

  bash-linux:
    name: Build Bash (Linux) - ${{ matrix.variant }} - ${{ matrix.target }}
    needs: metadata
    runs-on: ${{ matrix.runner }}
    timeout-minutes: 30
    container:
      image: ${{ matrix.image }}
    strategy:
      fail-fast: false
      matrix:
        include:
          - runner: ubuntu-24.04
            target: x86_64-unknown-linux-musl
            variant: ubuntu-24.04
            image: ubuntu:24.04
          - runner: ubuntu-24.04
            target: x86_64-unknown-linux-musl
            variant: ubuntu-22.04
            image: ubuntu:22.04
          - runner: ubuntu-24.04
            target: x86_64-unknown-linux-musl
            variant: ubuntu-20.04
            image: ubuntu:20.04
          - runner: ubuntu-24.04
            target: x86_64-unknown-linux-musl
            variant: debian-12
            image: debian:12
          - runner: ubuntu-24.04
            target: x86_64-unknown-linux-musl
            variant: debian-11
            image: debian:11
          - runner: ubuntu-24.04
            target: x86_64-unknown-linux-musl
            variant: centos-9
            image: quay.io/centos/centos:stream9
          - runner: ubuntu-24.04-arm
            target: aarch64-unknown-linux-musl
            variant: ubuntu-24.04
            image: arm64v8/ubuntu:24.04
          - runner: ubuntu-24.04-arm
            target: aarch64-unknown-linux-musl
            variant: ubuntu-22.04
            image: arm64v8/ubuntu:22.04
          - runner: ubuntu-24.04-arm
            target: aarch64-unknown-linux-musl
            variant: ubuntu-20.04
            image: arm64v8/ubuntu:20.04
          - runner: ubuntu-24.04-arm
            target: aarch64-unknown-linux-musl
            variant: debian-12
            image: arm64v8/debian:12
          - runner: ubuntu-24.04-arm
            target: aarch64-unknown-linux-musl
            variant: debian-11
            image: arm64v8/debian:11
          - runner: ubuntu-24.04-arm
            target: aarch64-unknown-linux-musl
            variant: centos-9
            image: quay.io/centos/centos:stream9
    steps:
      - name: Install build prerequisites
        shell: bash
        run: |
          set -euo pipefail
          if command -v apt-get >/dev/null 2>&1; then
            apt-get update
            DEBIAN_FRONTEND=noninteractive apt-get install -y git build-essential bison autoconf gettext
          elif command -v dnf >/dev/null 2>&1; then
            dnf install -y git gcc gcc-c++ make bison autoconf gettext
          elif command -v yum >/dev/null 2>&1; then
            yum install -y git gcc gcc-c++ make bison autoconf gettext
          else
            echo "Unsupported package manager in container"
            exit 1
          fi

      - name: Checkout repository
        uses: actions/checkout@v5

      - name: Build patched Bash
        shell: bash
        run: |
          set -euo pipefail
          git clone --depth 1 https://github.com/bminor/bash /tmp/bash
          cd /tmp/bash
          git fetch --depth 1 origin a8a1c2fac029404d3f42cd39f5a20f24b6e4fe4b
          git checkout a8a1c2fac029404d3f42cd39f5a20f24b6e4fe4b
          git apply "${GITHUB_WORKSPACE}/shell-tool-mcp/patches/bash-exec-wrapper.patch"
          ./configure --without-bash-malloc
          cores="$(command -v nproc >/dev/null 2>&1 && nproc || getconf _NPROCESSORS_ONLN)"
          make -j"${cores}"

          dest="${GITHUB_WORKSPACE}/artifacts/vendor/${{ matrix.target }}/bash/${{ matrix.variant }}"
          mkdir -p "$dest"
          cp bash "$dest/bash"

      - uses: actions/upload-artifact@v4
        with:
          name: shell-tool-mcp-bash-${{ matrix.target }}-${{ matrix.variant }}
          path: artifacts/**
          if-no-files-found: error

  bash-darwin:
    name: Build Bash (macOS) - ${{ matrix.variant }} - ${{ matrix.target }}
    needs: metadata
    runs-on: ${{ matrix.runner }}
    timeout-minutes: 30
    strategy:
      fail-fast: false
      matrix:
        include:
          - runner: macos-15-xlarge
            target: aarch64-apple-darwin
            variant: macos-15
          - runner: macos-14
            target: aarch64-apple-darwin
            variant: macos-14
          - runner: macos-13
            target: x86_64-apple-darwin
            variant: macos-13
    steps:
      - name: Checkout repository
        uses: actions/checkout@v5

      - name: Build patched Bash
        shell: bash
        run: |
          set -euo pipefail
          git clone --depth 1 https://github.com/bminor/bash /tmp/bash
          cd /tmp/bash
          git fetch --depth 1 origin a8a1c2fac029404d3f42cd39f5a20f24b6e4fe4b
          git checkout a8a1c2fac029404d3f42cd39f5a20f24b6e4fe4b
          git apply "${GITHUB_WORKSPACE}/shell-tool-mcp/patches/bash-exec-wrapper.patch"
          ./configure --without-bash-malloc
          cores="$(getconf _NPROCESSORS_ONLN)"
          make -j"${cores}"

          dest="${GITHUB_WORKSPACE}/artifacts/vendor/${{ matrix.target }}/bash/${{ matrix.variant }}"
          mkdir -p "$dest"
          cp bash "$dest/bash"

      - uses: actions/upload-artifact@v4
        with:
          name: shell-tool-mcp-bash-${{ matrix.target }}-${{ matrix.variant }}
          path: artifacts/**
          if-no-files-found: error
```
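Each build job stages its output under `artifacts/vendor/<target>/...`, so once the `package` job below merges the downloaded artifacts, the combined `vendor/` tree should look roughly like this sketch (the exact targets and variants depend on the matrices above):

```shell
# Approximate merged vendor/ layout (entries per matrix combination):
# vendor/x86_64-unknown-linux-musl/codex-exec-mcp-server
# vendor/x86_64-unknown-linux-musl/codex-execve-wrapper
# vendor/x86_64-unknown-linux-musl/bash/ubuntu-24.04/bash
# vendor/x86_64-unknown-linux-musl/bash/debian-12/bash
# vendor/aarch64-apple-darwin/codex-exec-mcp-server
# vendor/aarch64-apple-darwin/codex-execve-wrapper
# vendor/aarch64-apple-darwin/bash/macos-15/bash
find artifacts -path '*/vendor/*' -type f  # inspect downloaded artifacts locally
```

The `package` job that performs the merge follows: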
```yaml
  package:
    name: Package npm module
    needs:
      - metadata
      - rust-binaries
      - bash-linux
      - bash-darwin
    runs-on: ubuntu-latest
    env:
      PACKAGE_VERSION: ${{ needs.metadata.outputs.version }}
    steps:
      - name: Checkout repository
        uses: actions/checkout@v5

      - name: Setup pnpm
        uses: pnpm/action-setup@v4
        with:
          version: 10.8.1
          run_install: false

      - name: Setup Node.js
        uses: actions/setup-node@v5
        with:
          node-version: ${{ env.NODE_VERSION }}

      - name: Install JavaScript dependencies
        run: pnpm install --frozen-lockfile

      - name: Build (shell-tool-mcp)
        run: pnpm --filter @openai/codex-shell-tool-mcp run build

      - name: Download build artifacts
        uses: actions/download-artifact@v4
        with:
          path: artifacts

      - name: Assemble staging directory
        id: staging
        shell: bash
        run: |
          set -euo pipefail
          staging="${STAGING_DIR}"
          mkdir -p "$staging" "$staging/vendor"
          cp shell-tool-mcp/README.md "$staging/"
          cp shell-tool-mcp/package.json "$staging/"
          cp -R shell-tool-mcp/bin "$staging/"

          found_vendor="false"
          shopt -s nullglob
          for vendor_dir in artifacts/*/vendor; do
            rsync -av "$vendor_dir/" "$staging/vendor/"
            found_vendor="true"
          done
          if [[ "$found_vendor" == "false" ]]; then
            echo "No vendor payloads were downloaded."
            exit 1
          fi

          node - <<'NODE'
          import fs from "node:fs";
          import path from "node:path";

          const stagingDir = process.env.STAGING_DIR;
          const version = process.env.PACKAGE_VERSION;
          const pkgPath = path.join(stagingDir, "package.json");
          const pkg = JSON.parse(fs.readFileSync(pkgPath, "utf8"));
          pkg.version = version;
          fs.writeFileSync(pkgPath, JSON.stringify(pkg, null, 2) + "\n");
          NODE

          echo "dir=$staging" >> "$GITHUB_OUTPUT"
        env:
          STAGING_DIR: ${{ runner.temp }}/shell-tool-mcp

      - name: Ensure binaries are executable
        run: |
          set -euo pipefail
          staging="${{ steps.staging.outputs.dir }}"
          chmod +x \
            "$staging"/vendor/*/codex-exec-mcp-server \
            "$staging"/vendor/*/codex-execve-wrapper \
            "$staging"/vendor/*/bash/*/bash

      - name: Create npm tarball
        shell: bash
        run: |
          set -euo pipefail
          mkdir -p dist/npm
          staging="${{ steps.staging.outputs.dir }}"
          pack_info=$(cd "$staging" && npm pack --ignore-scripts --json --pack-destination "${GITHUB_WORKSPACE}/dist/npm")
          filename=$(PACK_INFO="$pack_info" node -e 'const data = JSON.parse(process.env.PACK_INFO); console.log(data[0].filename);')
          mv "dist/npm/${filename}" "dist/npm/codex-shell-tool-mcp-npm-${PACKAGE_VERSION}.tgz"

      - uses: actions/upload-artifact@v4
        with:
          name: codex-shell-tool-mcp-npm
          path: dist/npm/codex-shell-tool-mcp-npm-${{ env.PACKAGE_VERSION }}.tgz
          if-no-files-found: error
```
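Before the tarball is released, its contents can be sanity-checked the same way locally; a sketch (the version number here is hypothetical):

```shell
# List the contents of a packaged tarball produced by the step above.
tar -tzf dist/npm/codex-shell-tool-mcp-npm-1.2.3.tgz | head -n 20
```

The `publish` job follows: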
```yaml
  publish:
    name: Publish npm package
    needs:
      - metadata
      - package
    if: ${{ inputs.publish && needs.metadata.outputs.should_publish == 'true' }}
    runs-on: ubuntu-latest
    permissions:
      id-token: write
      contents: read
    steps:
      - name: Setup pnpm
        uses: pnpm/action-setup@v4
        with:
          version: 10.8.1
          run_install: false

      - name: Setup Node.js
        uses: actions/setup-node@v5
        with:
          node-version: ${{ env.NODE_VERSION }}
          registry-url: https://registry.npmjs.org
          scope: "@openai"

      - name: Update npm
        run: npm install -g npm@latest

      - name: Download npm tarball
        uses: actions/download-artifact@v4
        with:
          name: codex-shell-tool-mcp-npm
          path: dist/npm

      - name: Publish to npm
        env:
          NPM_TAG: ${{ needs.metadata.outputs.npm_tag }}
          VERSION: ${{ needs.metadata.outputs.version }}
        shell: bash
        run: |
          set -euo pipefail
          tag_args=()
          if [[ -n "${NPM_TAG}" ]]; then
            tag_args+=(--tag "${NPM_TAG}")
          fi
          npm publish "dist/npm/codex-shell-tool-mcp-npm-${VERSION}.tgz" "${tag_args[@]}"
```
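Because prereleases go out under the `alpha` dist-tag, consumers opt in explicitly; a sketch of the resulting install commands, assuming the package is public on npm:

```shell
# Stable versions install from the default dist-tag:
npm install @openai/codex-shell-tool-mcp
# Alpha prereleases must be requested explicitly:
npm install @openai/codex-shell-tool-mcp@alpha
```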
````
@@ -92,15 +92,15 @@ prefix_rule(

In this example rule, if Codex wants to run commands with the prefix `git push` or `git fetch`, it will first ask for user approval.

Use [`execpolicy2` CLI](./codex-rs/execpolicy2/README.md) to preview decisions for policy files:
Use the `codex execpolicy check` subcommand to preview decisions before you save a rule (see the [`codex-execpolicy` README](./codex-rs/execpolicy/README.md) for syntax details):

```shell
cargo run -p codex-execpolicy2 -- check --policy ~/.codex/policy/default.codexpolicy git push origin main
codex execpolicy check --policy ~/.codex/policy/default.codexpolicy git push origin main
```

Pass multiple `--policy` flags to test how several files combine. See the [`codex-rs/execpolicy2` README](./codex-rs/execpolicy2/README.md) for a more detailed walkthrough of the available syntax.
Pass multiple `--policy` flags to test how several files combine, and use `--pretty` for formatted JSON output. See the [`codex-rs/execpolicy` README](./codex-rs/execpolicy/README.md) for a more detailed walkthrough of the available syntax.

---
## Note: `execpolicy` commands are still in preview. The API may have breaking changes in the future.

### Docs & FAQ
````
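As the updated README text notes, several policy files can be combined in one check; a hypothetical invocation (the `team.codexpolicy` file name is illustrative):

```shell
codex execpolicy check \
  --policy ~/.codex/policy/default.codexpolicy \
  --policy ~/.codex/policy/team.codexpolicy \
  --pretty \
  git fetch origin
```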
```
@@ -7,3 +7,7 @@ slow-timeout = { period = "15s", terminate-after = 2 }
# Do not add new tests here
filter = 'test(rmcp_client) | test(humanlike_typing_1000_chars_appears_live_no_placeholder)'
slow-timeout = { period = "1m", terminate-after = 4 }

[[profile.default.overrides]]
filter = 'test(approval_matrix_covers_all_modes)'
slow-timeout = { period = "30s", terminate-after = 2 }
```
codex-rs/Cargo.lock (generated, 74 changes)
```
@@ -187,8 +187,10 @@ dependencies = [
 "codex-app-server-protocol",
 "codex-core",
 "codex-protocol",
 "core_test_support",
 "serde",
 "serde_json",
 "shlex",
 "tokio",
 "uuid",
 "wiremock",
@@ -260,7 +262,7 @@ dependencies = [
 "memchr",
 "proc-macro2",
 "quote",
 "rustc-hash 2.1.1",
 "rustc-hash",
 "serde",
 "serde_derive",
 "syn 2.0.104",
@@ -726,6 +728,17 @@ version = "0.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "613afe47fcd5fac7ccf1db93babcb082c5994d996f20b8b159f2ad1658eb5724"

[[package]]
name = "chardetng"
version = "0.1.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "14b8f0b65b7b08ae3c8187e8d77174de20cb6777864c6b832d8ad365999cf1ea"
dependencies = [
 "cfg-if",
 "encoding_rs",
 "memchr",
]

[[package]]
name = "chrono"
version = "0.4.42"
@@ -849,7 +862,6 @@ dependencies = [
 "codex-login",
 "codex-protocol",
 "codex-utils-json-to-toml",
 "codex-windows-sandbox",
 "core_test_support",
 "mcp-types",
 "opentelemetry-appender-tracing",
@@ -858,6 +870,7 @@ dependencies = [
 "serde",
 "serde_json",
 "serial_test",
 "shlex",
 "tempfile",
 "tokio",
 "toml",
@@ -880,6 +893,7 @@ dependencies = [
 "serde",
 "serde_json",
 "strum_macros 0.27.2",
 "thiserror 2.0.17",
 "ts-rs",
 "uuid",
]
@@ -990,6 +1004,7 @@ dependencies = [
 "codex-common",
 "codex-core",
 "codex-exec",
 "codex-execpolicy",
 "codex-login",
 "codex-mcp-server",
 "codex-process-hardening",
@@ -1081,6 +1096,7 @@ dependencies = [
 "async-trait",
 "base64",
 "bytes",
 "chardetng",
 "chrono",
 "codex-app-server-protocol",
 "codex-apply-patch",
@@ -1096,13 +1112,13 @@ dependencies = [
 "codex-utils-pty",
 "codex-utils-readiness",
 "codex-utils-string",
 "codex-utils-tokenizer",
 "codex-windows-sandbox",
 "core-foundation 0.9.4",
 "core_test_support",
 "ctor 0.5.0",
 "dirs",
 "dunce",
 "encoding_rs",
 "env-flags",
 "escargot",
 "eventsource-stream",
@@ -1202,6 +1218,7 @@ dependencies = [
 "socket2 0.6.0",
 "tempfile",
 "tokio",
 "tokio-util",
 "tracing",
 "tracing-subscriber",
]
@@ -1615,18 +1632,6 @@ dependencies = [
name = "codex-utils-string"
version = "0.0.0"

[[package]]
name = "codex-utils-tokenizer"
version = "0.0.0"
dependencies = [
 "anyhow",
 "codex-utils-cache",
 "pretty_assertions",
 "thiserror 2.0.17",
 "tiktoken-rs",
 "tokio",
]

[[package]]
name = "codex-windows-sandbox"
version = "0.1.0"
@@ -1774,6 +1779,7 @@ dependencies = [
 "notify",
 "regex-lite",
 "serde_json",
 "shlex",
 "tempfile",
 "tokio",
 "walkdir",
@@ -2448,17 +2454,6 @@ dependencies = [
 "once_cell",
]

[[package]]
name = "fancy-regex"
version = "0.13.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "531e46835a22af56d1e3b66f04844bed63158bc094a628bec1d321d9b4c44bf2"
dependencies = [
 "bit-set",
 "regex-automata",
 "regex-syntax 0.8.5",
]

[[package]]
name = "fastrand"
version = "2.3.0"
@@ -3746,11 +3741,13 @@ dependencies = [
 "assert_cmd",
 "codex-core",
 "codex-mcp-server",
 "core_test_support",
 "mcp-types",
 "os_info",
 "pretty_assertions",
 "serde",
 "serde_json",
 "shlex",
 "tokio",
 "wiremock",
]
@@ -4783,7 +4780,7 @@ dependencies = [
 "pin-project-lite",
 "quinn-proto",
 "quinn-udp",
 "rustc-hash 2.1.1",
 "rustc-hash",
 "rustls",
 "socket2 0.6.0",
 "thiserror 2.0.17",
@@ -4803,7 +4800,7 @@ dependencies = [
 "lru-slab",
 "rand 0.9.2",
 "ring",
 "rustc-hash 2.1.1",
 "rustc-hash",
 "rustls",
 "rustls-pki-types",
 "slab",
@@ -5148,12 +5145,6 @@ version = "0.1.25"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "989e6739f80c4ad5b13e0fd7fe89531180375b18520cc8c82080e4dc4035b84f"

[[package]]
name = "rustc-hash"
version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2"

[[package]]
name = "rustc-hash"
version = "2.1.1"
@@ -6374,21 +6365,6 @@ dependencies = [
 "zune-jpeg",
]

[[package]]
name = "tiktoken-rs"
version = "0.9.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3a19830747d9034cd9da43a60eaa8e552dfda7712424aebf187b7a60126bae0d"
dependencies = [
 "anyhow",
 "base64",
 "bstr",
 "fancy-regex",
 "lazy_static",
 "regex",
 "rustc-hash 1.1.0",
]

[[package]]
name = "time"
version = "0.3.44"
```
```
@@ -41,7 +41,6 @@ members = [
    "utils/pty",
    "utils/readiness",
    "utils/string",
    "utils/tokenizer",
]
resolver = "2"

@@ -90,7 +89,6 @@ codex-utils-json-to-toml = { path = "utils/json-to-toml" }
codex-utils-pty = { path = "utils/pty" }
codex-utils-readiness = { path = "utils/readiness" }
codex-utils-string = { path = "utils/string" }
codex-utils-tokenizer = { path = "utils/tokenizer" }
codex-windows-sandbox = { path = "windows-sandbox-rs" }
core_test_support = { path = "core/tests/common" }
mcp-types = { path = "mcp-types" }
@@ -111,6 +109,7 @@ axum = { version = "0.8", default-features = false }
base64 = "0.22.1"
bytes = "1.10.1"
chrono = "0.4.42"
chardetng = "0.1.17"
clap = "4"
clap_complete = "4"
color-eyre = "0.6.3"
@@ -123,6 +122,7 @@ dotenvy = "0.15.7"
dunce = "1.0.4"
env-flags = "0.1.1"
env_logger = "0.11.5"
encoding_rs = "0.8.35"
escargot = "0.5"
eventsource-stream = "0.2.3"
futures = { version = "0.3", default-features = false }
@@ -169,7 +169,6 @@ reqwest = "0.12"
rmcp = { version = "0.8.5", default-features = false }
schemars = "0.8.22"
seccompiler = "0.5.0"
sentry = "0.34.0"
serde = "1"
serde_json = "1"
serde_with = "3.14"
@@ -188,7 +187,6 @@ tempfile = "3.23.0"
test-log = "0.2.18"
textwrap = "0.16.2"
thiserror = "2.0.17"
tiktoken-rs = "0.9"
time = "0.3"
tiny_http = "0.12"
tokio = "1"
@@ -266,7 +264,6 @@ ignored = [
 "icu_provider",
 "openssl-sys",
 "codex-utils-readiness",
 "codex-utils-tokenizer",
]

[profile.release]
```
```
@@ -19,6 +19,7 @@ schemars = { workspace = true }
serde = { workspace = true, features = ["derive"] }
serde_json = { workspace = true }
strum_macros = { workspace = true }
thiserror = { workspace = true }
ts-rs = { workspace = true }
uuid = { workspace = true, features = ["serde", "v7"] }
```
```
@@ -378,7 +378,7 @@ macro_rules! server_notification_definitions {
        impl TryFrom<JSONRPCNotification> for ServerNotification {
            type Error = serde_json::Error;

            fn try_from(value: JSONRPCNotification) -> Result<Self, Self::Error> {
            fn try_from(value: JSONRPCNotification) -> Result<Self, serde_json::Error> {
                serde_json::from_value(serde_json::to_value(value)?)
            }
        }
@@ -487,6 +487,7 @@ pub struct FuzzyFileSearchResponse {

server_notification_definitions! {
    /// NEW NOTIFICATIONS
    Error => "error" (v2::ErrorNotification),
    ThreadStarted => "thread/started" (v2::ThreadStartedNotification),
    TurnStarted => "turn/started" (v2::TurnStartedNotification),
    TurnCompleted => "turn/completed" (v2::TurnCompletedNotification),
```
```
@@ -11,6 +11,7 @@ use codex_protocol::items::AgentMessageContent as CoreAgentMessageContent;
use codex_protocol::items::TurnItem as CoreTurnItem;
use codex_protocol::models::ResponseItem;
use codex_protocol::parse_command::ParsedCommand as CoreParsedCommand;
use codex_protocol::protocol::CodexErrorInfo as CoreCodexErrorInfo;
use codex_protocol::protocol::CreditsSnapshot as CoreCreditsSnapshot;
use codex_protocol::protocol::RateLimitSnapshot as CoreRateLimitSnapshot;
use codex_protocol::protocol::RateLimitWindow as CoreRateLimitWindow;
@@ -20,6 +21,7 @@ use schemars::JsonSchema;
use serde::Deserialize;
use serde::Serialize;
use serde_json::Value as JsonValue;
use thiserror::Error;
use ts_rs::TS;

// Macro to declare a camelCased API v2 enum mirroring a core enum which
@@ -47,6 +49,72 @@ macro_rules! v2_enum_from_core {
    };
}

/// This translation layer makes sure that we expose Codex error codes in camel case.
///
/// When an upstream HTTP status is available (for example, from the Responses API or a provider),
/// it is forwarded in `httpStatusCode` on the relevant `codexErrorInfo` variant.
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub enum CodexErrorInfo {
    ContextWindowExceeded,
    UsageLimitExceeded,
    HttpConnectionFailed {
        #[serde(rename = "httpStatusCode")]
        #[ts(rename = "httpStatusCode")]
        http_status_code: Option<u16>,
    },
    /// Failed to connect to the response SSE stream.
    ResponseStreamConnectionFailed {
        #[serde(rename = "httpStatusCode")]
        #[ts(rename = "httpStatusCode")]
        http_status_code: Option<u16>,
    },
    InternalServerError,
    Unauthorized,
    BadRequest,
    SandboxError,
    /// The response SSE stream disconnected in the middle of a turn before completion.
    ResponseStreamDisconnected {
        #[serde(rename = "httpStatusCode")]
        #[ts(rename = "httpStatusCode")]
        http_status_code: Option<u16>,
    },
    /// Reached the retry limit for responses.
    ResponseTooManyFailedAttempts {
        #[serde(rename = "httpStatusCode")]
        #[ts(rename = "httpStatusCode")]
        http_status_code: Option<u16>,
    },
    Other,
}

impl From<CoreCodexErrorInfo> for CodexErrorInfo {
    fn from(value: CoreCodexErrorInfo) -> Self {
        match value {
            CoreCodexErrorInfo::ContextWindowExceeded => CodexErrorInfo::ContextWindowExceeded,
            CoreCodexErrorInfo::UsageLimitExceeded => CodexErrorInfo::UsageLimitExceeded,
            CoreCodexErrorInfo::HttpConnectionFailed { http_status_code } => {
                CodexErrorInfo::HttpConnectionFailed { http_status_code }
            }
            CoreCodexErrorInfo::ResponseStreamConnectionFailed { http_status_code } => {
                CodexErrorInfo::ResponseStreamConnectionFailed { http_status_code }
            }
            CoreCodexErrorInfo::InternalServerError => CodexErrorInfo::InternalServerError,
            CoreCodexErrorInfo::Unauthorized => CodexErrorInfo::Unauthorized,
            CoreCodexErrorInfo::BadRequest => CodexErrorInfo::BadRequest,
            CoreCodexErrorInfo::SandboxError => CodexErrorInfo::SandboxError,
            CoreCodexErrorInfo::ResponseStreamDisconnected { http_status_code } => {
                CodexErrorInfo::ResponseStreamDisconnected { http_status_code }
            }
            CoreCodexErrorInfo::ResponseTooManyFailedAttempts { http_status_code } => {
                CodexErrorInfo::ResponseTooManyFailedAttempts { http_status_code }
            }
            CoreCodexErrorInfo::Other => CodexErrorInfo::Other,
        }
    }
}

v2_enum_from_core!(
    pub enum AskForApproval from codex_protocol::protocol::AskForApproval {
        UnlessTrusted, OnFailure, OnRequest, Never
@@ -544,11 +612,20 @@ pub struct Turn {
    pub status: TurnStatus,
}

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS, Error)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
#[error("{message}")]
pub struct TurnError {
    pub message: String,
    pub codex_error_info: Option<CodexErrorInfo>,
}

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct TurnError {
    pub message: String,
pub struct ErrorNotification {
    pub error: TurnError,
}

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
@@ -782,6 +859,7 @@ pub enum CommandExecutionStatus {
    InProgress,
    Completed,
    Failed,
    Declined,
}

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
@@ -1091,6 +1169,7 @@ mod tests {
    use codex_protocol::items::WebSearchItem;
    use codex_protocol::user_input::UserInput as CoreUserInput;
    use pretty_assertions::assert_eq;
    use serde_json::json;
    use std::path::PathBuf;

    #[test]
@@ -1176,4 +1255,20 @@ mod tests {
            }
        );
    }

    #[test]
    fn codex_error_info_serializes_http_status_code_in_camel_case() {
        let value = CodexErrorInfo::ResponseTooManyFailedAttempts {
            http_status_code: Some(401),
        };

        assert_eq!(
            serde_json::to_value(value).unwrap(),
            json!({
                "responseTooManyFailedAttempts": {
                    "httpStatusCode": 401
                }
            })
        );
    }
}
```
```
@@ -40,7 +40,6 @@ tracing = { workspace = true, features = ["log"] }
tracing-subscriber = { workspace = true, features = ["env-filter", "fmt"] }
opentelemetry-appender-tracing = { workspace = true }
uuid = { workspace = true, features = ["serde", "v7"] }
codex-windows-sandbox.workspace = true

[dev-dependencies]
app_test_support = { workspace = true }
@@ -54,3 +53,4 @@ serial_test = { workspace = true }
tempfile = { workspace = true }
toml = { workspace = true }
wiremock = { workspace = true }
shlex = { workspace = true }
```
```
@@ -339,6 +339,29 @@ Event notifications are the server-initiated event stream for thread lifecycles,

The app-server streams JSON-RPC notifications while a turn is running. Each turn starts with `turn/started` (initial `turn`) and ends with `turn/completed` (final `turn` plus token `usage`), and clients subscribe to the events they care about, rendering each item incrementally as updates arrive. The per-item lifecycle is always: `item/started` → zero or more item-specific deltas → `item/completed`.

- `turn/started` — `{ turn }` with the turn id, empty `items`, and `status: "inProgress"`.
- `turn/completed` — `{ turn }` where `turn.status` is `completed`, `interrupted`, or `failed`; failures carry `{ error: { message, codexErrorInfo? } }`.

Today both notifications carry an empty `items` array even when item events were streamed; rely on `item/*` notifications for the canonical item list until this is fixed.

#### Errors

The `error` event is emitted whenever the server hits an error mid-turn (for example, upstream model errors or quota limits). It carries the same `{ error: { message, codexErrorInfo? } }` payload as `turn.status: "failed"` and may precede that terminal notification.

`codexErrorInfo` maps to the `CodexErrorInfo` enum. Common values:
- `ContextWindowExceeded`
- `UsageLimitExceeded`
- `HttpConnectionFailed { httpStatusCode? }`: upstream HTTP failures including 4xx/5xx
- `ResponseStreamConnectionFailed { httpStatusCode? }`: failure to connect to the response SSE stream
- `ResponseStreamDisconnected { httpStatusCode? }`: disconnect of the response SSE stream in the middle of a turn before completion
- `ResponseTooManyFailedAttempts { httpStatusCode? }`
- `BadRequest`
- `Unauthorized`
- `SandboxError`
- `InternalServerError`
- `Other`: all unclassified errors

When an upstream HTTP status is available (for example, from the Responses API or a provider), it is forwarded in `httpStatusCode` on the relevant `codexErrorInfo` variant.

#### Thread items

`ThreadItem` is the tagged union carried in turn responses and `item/*` notifications. Currently we support events for the following items:
```
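To make the documented shape concrete, here is a sketch of an `error` notification, using the serialization demonstrated by the unit test above; the message text and status code are invented for illustration:

```json
{
  "jsonrpc": "2.0",
  "method": "error",
  "params": {
    "error": {
      "message": "Reached the retry limit for responses",
      "codexErrorInfo": { "responseTooManyFailedAttempts": { "httpStatusCode": 429 } }
    }
  }
}
```

A failed `turn/completed` carries the same `{ error: ... }` object inside `turn.status`.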
```
@@ -8,11 +8,13 @@ use codex_app_server_protocol::AgentMessageDeltaNotification;
use codex_app_server_protocol::ApplyPatchApprovalParams;
use codex_app_server_protocol::ApplyPatchApprovalResponse;
use codex_app_server_protocol::ApprovalDecision;
use codex_app_server_protocol::CodexErrorInfo as V2CodexErrorInfo;
use codex_app_server_protocol::CommandAction as V2ParsedCommand;
use codex_app_server_protocol::CommandExecutionOutputDeltaNotification;
use codex_app_server_protocol::CommandExecutionRequestApprovalParams;
use codex_app_server_protocol::CommandExecutionRequestApprovalResponse;
use codex_app_server_protocol::CommandExecutionStatus;
use codex_app_server_protocol::ErrorNotification;
use codex_app_server_protocol::ExecCommandApprovalParams;
use codex_app_server_protocol::ExecCommandApprovalResponse;
use codex_app_server_protocol::FileChangeRequestApprovalParams;
@@ -173,12 +175,20 @@ pub(crate) async fn apply_bespoke_event_handling(
                });
            }
            ApiVersion::V2 => {
                let item_id = call_id.clone();
                let command_actions = parsed_cmd
                    .iter()
                    .cloned()
                    .map(V2ParsedCommand::from)
                    .collect::<Vec<_>>();
                let command_string = shlex_join(&command);

                let params = CommandExecutionRequestApprovalParams {
                    thread_id: conversation_id.to_string(),
                    turn_id: turn_id.clone(),
                    // Until we migrate the core to be aware of a first class CommandExecutionItem
                    // and emit the corresponding EventMsg, we repurpose the call_id as the item_id.
                    item_id: call_id.clone(),
                    item_id: item_id.clone(),
                    reason,
                    risk: risk.map(V2SandboxCommandAssessment::from),
                };
@@ -188,8 +198,17 @@ pub(crate) async fn apply_bespoke_event_handling(
                ))
                .await;
                tokio::spawn(async move {
                    on_command_execution_request_approval_response(event_id, rx, conversation)
                        .await;
                    on_command_execution_request_approval_response(
                        event_id,
                        item_id,
                        command_string,
                        cwd,
                        command_actions,
                        rx,
                        conversation,
                        outgoing,
                    )
                    .await;
                });
            }
        },
@@ -260,7 +279,29 @@ pub(crate) async fn apply_bespoke_event_handling(
            }
        }
        EventMsg::Error(ev) => {
            handle_error(conversation_id, ev.message, &turn_summary_store).await;
            let turn_error = TurnError {
                message: ev.message,
                codex_error_info: ev.codex_error_info.map(V2CodexErrorInfo::from),
            };
            handle_error(conversation_id, turn_error.clone(), &turn_summary_store).await;
            outgoing
                .send_server_notification(ServerNotification::Error(ErrorNotification {
                    error: turn_error,
                }))
                .await;
        }
        EventMsg::StreamError(ev) => {
            // We don't need to update the turn summary store for stream errors as they are intermediate error states for retries,
            // but we notify the client.
            let turn_error = TurnError {
                message: ev.message,
                codex_error_info: ev.codex_error_info.map(V2CodexErrorInfo::from),
            };
            outgoing
                .send_server_notification(ServerNotification::Error(ErrorNotification {
                    error: turn_error,
                }))
                .await;
        }
        EventMsg::EnteredReviewMode(review_request) => {
            let notification = ItemStartedNotification {
@@ -346,16 +387,21 @@ pub(crate) async fn apply_bespoke_event_handling(
                .await;
        }
        EventMsg::ExecCommandBegin(exec_command_begin_event) => {
            let item_id = exec_command_begin_event.call_id.clone();
            let command_actions = exec_command_begin_event
                .parsed_cmd
                .into_iter()
                .map(V2ParsedCommand::from)
                .collect::<Vec<_>>();
            let command = shlex_join(&exec_command_begin_event.command);
            let cwd = exec_command_begin_event.cwd;

            let item = ThreadItem::CommandExecution {
                id: exec_command_begin_event.call_id.clone(),
                command: shlex_join(&exec_command_begin_event.command),
                cwd: exec_command_begin_event.cwd,
                id: item_id,
                command,
                cwd,
                status: CommandExecutionStatus::InProgress,
                command_actions: exec_command_begin_event
                    .parsed_cmd
                    .into_iter()
                    .map(V2ParsedCommand::from)
                    .collect(),
                command_actions,
                aggregated_output: None,
                exit_code: None,
                duration_ms: None,
@@ -393,6 +439,10 @@ pub(crate) async fn apply_bespoke_event_handling(
            } else {
                CommandExecutionStatus::Failed
            };
            let command_actions = parsed_cmd
                .into_iter()
                .map(V2ParsedCommand::from)
                .collect::<Vec<_>>();

            let aggregated_output = if aggregated_output.is_empty() {
                None
@@ -407,7 +457,7 @@ pub(crate) async fn apply_bespoke_event_handling(
                command: shlex_join(&command),
                cwd,
                status,
                command_actions: parsed_cmd.into_iter().map(V2ParsedCommand::from).collect(),
                command_actions,
                aggregated_output,
                exit_code: Some(exit_code),
                duration_ms: Some(duration_ms),
@@ -492,6 +542,30 @@ async fn complete_file_change_item(
        .await;
}

async fn complete_command_execution_item(
    item_id: String,
    command: String,
    cwd: PathBuf,
    command_actions: Vec<V2ParsedCommand>,
    status: CommandExecutionStatus,
    outgoing: &OutgoingMessageSender,
) {
    let item = ThreadItem::CommandExecution {
        id: item_id,
        command,
        cwd,
        status,
        command_actions,
        aggregated_output: None,
        exit_code: None,
        duration_ms: None,
    };
    let notification = ItemCompletedNotification { item };
    outgoing
        .send_server_notification(ServerNotification::ItemCompleted(notification))
        .await;
}

async fn find_and_remove_turn_summary(
    conversation_id: ConversationId,
    turn_summary_store: &TurnSummaryStore,
@@ -508,10 +582,8 @@ async fn handle_turn_complete(
) {
    let turn_summary = find_and_remove_turn_summary(conversation_id, turn_summary_store).await;

    let status = if let Some(message) = turn_summary.last_error_message {
        TurnStatus::Failed {
            error: TurnError { message },
        }
    let status = if let Some(error) = turn_summary.last_error {
        TurnStatus::Failed { error }
    } else {
        TurnStatus::Completed
    };
@@ -532,11 +604,11 @@ async fn handle_turn_interrupted(

async fn handle_error(
    conversation_id: ConversationId,
    message: String,
    error: TurnError,
    turn_summary_store: &TurnSummaryStore,
) {
    let mut map = turn_summary_store.lock().await;
    map.entry(conversation_id).or_default().last_error_message = Some(message);
    map.entry(conversation_id).or_default().last_error = Some(error);
}

async fn on_patch_approval_response(
@@ -743,42 +815,68 @@ async fn on_file_change_request_approval_response(
    }
}

#[allow(clippy::too_many_arguments)]
async fn on_command_execution_request_approval_response(
    event_id: String,
    item_id: String,
    command: String,
    cwd: PathBuf,
    command_actions: Vec<V2ParsedCommand>,
    receiver: oneshot::Receiver<JsonValue>,
    conversation: Arc<CodexConversation>,
    outgoing: Arc<OutgoingMessageSender>,
) {
    let response = receiver.await;
    let value = match response {
        Ok(value) => value,
    let (decision, completion_status) = match response {
        Ok(value) => {
            let response = serde_json::from_value::<CommandExecutionRequestApprovalResponse>(value)
                .unwrap_or_else(|err| {
                    error!("failed to deserialize CommandExecutionRequestApprovalResponse: {err}");
                    CommandExecutionRequestApprovalResponse {
                        decision: ApprovalDecision::Decline,
                        accept_settings: None,
                    }
                });

            let CommandExecutionRequestApprovalResponse {
                decision,
                accept_settings,
            } = response;

            let (decision, completion_status) = match (decision, accept_settings) {
                (ApprovalDecision::Accept, Some(settings)) if settings.for_session => {
                    (ReviewDecision::ApprovedForSession, None)
                }
                (ApprovalDecision::Accept, _) => (ReviewDecision::Approved, None),
                (ApprovalDecision::Decline, _) => (
                    ReviewDecision::Denied,
                    Some(CommandExecutionStatus::Declined),
                ),
                (ApprovalDecision::Cancel, _) => (
                    ReviewDecision::Abort,
                    Some(CommandExecutionStatus::Declined),
                ),
            };
            (decision, completion_status)
        }
        Err(err) => {
            error!("request failed: {err:?}");
            return;
            (ReviewDecision::Denied, Some(CommandExecutionStatus::Failed))
        }
    };

    let response = serde_json::from_value::<CommandExecutionRequestApprovalResponse>(value)
        .unwrap_or_else(|err| {
            error!("failed to deserialize CommandExecutionRequestApprovalResponse: {err}");
            CommandExecutionRequestApprovalResponse {
                decision: ApprovalDecision::Decline,
                accept_settings: None,
            }
        });
    if let Some(status) = completion_status {
        complete_command_execution_item(
            item_id.clone(),
            command.clone(),
            cwd.clone(),
            command_actions.clone(),
            status,
            outgoing.as_ref(),
        )
        .await;
    }

    let CommandExecutionRequestApprovalResponse {
        decision,
        accept_settings,
    } = response;

    let decision = match (decision, accept_settings) {
        (ApprovalDecision::Accept, Some(settings)) if settings.for_session => {
            ReviewDecision::ApprovedForSession
        }
        (ApprovalDecision::Accept, _) => ReviewDecision::Approved,
        (ApprovalDecision::Decline, _) => ReviewDecision::Denied,
        (ApprovalDecision::Cancel, _) => ReviewDecision::Abort,
    };
    if let Err(err) = conversation
        .submit(Op::ExecApproval {
            id: event_id,
@@ -873,10 +971,24 @@ mod tests {
        let conversation_id = ConversationId::new();
        let turn_summary_store = new_turn_summary_store();

        handle_error(conversation_id, "boom".to_string(), &turn_summary_store).await;
        handle_error(
            conversation_id,
            TurnError {
                message: "boom".to_string(),
                codex_error_info: Some(V2CodexErrorInfo::InternalServerError),
            },
            &turn_summary_store,
        )
        .await;

        let turn_summary = find_and_remove_turn_summary(conversation_id, &turn_summary_store).await;
        assert_eq!(turn_summary.last_error_message, Some("boom".to_string()));
        assert_eq!(
            turn_summary.last_error,
            Some(TurnError {
                message: "boom".to_string(),
                codex_error_info: Some(V2CodexErrorInfo::InternalServerError),
            })
        );
        Ok(())
    }

@@ -916,7 +1028,15 @@ mod tests {
        let conversation_id = ConversationId::new();
        let event_id = "interrupt1".to_string();
        let turn_summary_store = new_turn_summary_store();
        handle_error(conversation_id, "oops".to_string(), &turn_summary_store).await;
        handle_error(
            conversation_id,
            TurnError {
                message: "oops".to_string(),
                codex_error_info: None,
            },
            &turn_summary_store,
        )
        .await;
        let (tx, mut rx) = mpsc::channel(CHANNEL_CAPACITY);
        let outgoing = Arc::new(OutgoingMessageSender::new(tx));

@@ -948,7 +1068,15 @@ mod tests {
        let conversation_id = ConversationId::new();
        let event_id = "complete_err1".to_string();
        let turn_summary_store = new_turn_summary_store();
        handle_error(conversation_id, "bad".to_string(), &turn_summary_store).await;
        handle_error(
            conversation_id,
            TurnError {
                message: "bad".to_string(),
                codex_error_info: Some(V2CodexErrorInfo::Other),
            },
            &turn_summary_store,
        )
        .await;
        let (tx, mut rx) = mpsc::channel(CHANNEL_CAPACITY);
        let outgoing = Arc::new(OutgoingMessageSender::new(tx));

@@ -972,6 +1100,7 @@ mod tests {
            TurnStatus::Failed {
                error: TurnError {
                    message: "bad".to_string(),
                    codex_error_info: Some(V2CodexErrorInfo::Other),
                }
            }
        );
@@ -1022,7 +1151,15 @@ mod tests {

        // Turn 1 on conversation A
        let a_turn1 = "a_turn1".to_string();
        handle_error(conversation_a, "a1".to_string(), &turn_summary_store).await;
        handle_error(
            conversation_a,
            TurnError {
                message: "a1".to_string(),
                codex_error_info: Some(V2CodexErrorInfo::BadRequest),
            },
            &turn_summary_store,
        )
        .await;
        handle_turn_complete(
            conversation_a,
            a_turn1.clone(),
@@ -1033,7 +1170,15 @@ mod tests {

        // Turn 1 on conversation B
        let b_turn1 = "b_turn1".to_string();
        handle_error(conversation_b, "b1".to_string(), &turn_summary_store).await;
        handle_error(
            conversation_b,
            TurnError {
                message: "b1".to_string(),
                codex_error_info: None,
            },
            &turn_summary_store,
        )
        .await;
        handle_turn_complete(
            conversation_b,
            b_turn1.clone(),
@@ -1065,6 +1210,7 @@ mod tests {
            TurnStatus::Failed {
                error: TurnError {
                    message: "a1".to_string(),
                    codex_error_info: Some(V2CodexErrorInfo::BadRequest),
                }
            }
        );
@@ -1085,6 +1231,7 @@ mod tests {
            TurnStatus::Failed {
                error: TurnError {
                    message: "b1".to_string(),
                    codex_error_info: None,
                }
            }
        );
```
```
@@ -83,6 +83,7 @@ use codex_app_server_protocol::ThreadStartParams;
use codex_app_server_protocol::ThreadStartResponse;
use codex_app_server_protocol::ThreadStartedNotification;
use codex_app_server_protocol::Turn;
use codex_app_server_protocol::TurnError;
use codex_app_server_protocol::TurnInterruptParams;
use codex_app_server_protocol::TurnStartParams;
use codex_app_server_protocol::TurnStartResponse;
@@ -91,7 +92,6 @@ use codex_app_server_protocol::TurnStatus;
use codex_app_server_protocol::UserInfoResponse;
use codex_app_server_protocol::UserInput as V2UserInput;
use codex_app_server_protocol::UserSavedConfig;
use codex_app_server_protocol::WindowsWorldWritableWarningNotification;
use codex_app_server_protocol::build_turns_from_event_msgs;
use codex_backend_client::Client as BackendClient;
use codex_core::AuthManager;
@@ -162,8 +162,8 @@ pub(crate) type PendingInterrupts = Arc<Mutex<HashMap<ConversationId, PendingInt
/// Per-conversation accumulation of the latest states e.g. error message while a turn runs.
#[derive(Default, Clone)]
pub(crate) struct TurnSummary {
    pub(crate) last_error_message: Option<String>,
    pub(crate) file_change_started: HashSet<String>,
    pub(crate) last_error: Option<TurnError>,
}

pub(crate) type TurnSummaryStore = Arc<Mutex<HashMap<ConversationId, TurnSummary>>>;
@@ -1170,7 +1170,7 @@ impl CodexMessageProcessor {
        let exec_params = ExecParams {
            command: params.command,
            cwd,
            timeout_ms,
            expiration: timeout_ms.into(),
            env,
            with_escalated_permissions: None,
            justification: None,
@@ -1276,10 +1276,6 @@ impl CodexMessageProcessor {
                return;
            }
        };
        if cfg!(windows) && config.features.enabled(Feature::WindowsSandbox) {
            self.handle_windows_world_writable_warning(config.cwd.clone())
                .await;
        }

        match self.conversation_manager.new_conversation(config).await {
            Ok(conversation_id) => {
@@ -1999,10 +1995,6 @@ impl CodexMessageProcessor {
                return;
            }
        };
        if cfg!(windows) && config.features.enabled(Feature::WindowsSandbox) {
            self.handle_windows_world_writable_warning(config.cwd.clone())
                .await;
        }

        let conversation_history = if let Some(path) = path {
            match RolloutRecorder::get_rollout_history(&path).await {
@@ -2861,53 +2853,6 @@ impl CodexMessageProcessor {
            Err(_) => None,
        }
    }

    /// On Windows, when using the experimental sandbox, we need to warn the user about world-writable directories.
    async fn handle_windows_world_writable_warning(&self, cwd: PathBuf) {
        if !cfg!(windows) {
            return;
        }

        if !self.config.features.enabled(Feature::WindowsSandbox) {
            return;
        }

        if !matches!(
            self.config.sandbox_policy,
            codex_protocol::protocol::SandboxPolicy::WorkspaceWrite { .. }
                | codex_protocol::protocol::SandboxPolicy::ReadOnly
        ) {
            return;
        }

        if self
            .config
            .notices
            .hide_world_writable_warning
            .unwrap_or(false)
        {
            return;
        }

        // This function is stubbed out to return None on non-Windows platforms
        if let Some((sample_paths, extra_count, failed_scan)) =
            codex_windows_sandbox::world_writable_warning_details(
                self.config.codex_home.as_path(),
                cwd,
            )
        {
            tracing::warn!("world writable warning: {sample_paths:?} {extra_count} {failed_scan}");
            self.outgoing
                .send_server_notification(ServerNotification::WindowsWorldWritableWarning(
                    WindowsWorldWritableWarningNotification {
                        sample_paths,
                        extra_count,
                        failed_scan,
                    },
                ))
                .await;
        }
    }
}

async fn derive_config_from_params(
```
```
@@ -24,3 +24,5 @@ tokio = { workspace = true, features = [
] }
uuid = { workspace = true }
wiremock = { workspace = true }
core_test_support = { path = "../../../core/tests/common" }
shlex = { workspace = true }
```
```
@@ -9,12 +9,14 @@ pub use auth_fixtures::ChatGptIdTokenClaims;
pub use auth_fixtures::encode_id_token;
pub use auth_fixtures::write_chatgpt_auth;
use codex_app_server_protocol::JSONRPCResponse;
pub use core_test_support::format_with_current_shell;
pub use core_test_support::format_with_current_shell_display;
pub use mcp_process::McpProcess;
pub use mock_model_server::create_mock_chat_completions_server;
pub use mock_model_server::create_mock_chat_completions_server_unchecked;
pub use responses::create_apply_patch_sse_response;
pub use responses::create_final_assistant_message_sse_response;
pub use responses::create_shell_sse_response;
pub use responses::create_shell_command_sse_response;
pub use rollout::create_fake_rollout;
use serde::de::DeserializeOwned;
```
@@ -1,17 +1,18 @@
|
||||
use serde_json::json;
|
||||
use std::path::Path;
|
||||
|
||||
pub fn create_shell_sse_response(
|
||||
pub fn create_shell_command_sse_response(
|
||||
command: Vec<String>,
|
||||
workdir: Option<&Path>,
|
||||
timeout_ms: Option<u64>,
|
||||
call_id: &str,
|
||||
) -> anyhow::Result<String> {
|
||||
// The `arguments`` for the `shell` tool is a serialized JSON object.
|
||||
// The `arguments` for the `shell_command` tool is a serialized JSON object.
|
||||
let command_str = shlex::try_join(command.iter().map(String::as_str))?;
|
||||
let tool_call_arguments = serde_json::to_string(&json!({
|
||||
"command": command,
|
||||
"command": command_str,
|
||||
"workdir": workdir.map(|w| w.to_string_lossy()),
|
||||
"timeout": timeout_ms
|
||||
"timeout_ms": timeout_ms
|
||||
}))?;
|
||||
let tool_call = json!({
|
||||
"choices": [
|
||||
@@ -21,7 +22,7 @@ pub fn create_shell_sse_response(
|
||||
{
|
||||
"id": call_id,
|
||||
"function": {
|
||||
"name": "shell",
|
||||
"name": "shell_command",
|
||||
"arguments": tool_call_arguments
|
||||
}
|
||||
}
|
||||
@@ -62,10 +63,10 @@ pub fn create_apply_patch_sse_response(
|
||||
patch_content: &str,
|
||||
call_id: &str,
|
||||
) -> anyhow::Result<String> {
|
||||
// Use shell command to call apply_patch with heredoc format
|
||||
let shell_command = format!("apply_patch <<'EOF'\n{patch_content}\nEOF");
|
||||
// Use shell_command to call apply_patch with heredoc format
|
||||
let command = format!("apply_patch <<'EOF'\n{patch_content}\nEOF");
|
||||
let tool_call_arguments = serde_json::to_string(&json!({
|
||||
"command": ["bash", "-lc", shell_command]
|
||||
"command": command
|
||||
}))?;
|
||||
|
||||
let tool_call = json!({
|
||||
@@ -76,7 +77,7 @@ pub fn create_apply_patch_sse_response(
|
||||
{
|
||||
"id": call_id,
|
||||
"function": {
|
||||
"name": "shell",
|
||||
"name": "shell_command",
|
||||
"arguments": tool_call_arguments
|
||||
}
|
||||
}
|
||||
|
||||
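For reference, a small self-contained sketch of the `arguments` shape the renamed fixture now produces: the command is shlex-joined into a single string and the timeout key is `timeout_ms` (was `timeout`). Only the `shlex`, `serde_json`, and `anyhow` crates are assumed:

use serde_json::json;

fn main() -> anyhow::Result<()> {
    let command = vec!["echo".to_string(), "hello world".to_string()];
    // Same joining step as create_shell_command_sse_response above.
    let command_str = shlex::try_join(command.iter().map(String::as_str))?;
    let arguments = serde_json::to_string(&json!({
        "command": command_str,
        "workdir": Option::<String>::None,
        "timeout_ms": 5_000,
    }))?;
    println!("{arguments}"); // e.g. {"command":"echo 'hello world'",...}
    Ok(())
}
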
@@ -2,7 +2,8 @@ use anyhow::Result;
use app_test_support::McpProcess;
use app_test_support::create_final_assistant_message_sse_response;
use app_test_support::create_mock_chat_completions_server;
use app_test_support::create_shell_sse_response;
use app_test_support::create_shell_command_sse_response;
use app_test_support::format_with_current_shell;
use app_test_support::to_response;
use codex_app_server_protocol::AddConversationListenerParams;
use codex_app_server_protocol::AddConversationSubscriptionResponse;
@@ -56,7 +57,7 @@ async fn test_codex_jsonrpc_conversation_flow() -> Result<()> {
    // Create a mock model server that immediately ends each turn.
    // Two turns are expected: initial session configure + one user message.
    let responses = vec![
        create_shell_sse_response(
        create_shell_command_sse_response(
            vec!["ls".to_string()],
            Some(&working_directory),
            Some(5000),
@@ -175,7 +176,7 @@ async fn test_send_user_turn_changes_approval_policy_behavior() -> Result<()> {

    // Mock server will request a python shell call for the first and second turn, then finish.
    let responses = vec![
        create_shell_sse_response(
        create_shell_command_sse_response(
            vec![
                "python3".to_string(),
                "-c".to_string(),
@@ -186,7 +187,7 @@ async fn test_send_user_turn_changes_approval_policy_behavior() -> Result<()> {
            "call1",
        )?,
        create_final_assistant_message_sse_response("done 1")?,
        create_shell_sse_response(
        create_shell_command_sse_response(
            vec![
                "python3".to_string(),
                "-c".to_string(),
@@ -267,11 +268,7 @@ async fn test_send_user_turn_changes_approval_policy_behavior() -> Result<()> {
        ExecCommandApprovalParams {
            conversation_id,
            call_id: "call1".to_string(),
            command: vec![
                "python3".to_string(),
                "-c".to_string(),
                "print(42)".to_string(),
            ],
            command: format_with_current_shell("python3 -c 'print(42)'"),
            cwd: working_directory.clone(),
            reason: None,
            risk: None,
@@ -353,23 +350,15 @@ async fn test_send_user_turn_updates_sandbox_and_cwd_between_turns() -> Result<(
    std::fs::create_dir(&second_cwd)?;

    let responses = vec![
        create_shell_sse_response(
            vec![
                "bash".to_string(),
                "-lc".to_string(),
                "echo first turn".to_string(),
            ],
        create_shell_command_sse_response(
            vec!["echo".to_string(), "first".to_string(), "turn".to_string()],
            None,
            Some(5000),
            "call-first",
        )?,
        create_final_assistant_message_sse_response("done first")?,
        create_shell_sse_response(
            vec![
                "bash".to_string(),
                "-lc".to_string(),
                "echo second turn".to_string(),
            ],
        create_shell_command_sse_response(
            vec!["echo".to_string(), "second".to_string(), "turn".to_string()],
            None,
            Some(5000),
            "call-second",
@@ -481,13 +470,9 @@ async fn test_send_user_turn_updates_sandbox_and_cwd_between_turns() -> Result<(
        exec_begin.cwd, second_cwd,
        "exec turn should run from updated cwd"
    );
    let expected_command = format_with_current_shell("echo second turn");
    assert_eq!(
        exec_begin.command,
        vec![
            "bash".to_string(),
            "-lc".to_string(),
            "echo second turn".to_string()
        ],
        exec_begin.command, expected_command,
        "exec turn should run expected command"
    );


@@ -19,7 +19,7 @@ use tokio::time::timeout;

use app_test_support::McpProcess;
use app_test_support::create_mock_chat_completions_server;
use app_test_support::create_shell_sse_response;
use app_test_support::create_shell_command_sse_response;
use app_test_support::to_response;

const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10);
@@ -56,7 +56,7 @@ async fn shell_command_interruption() -> anyhow::Result<()> {
    std::fs::create_dir(&working_directory)?;

    // Create mock server with a single SSE response: the long sleep command
    let server = create_mock_chat_completions_server(vec![create_shell_sse_response(
    let server = create_mock_chat_completions_server(vec![create_shell_command_sse_response(
        shell_command.clone(),
        Some(&working_directory),
        Some(10_000), // 10 seconds timeout in ms

@@ -3,7 +3,7 @@
use anyhow::Result;
use app_test_support::McpProcess;
use app_test_support::create_mock_chat_completions_server;
use app_test_support::create_shell_sse_response;
use app_test_support::create_shell_command_sse_response;
use app_test_support::to_response;
use codex_app_server_protocol::JSONRPCNotification;
use codex_app_server_protocol::JSONRPCResponse;
@@ -41,7 +41,7 @@ async fn turn_interrupt_aborts_running_turn() -> Result<()> {
    std::fs::create_dir(&working_directory)?;

    // Mock server: long-running shell command then (after abort) nothing else needed.
    let server = create_mock_chat_completions_server(vec![create_shell_sse_response(
    let server = create_mock_chat_completions_server(vec![create_shell_command_sse_response(
        shell_command.clone(),
        Some(&working_directory),
        Some(10_000),

@@ -4,9 +4,11 @@ use app_test_support::create_apply_patch_sse_response;
use app_test_support::create_final_assistant_message_sse_response;
use app_test_support::create_mock_chat_completions_server;
use app_test_support::create_mock_chat_completions_server_unchecked;
use app_test_support::create_shell_sse_response;
use app_test_support::create_shell_command_sse_response;
use app_test_support::format_with_current_shell_display;
use app_test_support::to_response;
use codex_app_server_protocol::ApprovalDecision;
use codex_app_server_protocol::CommandExecutionRequestApprovalResponse;
use codex_app_server_protocol::CommandExecutionStatus;
use codex_app_server_protocol::FileChangeRequestApprovalResponse;
use codex_app_server_protocol::ItemCompletedNotification;
@@ -203,7 +205,7 @@ async fn turn_start_exec_approval_toggle_v2() -> Result<()> {
    // Mock server: first turn requests a shell call (elicitation), then completes.
    // Second turn same, but we'll set approval_policy=never to avoid elicitation.
    let responses = vec![
        create_shell_sse_response(
        create_shell_command_sse_response(
            vec![
                "python3".to_string(),
                "-c".to_string(),
@@ -214,7 +216,7 @@ async fn turn_start_exec_approval_toggle_v2() -> Result<()> {
            "call1",
        )?,
        create_final_assistant_message_sse_response("done 1")?,
        create_shell_sse_response(
        create_shell_command_sse_response(
            vec![
                "python3".to_string(),
                "-c".to_string(),
@@ -328,6 +330,145 @@ async fn turn_start_exec_approval_toggle_v2() -> Result<()> {
    Ok(())
}

#[tokio::test]
async fn turn_start_exec_approval_decline_v2() -> Result<()> {
    skip_if_no_network!(Ok(()));

    let tmp = TempDir::new()?;
    let codex_home = tmp.path().to_path_buf();
    let workspace = tmp.path().join("workspace");
    std::fs::create_dir(&workspace)?;

    let responses = vec![
        create_shell_command_sse_response(
            vec![
                "python3".to_string(),
                "-c".to_string(),
                "print(42)".to_string(),
            ],
            None,
            Some(5000),
            "call-decline",
        )?,
        create_final_assistant_message_sse_response("done")?,
    ];
    let server = create_mock_chat_completions_server(responses).await;
    create_config_toml(codex_home.as_path(), &server.uri(), "untrusted")?;

    let mut mcp = McpProcess::new(codex_home.as_path()).await?;
    timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;

    let start_id = mcp
        .send_thread_start_request(ThreadStartParams {
            model: Some("mock-model".to_string()),
            ..Default::default()
        })
        .await?;
    let start_resp: JSONRPCResponse = timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_response_message(RequestId::Integer(start_id)),
    )
    .await??;
    let ThreadStartResponse { thread, .. } = to_response::<ThreadStartResponse>(start_resp)?;

    let turn_id = mcp
        .send_turn_start_request(TurnStartParams {
            thread_id: thread.id.clone(),
            input: vec![V2UserInput::Text {
                text: "run python".to_string(),
            }],
            cwd: Some(workspace.clone()),
            ..Default::default()
        })
        .await?;
    let turn_resp: JSONRPCResponse = timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_response_message(RequestId::Integer(turn_id)),
    )
    .await??;
    let TurnStartResponse { turn } = to_response::<TurnStartResponse>(turn_resp)?;

    let started_command_execution = timeout(DEFAULT_READ_TIMEOUT, async {
        loop {
            let started_notif = mcp
                .read_stream_until_notification_message("item/started")
                .await?;
            let started: ItemStartedNotification =
                serde_json::from_value(started_notif.params.clone().expect("item/started params"))?;
            if let ThreadItem::CommandExecution { .. } = started.item {
                return Ok::<ThreadItem, anyhow::Error>(started.item);
            }
        }
    })
    .await??;
    let ThreadItem::CommandExecution { id, status, .. } = started_command_execution else {
        unreachable!("loop ensures we break on command execution items");
    };
    assert_eq!(id, "call-decline");
    assert_eq!(status, CommandExecutionStatus::InProgress);

    let server_req = timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_request_message(),
    )
    .await??;
    let ServerRequest::CommandExecutionRequestApproval { request_id, params } = server_req else {
        panic!("expected CommandExecutionRequestApproval request")
    };
    assert_eq!(params.item_id, "call-decline");
    assert_eq!(params.thread_id, thread.id);
    assert_eq!(params.turn_id, turn.id);

    mcp.send_response(
        request_id,
        serde_json::to_value(CommandExecutionRequestApprovalResponse {
            decision: ApprovalDecision::Decline,
            accept_settings: None,
        })?,
    )
    .await?;

    let completed_command_execution = timeout(DEFAULT_READ_TIMEOUT, async {
        loop {
            let completed_notif = mcp
                .read_stream_until_notification_message("item/completed")
                .await?;
            let completed: ItemCompletedNotification = serde_json::from_value(
                completed_notif
                    .params
                    .clone()
                    .expect("item/completed params"),
            )?;
            if let ThreadItem::CommandExecution { .. } = completed.item {
                return Ok::<ThreadItem, anyhow::Error>(completed.item);
            }
        }
    })
    .await??;
    let ThreadItem::CommandExecution {
        id,
        status,
        exit_code,
        aggregated_output,
        ..
    } = completed_command_execution
    else {
        unreachable!("loop ensures we break on command execution items");
    };
    assert_eq!(id, "call-decline");
    assert_eq!(status, CommandExecutionStatus::Declined);
    assert!(exit_code.is_none());
    assert!(aggregated_output.is_none());

    timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_notification_message("codex/event/task_complete"),
    )
    .await??;

    Ok(())
}

#[tokio::test]
async fn turn_start_updates_sandbox_and_cwd_between_turns_v2() -> Result<()> {
    skip_if_no_network!(Ok(()));
@@ -343,23 +484,15 @@ async fn turn_start_updates_sandbox_and_cwd_between_turns_v2() -> Result<()> {
    std::fs::create_dir(&second_cwd)?;

    let responses = vec![
        create_shell_sse_response(
            vec![
                "bash".to_string(),
                "-lc".to_string(),
                "echo first turn".to_string(),
            ],
        create_shell_command_sse_response(
            vec!["echo".to_string(), "first".to_string(), "turn".to_string()],
            None,
            Some(5000),
            "call-first",
        )?,
        create_final_assistant_message_sse_response("done first")?,
        create_shell_sse_response(
            vec![
                "bash".to_string(),
                "-lc".to_string(),
                "echo second turn".to_string(),
            ],
        create_shell_command_sse_response(
            vec!["echo".to_string(), "second".to_string(), "turn".to_string()],
            None,
            Some(5000),
            "call-second",
@@ -465,7 +598,8 @@ async fn turn_start_updates_sandbox_and_cwd_between_turns_v2() -> Result<()> {
        unreachable!("loop ensures we break on command execution items");
    };
    assert_eq!(cwd, second_cwd);
    assert_eq!(command, "bash -lc 'echo second turn'");
    let expected_command = format_with_current_shell_display("echo second turn");
    assert_eq!(command, expected_command);
    assert_eq!(status, CommandExecutionStatus::InProgress);

    timeout(
@@ -480,6 +614,10 @@ async fn turn_start_updates_sandbox_and_cwd_between_turns_v2() -> Result<()> {
#[tokio::test]
async fn turn_start_file_change_approval_v2() -> Result<()> {
    skip_if_no_network!(Ok(()));
    if cfg!(windows) {
        // TODO apply_patch approvals are not parsed from powershell commands yet
        return Ok(());
    }

    let tmp = TempDir::new()?;
    let codex_home = tmp.path().join("codex_home");
@@ -626,6 +764,10 @@ async fn turn_start_file_change_approval_v2() -> Result<()> {
#[tokio::test]
async fn turn_start_file_change_approval_decline_v2() -> Result<()> {
    skip_if_no_network!(Ok(()));
    if cfg!(windows) {
        // TODO apply_patch approvals are not parsed from powershell commands yet
        return Ok(());
    }

    let tmp = TempDir::new()?;
    let codex_home = tmp.path().join("codex_home");

@@ -30,6 +30,7 @@ pub use standalone_executable::main;
pub const APPLY_PATCH_TOOL_INSTRUCTIONS: &str = include_str!("../apply_patch_tool_instructions.md");

const APPLY_PATCH_COMMANDS: [&str; 2] = ["apply_patch", "applypatch"];
const APPLY_PATCH_SHELLS: [&str; 3] = ["bash", "zsh", "sh"];

#[derive(Debug, Error, PartialEq)]
pub enum ApplyPatchError {
@@ -96,6 +97,13 @@ pub struct ApplyPatchArgs {
    pub workdir: Option<String>,
}

fn shell_supports_apply_patch(shell: &str) -> bool {
    std::path::Path::new(shell)
        .file_name()
        .and_then(|name| name.to_str())
        .is_some_and(|name| APPLY_PATCH_SHELLS.contains(&name))
}

pub fn maybe_parse_apply_patch(argv: &[String]) -> MaybeApplyPatch {
    match argv {
        // Direct invocation: apply_patch <patch>
@@ -104,7 +112,7 @@ pub fn maybe_parse_apply_patch(argv: &[String]) -> MaybeApplyPatch {
            Err(e) => MaybeApplyPatch::PatchParseError(e),
        },
        // Bash heredoc form: (optional `cd <path> &&`) apply_patch <<'EOF' ...
        [bash, flag, script] if bash == "bash" && flag == "-lc" => {
        [shell, flag, script] if shell_supports_apply_patch(shell) && flag == "-lc" => {
            match extract_apply_patch_from_bash(script) {
                Ok((body, workdir)) => match parse_patch(&body) {
                    Ok(mut source) => {
@@ -224,12 +232,12 @@ pub fn maybe_parse_apply_patch_verified(argv: &[String], cwd: &Path) -> MaybeApp
                );
            }
        }
        [bash, flag, script] if bash == "bash" && flag == "-lc" => {
            if parse_patch(script).is_ok() {
                return MaybeApplyPatchVerified::CorrectnessError(
                    ApplyPatchError::ImplicitInvocation,
                );
            }
        [shell, flag, script]
            if shell_supports_apply_patch(shell)
                && flag == "-lc"
                && parse_patch(script).is_ok() =>
        {
            return MaybeApplyPatchVerified::CorrectnessError(ApplyPatchError::ImplicitInvocation);
        }
        _ => {}
    }

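Because the check reduces the shell to its basename, full paths to a supported shell are accepted too. A self-contained sketch that mirrors `shell_supports_apply_patch` and exercises that property:

#[cfg(test)]
mod shell_match_sketch {
    // Local mirror of shell_supports_apply_patch from the diff above,
    // reproduced here so the sketch stands on its own.
    const APPLY_PATCH_SHELLS: [&str; 3] = ["bash", "zsh", "sh"];

    fn shell_supports_apply_patch(shell: &str) -> bool {
        std::path::Path::new(shell)
            .file_name()
            .and_then(|name| name.to_str())
            .is_some_and(|name| APPLY_PATCH_SHELLS.contains(&name))
    }

    #[test]
    fn matches_basenames_and_full_paths() {
        assert!(shell_supports_apply_patch("bash"));
        assert!(shell_supports_apply_patch("/usr/bin/zsh"));
        assert!(!shell_supports_apply_patch("fish"));
    }
}
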
@@ -26,6 +26,7 @@ codex-cloud-tasks = { path = "../cloud-tasks" }
codex-common = { workspace = true, features = ["cli"] }
codex-core = { workspace = true }
codex-exec = { workspace = true }
codex-execpolicy = { workspace = true }
codex-login = { workspace = true }
codex-mcp-server = { workspace = true }
codex-process-hardening = { workspace = true }

@@ -18,6 +18,7 @@ use codex_cli::login::run_logout;
use codex_cloud_tasks::Cli as CloudTasksCli;
use codex_common::CliConfigOverrides;
use codex_exec::Cli as ExecCli;
use codex_execpolicy::ExecPolicyCheckCommand;
use codex_responses_api_proxy::Args as ResponsesApiProxyArgs;
use codex_tui::AppExitInfo;
use codex_tui::Cli as TuiCli;
@@ -93,6 +94,10 @@ enum Subcommand {
    #[clap(visible_alias = "debug")]
    Sandbox(SandboxArgs),

    /// Execpolicy tooling.
    #[clap(hide = true)]
    Execpolicy(ExecpolicyCommand),

    /// Apply the latest diff produced by Codex agent as a `git apply` to your local working tree.
    #[clap(visible_alias = "a")]
    Apply(ApplyCommand),
@@ -162,6 +167,19 @@ enum SandboxCommand {
    Windows(WindowsCommand),
}

#[derive(Debug, Parser)]
struct ExecpolicyCommand {
    #[command(subcommand)]
    sub: ExecpolicySubcommand,
}

#[derive(Debug, clap::Subcommand)]
enum ExecpolicySubcommand {
    /// Check execpolicy files against a command.
    #[clap(name = "check")]
    Check(ExecPolicyCheckCommand),
}

#[derive(Debug, Parser)]
struct LoginCommand {
    #[clap(skip)]
@@ -327,6 +345,10 @@ fn run_update_action(action: UpdateAction) -> anyhow::Result<()> {
    Ok(())
}

fn run_execpolicycheck(cmd: ExecPolicyCheckCommand) -> anyhow::Result<()> {
    cmd.run()
}

#[derive(Debug, Default, Parser, Clone)]
struct FeatureToggles {
    /// Enable a feature (repeatable). Equivalent to `-c features.<name>=true`.
@@ -549,6 +571,9 @@ async fn cli_main(codex_linux_sandbox_exe: Option<PathBuf>) -> anyhow::Result<()
                .await?;
            }
        },
        Some(Subcommand::Execpolicy(ExecpolicyCommand { sub })) => match sub {
            ExecpolicySubcommand::Check(cmd) => run_execpolicycheck(cmd)?,
        },
        Some(Subcommand::Apply(mut apply_cli)) => {
            prepend_config_flags(
                &mut apply_cli.config_overrides,

@@ -79,6 +79,7 @@ pub struct GetArgs {
}

#[derive(Debug, clap::Parser)]
#[command(override_usage = "codex mcp add [OPTIONS] <NAME> (--url <URL> | -- <COMMAND>...)")]
pub struct AddArgs {
    /// Name for the MCP server configuration.
    pub name: String,

codex-rs/cli/tests/execpolicy.rs (new file, 58 lines)
@@ -0,0 +1,58 @@
use std::fs;

use assert_cmd::Command;
use pretty_assertions::assert_eq;
use serde_json::json;
use tempfile::TempDir;

#[test]
fn execpolicy_check_matches_expected_json() -> Result<(), Box<dyn std::error::Error>> {
    let codex_home = TempDir::new()?;
    let policy_path = codex_home.path().join("policy.codexpolicy");
    fs::write(
        &policy_path,
        r#"
prefix_rule(
    pattern = ["git", "push"],
    decision = "forbidden",
)
"#,
    )?;

    let output = Command::cargo_bin("codex")?
        .env("CODEX_HOME", codex_home.path())
        .args([
            "execpolicy",
            "check",
            "--policy",
            policy_path
                .to_str()
                .expect("policy path should be valid UTF-8"),
            "git",
            "push",
            "origin",
            "main",
        ])
        .output()?;

    assert!(output.status.success());
    let result: serde_json::Value = serde_json::from_slice(&output.stdout)?;
    assert_eq!(
        result,
        json!({
            "match": {
                "decision": "forbidden",
                "matchedRules": [
                    {
                        "prefixRuleMatch": {
                            "matchedPrefix": ["git", "push"],
                            "decision": "forbidden"
                        }
                    }
                ]
            }
        })
    );

    Ok(())
}
@@ -19,6 +19,7 @@ async-trait = { workspace = true }
base64 = { workspace = true }
bytes = { workspace = true }
chrono = { workspace = true, features = ["serde"] }
chardetng = { workspace = true }
codex-app-server-protocol = { workspace = true }
codex-apply-patch = { workspace = true }
codex-async-utils = { workspace = true }
@@ -32,11 +33,11 @@ codex-rmcp-client = { workspace = true }
codex-utils-pty = { workspace = true }
codex-utils-readiness = { workspace = true }
codex-utils-string = { workspace = true }
codex-utils-tokenizer = { workspace = true }
codex-windows-sandbox = { package = "codex-windows-sandbox", path = "../windows-sandbox-rs" }
dirs = { workspace = true }
dunce = { workspace = true }
env-flags = { workspace = true }
encoding_rs = { workspace = true }
eventsource-stream = { workspace = true }
futures = { workspace = true }
http = { workspace = true }

@@ -100,7 +100,7 @@ pub fn extract_bash_command(command: &[String]) -> Option<(&str, &str)> {
    if !matches!(flag.as_str(), "-lc" | "-c")
        || !matches!(
            detect_shell_type(&PathBuf::from(shell)),
            Some(ShellType::Zsh) | Some(ShellType::Bash)
            Some(ShellType::Zsh) | Some(ShellType::Bash) | Some(ShellType::Sh)
        )
    {
        return None;

@@ -79,7 +79,6 @@ use crate::protocol::ApplyPatchApprovalRequestEvent;
use crate::protocol::AskForApproval;
use crate::protocol::BackgroundEventEvent;
use crate::protocol::DeprecationNoticeEvent;
use crate::protocol::ErrorEvent;
use crate::protocol::Event;
use crate::protocol::EventMsg;
use crate::protocol::ExecApprovalRequestEvent;
@@ -129,11 +128,11 @@ use codex_protocol::models::ContentItem;
use codex_protocol::models::FunctionCallOutputPayload;
use codex_protocol::models::ResponseInputItem;
use codex_protocol::models::ResponseItem;
use codex_protocol::protocol::CodexErrorInfo;
use codex_protocol::protocol::InitialHistory;
use codex_protocol::user_input::UserInput;
use codex_utils_readiness::Readiness;
use codex_utils_readiness::ReadinessFlag;
use codex_utils_tokenizer::warm_model_cache;

/// The high-level interface to the Codex system.
/// It operates as a queue pair where you send submissions and receive events.
@@ -493,7 +492,7 @@ impl Session {
        // - load history metadata
        let rollout_fut = RolloutRecorder::new(&config, rollout_params);

        let default_shell_fut = shell::default_user_shell();
        let default_shell = shell::default_user_shell();
        let history_meta_fut = crate::message_history::history_metadata(&config);
        let auth_statuses_fut = compute_auth_statuses(
            config.mcp_servers.iter(),
@@ -501,12 +500,8 @@ impl Session {
        );

        // Join all independent futures.
        let (rollout_recorder, default_shell, (history_log_id, history_entry_count), auth_statuses) = tokio::join!(
            rollout_fut,
            default_shell_fut,
            history_meta_fut,
            auth_statuses_fut
        );
        let (rollout_recorder, (history_log_id, history_entry_count), auth_statuses) =
            tokio::join!(rollout_fut, history_meta_fut, auth_statuses_fut);

        let rollout_recorder = rollout_recorder.map_err(|e| {
            error!("failed to initialize rollout recorder: {e:#}");
@@ -548,7 +543,6 @@ impl Session {
            config.model_reasoning_effort,
            config.model_reasoning_summary,
            config.model_context_window,
            config.model_max_output_tokens,
            config.model_auto_compact_token_limit,
            config.approval_policy,
            config.sandbox_policy.clone(),
@@ -559,9 +553,6 @@ impl Session {
        // Create the mutable state for the Session.
        let state = SessionState::new(session_configuration.clone());

        // Warm the tokenizer cache for the session model without blocking startup.
        warm_model_cache(&session_configuration.model);

        let services = SessionServices {
            mcp_connection_manager: Arc::new(RwLock::new(McpConnectionManager::default())),
            mcp_startup_cancellation_token: CancellationToken::new(),
@@ -1057,7 +1048,7 @@ impl Session {
            Some(turn_context.cwd.clone()),
            Some(turn_context.approval_policy),
            Some(turn_context.sandbox_policy.clone()),
            Some(self.user_shell().clone()),
            self.user_shell().clone(),
        )));
        items
    }
@@ -1197,9 +1188,14 @@ impl Session {
        &self,
        turn_context: &TurnContext,
        message: impl Into<String>,
        codex_error: CodexErr,
    ) {
        let codex_error_info = CodexErrorInfo::ResponseStreamDisconnected {
            http_status_code: codex_error.http_status_code_value(),
        };
        let event = EventMsg::StreamError(StreamErrorEvent {
            message: message.into(),
            codex_error_info: Some(codex_error_info),
        });
        self.send_event(turn_context, event).await;
    }
@@ -1445,6 +1441,7 @@ mod handlers {
    use crate::tasks::UndoTask;
    use crate::tasks::UserShellCommandTask;
    use codex_protocol::custom_prompts::CustomPrompt;
    use codex_protocol::protocol::CodexErrorInfo;
    use codex_protocol::protocol::ErrorEvent;
    use codex_protocol::protocol::Event;
    use codex_protocol::protocol::EventMsg;
@@ -1691,6 +1688,7 @@ mod handlers {
            id: sub_id.clone(),
            msg: EventMsg::Error(ErrorEvent {
                message: "Failed to shutdown rollout recorder".to_string(),
                codex_error_info: Some(CodexErrorInfo::Other),
            }),
        };
        sess.send_event_raw(event).await;
@@ -1945,9 +1943,7 @@ pub(crate) async fn run_task(
            }
            Err(e) => {
                info!("Turn error: {e:#}");
                let event = EventMsg::Error(ErrorEvent {
                    message: e.to_string(),
                });
                let event = EventMsg::Error(e.to_error_event(None));
                sess.send_event(&turn_context, event).await;
                // let the user continue the conversation
                break;
@@ -2072,6 +2068,7 @@ async fn run_turn(
            sess.notify_stream_error(
                &turn_context,
                format!("Reconnecting... {retries}/{max_retries}"),
                e,
            )
            .await;

@@ -2390,6 +2387,7 @@ mod tests {
    use crate::config::ConfigOverrides;
    use crate::config::ConfigToml;
    use crate::exec::ExecToolCallOutput;
    use crate::shell::default_user_shell;
    use crate::tools::format_exec_output_str;

    use crate::protocol::CompactedItem;
@@ -2629,7 +2627,7 @@ mod tests {
        unified_exec_manager: UnifiedExecSessionManager::default(),
        notifier: UserNotifier::new(None),
        rollout: Mutex::new(None),
        user_shell: shell::Shell::Unknown,
        user_shell: default_user_shell(),
        show_raw_agent_reasoning: config.show_raw_agent_reasoning,
        auth_manager: Arc::clone(&auth_manager),
        otel_event_manager: otel_event_manager.clone(),
@@ -2707,7 +2705,7 @@ mod tests {
        unified_exec_manager: UnifiedExecSessionManager::default(),
        notifier: UserNotifier::new(None),
        rollout: Mutex::new(None),
        user_shell: shell::Shell::Unknown,
        user_shell: default_user_shell(),
        show_raw_agent_reasoning: config.show_raw_agent_reasoning,
        auth_manager: Arc::clone(&auth_manager),
        otel_event_manager: otel_event_manager.clone(),
@@ -3052,6 +3050,7 @@ mod tests {
        let session = Arc::new(session);
        let mut turn_context = Arc::new(turn_context_raw);

        let timeout_ms = 1000;
        let params = ExecParams {
            command: if cfg!(windows) {
                vec![
@@ -3067,7 +3066,7 @@ mod tests {
                ]
            },
            cwd: turn_context.cwd.clone(),
            timeout_ms: Some(1000),
            expiration: timeout_ms.into(),
            env: HashMap::new(),
            with_escalated_permissions: Some(true),
            justification: Some("test".to_string()),
@@ -3076,7 +3075,12 @@ mod tests {

        let params2 = ExecParams {
            with_escalated_permissions: Some(false),
            ..params.clone()
            command: params.command.clone(),
            cwd: params.cwd.clone(),
            expiration: timeout_ms.into(),
            env: HashMap::new(),
            justification: params.justification.clone(),
            arg0: None,
        };

        let turn_diff_tracker = Arc::new(tokio::sync::Mutex::new(TurnDiffTracker::new()));
@@ -3096,7 +3100,7 @@ mod tests {
            arguments: serde_json::json!({
                "command": params.command.clone(),
                "workdir": Some(turn_context.cwd.to_string_lossy().to_string()),
                "timeout_ms": params.timeout_ms,
                "timeout_ms": params.expiration.timeout_ms(),
                "with_escalated_permissions": params.with_escalated_permissions,
                "justification": params.justification.clone(),
            })
@@ -3133,7 +3137,7 @@ mod tests {
            arguments: serde_json::json!({
                "command": params2.command.clone(),
                "workdir": Some(turn_context.cwd.to_string_lossy().to_string()),
                "timeout_ms": params2.timeout_ms,
                "timeout_ms": params2.expiration.timeout_ms(),
                "with_escalated_permissions": params2.with_escalated_permissions,
                "justification": params2.justification.clone(),
            })

@@ -267,6 +267,20 @@ mod tests {
        }
    }

    #[test]
    fn windows_powershell_full_path_is_safe() {
        if !cfg!(windows) {
            // Windows only because on Linux path splitting doesn't handle `/` separators properly
            return;
        }

        assert!(is_known_safe_command(&vec_str(&[
            r"C:\Program Files\PowerShell\7\pwsh.exe",
            "-Command",
            "Get-Location",
        ])));
    }

    #[test]
    fn bash_lc_safe_examples() {
        assert!(is_known_safe_command(&vec_str(&["bash", "-lc", "ls"])));

@@ -1,4 +1,5 @@
use shlex::split as shlex_split;
use std::path::Path;

/// On Windows, we conservatively allow only clearly read-only PowerShell invocations
/// that match a small safelist. Anything else (including direct CMD commands) is unsafe.
@@ -131,8 +132,14 @@ fn split_into_commands(tokens: Vec<String>) -> Option<Vec<Vec<String>>> {

/// Returns true when the executable name is one of the supported PowerShell binaries.
fn is_powershell_executable(exe: &str) -> bool {
    let executable_name = Path::new(exe)
        .file_name()
        .and_then(|osstr| osstr.to_str())
        .unwrap_or(exe)
        .to_ascii_lowercase();

    matches!(
        exe.to_ascii_lowercase().as_str(),
        executable_name.as_str(),
        "powershell" | "powershell.exe" | "pwsh" | "pwsh.exe"
    )
}
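The corrected predicate normalizes to the executable's basename before matching, which is what lets the new full-path tests pass. A stand-alone sketch of the same logic (`is_powershell_exe` is a local name, not the crate's):

use std::path::Path;

// Mirrors the fix above: reduce to the file name, lowercase it,
// then compare against the known PowerShell binaries.
fn is_powershell_exe(exe: &str) -> bool {
    let name = Path::new(exe)
        .file_name()
        .and_then(|osstr| osstr.to_str())
        .unwrap_or(exe)
        .to_ascii_lowercase();
    matches!(
        name.as_str(),
        "powershell" | "powershell.exe" | "pwsh" | "pwsh.exe"
    )
}

fn main() {
    assert!(is_powershell_exe("pwsh"));
    assert!(is_powershell_exe("PowerShell.EXE"));
    assert!(!is_powershell_exe("cmd.exe"));
    if cfg!(windows) {
        // Backslash-separated paths only split into components on Windows.
        assert!(is_powershell_exe(r"C:\Program Files\PowerShell\7\pwsh.exe"));
    }
}
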
@@ -313,6 +320,27 @@ mod tests {
        ])));
    }

    #[test]
    fn accepts_full_path_powershell_invocations() {
        if !cfg!(windows) {
            // Windows only because on Linux path splitting doesn't handle `/` separators properly
            return;
        }

        assert!(is_safe_command_windows(&vec_str(&[
            r"C:\Program Files\PowerShell\7\pwsh.exe",
            "-NoProfile",
            "-Command",
            "Get-ChildItem -Path .",
        ])));

        assert!(is_safe_command_windows(&vec_str(&[
            r"C:\Windows\System32\WindowsPowerShell\v1.0\powershell.exe",
            "-Command",
            "Get-Content Cargo.toml",
        ])));
    }

    #[test]
    fn allows_read_only_pipelines_and_git_usage() {
        assert!(is_safe_command_windows(&vec_str(&[

@@ -10,7 +10,6 @@ use crate::error::Result as CodexResult;
use crate::features::Feature;
use crate::protocol::AgentMessageEvent;
use crate::protocol::CompactedItem;
use crate::protocol::ErrorEvent;
use crate::protocol::EventMsg;
use crate::protocol::TaskStartedEvent;
use crate::protocol::TurnContextItem;
@@ -128,9 +127,7 @@ async fn run_compact_task_inner(
                continue;
            }
            sess.set_total_tokens_full(turn_context.as_ref()).await;
            let event = EventMsg::Error(ErrorEvent {
                message: e.to_string(),
            });
            let event = EventMsg::Error(e.to_error_event(None));
            sess.send_event(&turn_context, event).await;
            return;
        }
@@ -141,14 +138,13 @@ async fn run_compact_task_inner(
                sess.notify_stream_error(
                    turn_context.as_ref(),
                    format!("Reconnecting... {retries}/{max_retries}"),
                    e,
                )
                .await;
                tokio::time::sleep(delay).await;
                continue;
            } else {
                let event = EventMsg::Error(ErrorEvent {
                    message: e.to_string(),
                });
                let event = EventMsg::Error(e.to_error_event(None));
                sess.send_event(&turn_context, event).await;
                return;
            }

@@ -6,7 +6,6 @@ use crate::codex::TurnContext;
use crate::error::Result as CodexResult;
use crate::protocol::AgentMessageEvent;
use crate::protocol::CompactedItem;
use crate::protocol::ErrorEvent;
use crate::protocol::EventMsg;
use crate::protocol::RolloutItem;
use crate::protocol::TaskStartedEvent;
@@ -30,9 +29,9 @@ pub(crate) async fn run_remote_compact_task(sess: Arc<Session>, turn_context: Ar

async fn run_remote_compact_task_inner(sess: &Arc<Session>, turn_context: &Arc<TurnContext>) {
    if let Err(err) = run_remote_compact_task_inner_impl(sess, turn_context).await {
        let event = EventMsg::Error(ErrorEvent {
            message: format!("Error running remote compact task: {err}"),
        });
        let event = EventMsg::Error(
            err.to_error_event(Some("Error running remote compact task".to_string())),
        );
        sess.send_event(turn_context, event).await;
    }
}

@@ -4,7 +4,6 @@ use crate::config::types::Notice;
use anyhow::Context;
use codex_protocol::config_types::ReasoningEffort;
use codex_protocol::config_types::TrustLevel;
use codex_utils_tokenizer::warm_model_cache;
use std::collections::BTreeMap;
use std::path::Path;
use std::path::PathBuf;
@@ -231,9 +230,6 @@ impl ConfigDocument {
    fn apply(&mut self, edit: &ConfigEdit) -> anyhow::Result<bool> {
        match edit {
            ConfigEdit::SetModel { model, effort } => Ok({
                if let Some(model) = &model {
                    warm_model_cache(model)
                }
                let mut mutated = false;
                mutated |= self.write_profile_value(
                    &["model"],

@@ -86,9 +86,6 @@ pub struct Config {
    /// Size of the context window for the model, in tokens.
    pub model_context_window: Option<i64>,

    /// Maximum number of output tokens.
    pub model_max_output_tokens: Option<i64>,

    /// Token usage threshold triggering auto-compaction of conversation history.
    pub model_auto_compact_token_limit: Option<i64>,

@@ -160,6 +157,9 @@ pub struct Config {
    /// and turn completions when not focused.
    pub tui_notifications: Notifications,

    /// Enable ASCII animations and shimmer effects in the TUI.
    pub animations: bool,

    /// The directory that should be treated as the current working directory
    /// for the session. All relative paths inside the business-logic layer are
    /// resolved against this path.
@@ -567,9 +567,6 @@ pub struct ConfigToml {
    /// Size of the context window for the model, in tokens.
    pub model_context_window: Option<i64>,

    /// Maximum number of output tokens.
    pub model_max_output_tokens: Option<i64>,

    /// Token usage threshold triggering auto-compaction of conversation history.
    pub model_auto_compact_token_limit: Option<i64>,

@@ -1119,11 +1116,6 @@ impl Config {
        let model_context_window = cfg
            .model_context_window
            .or_else(|| openai_model_info.as_ref().map(|info| info.context_window));
        let model_max_output_tokens = cfg.model_max_output_tokens.or_else(|| {
            openai_model_info
                .as_ref()
                .map(|info| info.max_output_tokens)
        });
        let model_auto_compact_token_limit = cfg.model_auto_compact_token_limit.or_else(|| {
            openai_model_info
                .as_ref()
@@ -1175,7 +1167,6 @@ impl Config {
            review_model,
            model_family,
            model_context_window,
            model_max_output_tokens,
            model_auto_compact_token_limit,
            model_provider_id,
            model_provider,
@@ -1253,6 +1244,7 @@ impl Config {
                .as_ref()
                .map(|t| t.notifications.clone())
                .unwrap_or_default(),
            animations: cfg.tui.as_ref().map(|t| t.animations).unwrap_or(true),
            otel: {
                let t: OtelConfigToml = cfg.otel.unwrap_or_default();
                let log_user_prompt = t.log_user_prompt.unwrap_or(false);
@@ -2957,7 +2949,6 @@ model_verbosity = "high"
            review_model: OPENAI_DEFAULT_REVIEW_MODEL.to_string(),
            model_family: find_family_for_model("o3").expect("known model slug"),
            model_context_window: Some(200_000),
            model_max_output_tokens: Some(100_000),
            model_auto_compact_token_limit: Some(180_000),
            model_provider_id: "openai".to_string(),
            model_provider: fixture.openai_provider.clone(),
@@ -3003,6 +2994,7 @@ model_verbosity = "high"
            notices: Default::default(),
            disable_paste_burst: false,
            tui_notifications: Default::default(),
            animations: true,
            otel: OtelConfig::default(),
        },
        o3_profile_config
@@ -3029,7 +3021,6 @@ model_verbosity = "high"
            review_model: OPENAI_DEFAULT_REVIEW_MODEL.to_string(),
            model_family: find_family_for_model("gpt-3.5-turbo").expect("known model slug"),
            model_context_window: Some(16_385),
            model_max_output_tokens: Some(4_096),
            model_auto_compact_token_limit: Some(14_746),
            model_provider_id: "openai-chat-completions".to_string(),
            model_provider: fixture.openai_chat_completions_provider.clone(),
@@ -3075,6 +3066,7 @@ model_verbosity = "high"
            notices: Default::default(),
            disable_paste_burst: false,
            tui_notifications: Default::default(),
            animations: true,
            otel: OtelConfig::default(),
        };

@@ -3116,7 +3108,6 @@ model_verbosity = "high"
            review_model: OPENAI_DEFAULT_REVIEW_MODEL.to_string(),
            model_family: find_family_for_model("o3").expect("known model slug"),
            model_context_window: Some(200_000),
            model_max_output_tokens: Some(100_000),
            model_auto_compact_token_limit: Some(180_000),
            model_provider_id: "openai".to_string(),
            model_provider: fixture.openai_provider.clone(),
@@ -3162,6 +3153,7 @@ model_verbosity = "high"
            notices: Default::default(),
            disable_paste_burst: false,
            tui_notifications: Default::default(),
            animations: true,
            otel: OtelConfig::default(),
        };

@@ -3189,7 +3181,6 @@ model_verbosity = "high"
            review_model: OPENAI_DEFAULT_REVIEW_MODEL.to_string(),
            model_family: find_family_for_model("gpt-5.1").expect("known model slug"),
            model_context_window: Some(272_000),
            model_max_output_tokens: Some(128_000),
            model_auto_compact_token_limit: Some(244_800),
            model_provider_id: "openai".to_string(),
            model_provider: fixture.openai_provider.clone(),
@@ -3235,6 +3226,7 @@ model_verbosity = "high"
            notices: Default::default(),
            disable_paste_burst: false,
            tui_notifications: Default::default(),
            animations: true,
            otel: OtelConfig::default(),
        };


@@ -363,6 +363,15 @@ pub struct Tui {
    /// Defaults to `true`.
    #[serde(default)]
    pub notifications: Notifications,

    /// Enable animations (welcome screen, shimmer effects, spinners).
    /// Defaults to `true`.
    #[serde(default = "default_true")]
    pub animations: bool,
}

const fn default_true() -> bool {
    true
}

/// Settings for notices we display to users via the tui and app-server clients

@@ -1,13 +1,13 @@
use crate::codex::TurnContext;
use crate::context_manager::normalize;
use crate::truncate::TruncationPolicy;
use crate::truncate::approx_token_count;
use crate::truncate::truncate_function_output_items_with_policy;
use crate::truncate::truncate_text;
use codex_protocol::models::FunctionCallOutputPayload;
use codex_protocol::models::ResponseItem;
use codex_protocol::protocol::TokenUsage;
use codex_protocol::protocol::TokenUsageInfo;
use codex_utils_tokenizer::Tokenizer;
use std::ops::Deref;

/// Transcript of conversation history
@@ -74,26 +74,21 @@ impl ContextManager {
        history
    }

    // Estimate the number of tokens in the history. Return None if no tokenizer
    // is available. This does not consider the reasoning traces.
    // /!\ The value is a lower bound estimate and does not represent the exact
    // context length.
    // Estimate token usage using byte-based heuristics from the truncation helpers.
    // This is a coarse lower bound, not a tokenizer-accurate count.
    pub(crate) fn estimate_token_count(&self, turn_context: &TurnContext) -> Option<i64> {
        let model = turn_context.client.get_model();
        let tokenizer = Tokenizer::for_model(model.as_str()).ok()?;
        let model_family = turn_context.client.get_model_family();
        let base_tokens =
            i64::try_from(approx_token_count(model_family.base_instructions.as_str()))
                .unwrap_or(i64::MAX);

        Some(
            self.items
                .iter()
                .map(|item| {
                    serde_json::to_string(&item)
                        .map(|item| tokenizer.count(&item))
                        .unwrap_or_default()
                })
                .sum::<i64>()
                + tokenizer.count(model_family.base_instructions.as_str()),
        )
        let items_tokens = self.items.iter().fold(0i64, |acc, item| {
            let serialized = serde_json::to_string(item).unwrap_or_default();
            let item_tokens = i64::try_from(approx_token_count(&serialized)).unwrap_or(i64::MAX);
            acc.saturating_add(item_tokens)
        });

        Some(base_tokens.saturating_add(items_tokens))
    }

    pub(crate) fn remove_first_item(&mut self) {

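The rewritten estimator swaps tokenizer counting for the byte-based `approx_token_count` with saturating arithmetic. A self-contained sketch of the same shape, using the common ~4-bytes-per-token rule of thumb as a stand-in for the real helper:

// Stand-in for crate::truncate::approx_token_count: a coarse
// bytes-divided-by-four heuristic (an assumption, not the crate's exact rule).
fn approx_token_count(text: &str) -> usize {
    (text.len() + 3) / 4
}

fn estimate_tokens(base_instructions: &str, serialized_items: &[String]) -> i64 {
    let base = i64::try_from(approx_token_count(base_instructions)).unwrap_or(i64::MAX);
    // Saturating adds keep pathological inputs from overflowing, as above.
    let items = serialized_items.iter().fold(0i64, |acc, s| {
        let t = i64::try_from(approx_token_count(s)).unwrap_or(i64::MAX);
        acc.saturating_add(t)
    });
    base.saturating_add(items)
}

fn main() {
    let est = estimate_tokens("You are Codex.", &["{\"role\":\"user\"}".to_string()]);
    println!("~{est} tokens");
}
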
@@ -6,6 +6,7 @@ use crate::codex::TurnContext;
use crate::protocol::AskForApproval;
use crate::protocol::SandboxPolicy;
use crate::shell::Shell;
use crate::shell::default_user_shell;
use codex_protocol::config_types::SandboxMode;
use codex_protocol::models::ContentItem;
use codex_protocol::models::ResponseItem;
@@ -28,7 +29,7 @@ pub(crate) struct EnvironmentContext {
    pub sandbox_mode: Option<SandboxMode>,
    pub network_access: Option<NetworkAccess>,
    pub writable_roots: Option<Vec<PathBuf>>,
    pub shell: Option<Shell>,
    pub shell: Shell,
}

impl EnvironmentContext {
@@ -36,7 +37,7 @@ impl EnvironmentContext {
        cwd: Option<PathBuf>,
        approval_policy: Option<AskForApproval>,
        sandbox_policy: Option<SandboxPolicy>,
        shell: Option<Shell>,
        shell: Shell,
    ) -> Self {
        Self {
            cwd,
@@ -110,7 +111,7 @@ impl EnvironmentContext {
        } else {
            None
        };
        EnvironmentContext::new(cwd, approval_policy, sandbox_policy, None)
        EnvironmentContext::new(cwd, approval_policy, sandbox_policy, default_user_shell())
    }
}

@@ -121,7 +122,7 @@ impl From<&TurnContext> for EnvironmentContext {
            Some(turn_context.approval_policy),
            Some(turn_context.sandbox_policy.clone()),
            // Shell is not configurable from turn to turn
            None,
            default_user_shell(),
        )
    }
}
@@ -169,11 +170,9 @@ impl EnvironmentContext {
            }
            lines.push("  </writable_roots>".to_string());
        }
        if let Some(shell) = self.shell
            && let Some(shell_name) = shell.name()
        {
            lines.push(format!("  <shell>{shell_name}</shell>"));
        }

        let shell_name = self.shell.name();
        lines.push(format!("  <shell>{shell_name}</shell>"));
        lines.push(ENVIRONMENT_CONTEXT_CLOSE_TAG.to_string());
        lines.join("\n")
    }

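With `shell` no longer optional, the serialized context always ends with a `<shell>` element before the closing tag. A tiny sketch of that output shape, built the same push-lines way as the serializer above (values illustrative):

fn main() {
    let shell_name = "bash"; // always present now that shell is not Option<Shell>
    let mut lines = vec!["<environment_context>".to_string()];
    lines.push("  <sandbox_mode>workspace-write</sandbox_mode>".to_string());
    lines.push(format!("  <shell>{shell_name}</shell>"));
    lines.push("</environment_context>".to_string());
    println!("{}", lines.join("\n"));
}
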
@@ -193,12 +192,18 @@ impl From<EnvironmentContext> for ResponseItem {

#[cfg(test)]
mod tests {
    use crate::shell::BashShell;
    use crate::shell::ZshShell;
    use crate::shell::ShellType;

    use super::*;
    use pretty_assertions::assert_eq;

    fn fake_shell() -> Shell {
        Shell {
            shell_type: ShellType::Bash,
            shell_path: PathBuf::from("/bin/bash"),
        }
    }

    fn workspace_write_policy(writable_roots: Vec<&str>, network_access: bool) -> SandboxPolicy {
        SandboxPolicy::WorkspaceWrite {
            writable_roots: writable_roots.into_iter().map(PathBuf::from).collect(),
@@ -214,7 +219,7 @@ mod tests {
            Some(PathBuf::from("/repo")),
            Some(AskForApproval::OnRequest),
            Some(workspace_write_policy(vec!["/repo", "/tmp"], false)),
            None,
            fake_shell(),
        );

        let expected = r#"<environment_context>
@@ -226,6 +231,7 @@ mod tests {
    <root>/repo</root>
    <root>/tmp</root>
  </writable_roots>
  <shell>bash</shell>
</environment_context>"#;

        assert_eq!(context.serialize_to_xml(), expected);
@@ -237,13 +243,14 @@ mod tests {
            None,
            Some(AskForApproval::Never),
            Some(SandboxPolicy::ReadOnly),
            None,
            fake_shell(),
        );

        let expected = r#"<environment_context>
  <approval_policy>never</approval_policy>
  <sandbox_mode>read-only</sandbox_mode>
  <network_access>restricted</network_access>
  <shell>bash</shell>
</environment_context>"#;

        assert_eq!(context.serialize_to_xml(), expected);
@@ -255,13 +262,14 @@ mod tests {
            None,
            Some(AskForApproval::OnFailure),
            Some(SandboxPolicy::DangerFullAccess),
            None,
            fake_shell(),
        );

        let expected = r#"<environment_context>
  <approval_policy>on-failure</approval_policy>
  <sandbox_mode>danger-full-access</sandbox_mode>
  <network_access>enabled</network_access>
  <shell>bash</shell>
</environment_context>"#;

        assert_eq!(context.serialize_to_xml(), expected);
@@ -274,13 +282,13 @@ mod tests {
            Some(PathBuf::from("/repo")),
            Some(AskForApproval::OnRequest),
            Some(workspace_write_policy(vec!["/repo"], false)),
            None,
            fake_shell(),
        );
        let context2 = EnvironmentContext::new(
            Some(PathBuf::from("/repo")),
            Some(AskForApproval::Never),
            Some(workspace_write_policy(vec!["/repo"], true)),
            None,
            fake_shell(),
        );
        assert!(!context1.equals_except_shell(&context2));
    }
@@ -291,13 +299,13 @@ mod tests {
            Some(PathBuf::from("/repo")),
            Some(AskForApproval::OnRequest),
            Some(SandboxPolicy::new_read_only_policy()),
            None,
            fake_shell(),
        );
        let context2 = EnvironmentContext::new(
            Some(PathBuf::from("/repo")),
            Some(AskForApproval::OnRequest),
            Some(SandboxPolicy::new_workspace_write_policy()),
            None,
            fake_shell(),
        );

        assert!(!context1.equals_except_shell(&context2));
@@ -309,13 +317,13 @@ mod tests {
            Some(PathBuf::from("/repo")),
            Some(AskForApproval::OnRequest),
            Some(workspace_write_policy(vec!["/repo", "/tmp", "/var"], false)),
            None,
            fake_shell(),
        );
        let context2 = EnvironmentContext::new(
            Some(PathBuf::from("/repo")),
            Some(AskForApproval::OnRequest),
            Some(workspace_write_policy(vec!["/repo", "/tmp"], true)),
            None,
            fake_shell(),
        );

        assert!(!context1.equals_except_shell(&context2));
@@ -327,17 +335,19 @@ mod tests {
            Some(PathBuf::from("/repo")),
            Some(AskForApproval::OnRequest),
            Some(workspace_write_policy(vec!["/repo"], false)),
            Some(Shell::Bash(BashShell {
            Shell {
                shell_type: ShellType::Bash,
                shell_path: "/bin/bash".into(),
            })),
            },
        );
        let context2 = EnvironmentContext::new(
            Some(PathBuf::from("/repo")),
            Some(AskForApproval::OnRequest),
            Some(workspace_write_policy(vec!["/repo"], false)),
            Some(Shell::Zsh(ZshShell {
            Shell {
                shell_type: ShellType::Zsh,
                shell_path: "/bin/zsh".into(),
            })),
            },
        );

        assert!(context1.equals_except_shell(&context2));

@@ -10,6 +10,8 @@ use chrono::Local;
use chrono::Utc;
use codex_async_utils::CancelErr;
use codex_protocol::ConversationId;
use codex_protocol::protocol::CodexErrorInfo;
use codex_protocol::protocol::ErrorEvent;
use codex_protocol::protocol::RateLimitSnapshot;
use reqwest::StatusCode;
use serde_json;
@@ -430,6 +432,57 @@ impl CodexErr {
    pub fn downcast_ref<T: std::any::Any>(&self) -> Option<&T> {
        (self as &dyn std::any::Any).downcast_ref::<T>()
    }

    /// Translate core error to client-facing protocol error.
    pub fn to_codex_protocol_error(&self) -> CodexErrorInfo {
        match self {
            CodexErr::ContextWindowExceeded => CodexErrorInfo::ContextWindowExceeded,
            CodexErr::UsageLimitReached(_)
            | CodexErr::QuotaExceeded
            | CodexErr::UsageNotIncluded => CodexErrorInfo::UsageLimitExceeded,
            CodexErr::RetryLimit(_) => CodexErrorInfo::ResponseTooManyFailedAttempts {
                http_status_code: self.http_status_code_value(),
            },
            CodexErr::ConnectionFailed(_) => CodexErrorInfo::HttpConnectionFailed {
                http_status_code: self.http_status_code_value(),
            },
            CodexErr::ResponseStreamFailed(_) => CodexErrorInfo::ResponseStreamConnectionFailed {
                http_status_code: self.http_status_code_value(),
            },
            CodexErr::RefreshTokenFailed(_) => CodexErrorInfo::Unauthorized,
            CodexErr::SessionConfiguredNotFirstEvent
            | CodexErr::InternalServerError
            | CodexErr::InternalAgentDied => CodexErrorInfo::InternalServerError,
            CodexErr::UnsupportedOperation(_) | CodexErr::ConversationNotFound(_) => {
                CodexErrorInfo::BadRequest
            }
            CodexErr::Sandbox(_) => CodexErrorInfo::SandboxError,
            _ => CodexErrorInfo::Other,
        }
    }

    pub fn to_error_event(&self, message_prefix: Option<String>) -> ErrorEvent {
        let error_message = self.to_string();
        let message: String = match message_prefix {
            Some(prefix) => format!("{prefix}: {error_message}"),
            None => error_message,
        };
        ErrorEvent {
            message,
            codex_error_info: Some(self.to_codex_protocol_error()),
        }
    }

    pub fn http_status_code_value(&self) -> Option<u16> {
        let http_status_code = match self {
            CodexErr::RetryLimit(err) => Some(err.status),
            CodexErr::UnexpectedStatus(err) => Some(err.status),
            CodexErr::ConnectionFailed(err) => err.source.status(),
            CodexErr::ResponseStreamFailed(err) => err.source.status(),
            _ => None,
        };
        http_status_code.as_ref().map(StatusCode::as_u16)
    }
}

pub fn get_error_message_ui(e: &CodexErr) -> String {

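The prefixing rule in `to_error_event` is worth seeing in isolation; this stand-alone sketch mirrors just that formatting step, without the real `CodexErr`:

fn prefixed_message(message_prefix: Option<String>, error_message: String) -> String {
    match message_prefix {
        Some(prefix) => format!("{prefix}: {error_message}"),
        None => error_message,
    }
}

fn main() {
    assert_eq!(
        prefixed_message(Some("prefix".into()), "boom".into()),
        "prefix: boom"
    );
    assert_eq!(prefixed_message(None, "boom".into()), "boom");
}
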
@@ -478,6 +531,10 @@ mod tests {
    use chrono::Utc;
    use codex_protocol::protocol::RateLimitWindow;
    use pretty_assertions::assert_eq;
    use reqwest::Response;
    use reqwest::ResponseBuilderExt;
    use reqwest::StatusCode;
    use reqwest::Url;

    fn rate_limit_snapshot() -> RateLimitSnapshot {
        let primary_reset_at = Utc
@@ -573,6 +630,33 @@ mod tests {
        assert_eq!(get_error_message_ui(&err), "stdout only");
    }

    #[test]
    fn to_error_event_handles_response_stream_failed() {
        let response = http::Response::builder()
            .status(StatusCode::TOO_MANY_REQUESTS)
            .url(Url::parse("http://example.com").unwrap())
            .body("")
            .unwrap();
        let source = Response::from(response).error_for_status_ref().unwrap_err();
        let err = CodexErr::ResponseStreamFailed(ResponseStreamFailed {
            source,
            request_id: Some("req-123".to_string()),
        });

        let event = err.to_error_event(Some("prefix".to_string()));

        assert_eq!(
            event.message,
            "prefix: Error while reading the server response: HTTP status client error (429 Too Many Requests) for url (http://example.com/), request id: req-123"
        );
        assert_eq!(
            event.codex_error_info,
            Some(CodexErrorInfo::ResponseStreamConnectionFailed {
                http_status_code: Some(429)
            })
        );
    }

    #[test]
    fn sandbox_denied_reports_exit_code_when_no_output_available() {
        let output = ExecToolCallOutput {

@@ -117,7 +117,7 @@ pub fn parse_turn_item(item: &ResponseItem) -> Option<TurnItem> {
            ..
        } => Some(TurnItem::WebSearch(WebSearchItem {
            id: id.clone().unwrap_or_default(),
            query: query.clone(),
            query: query.clone().unwrap_or_default(),
        })),
        _ => None,
    }
@@ -306,7 +306,7 @@ mod tests {
            id: Some("ws_1".to_string()),
            status: Some("completed".to_string()),
            action: WebSearchAction::Search {
                query: "weather".to_string(),
                query: Some("weather".to_string()),
            },
        };


@@ -14,6 +14,7 @@ use tokio::io::AsyncRead;
|
||||
use tokio::io::AsyncReadExt;
|
||||
use tokio::io::BufReader;
|
||||
use tokio::process::Child;
|
||||
use tokio_util::sync::CancellationToken;
|
||||
|
||||
use crate::error::CodexErr;
|
||||
use crate::error::Result;
|
||||
@@ -28,8 +29,9 @@ use crate::sandboxing::ExecEnv;
|
||||
use crate::sandboxing::SandboxManager;
|
||||
use crate::spawn::StdioPolicy;
|
||||
use crate::spawn::spawn_child_async;
|
||||
use crate::text_encoding::bytes_to_string_smart;
|
||||
|
||||
const DEFAULT_TIMEOUT_MS: u64 = 10_000;
|
||||
pub const DEFAULT_EXEC_COMMAND_TIMEOUT_MS: u64 = 10_000;
|
||||
|
||||
// Hardcode these since it does not seem worth including the libc crate just
|
||||
// for these.
|
||||
@@ -46,20 +48,59 @@ const AGGREGATE_BUFFER_INITIAL_CAPACITY: usize = 8 * 1024; // 8 KiB
|
||||
/// Aggregation still collects full output; only the live event stream is capped.
|
||||
pub(crate) const MAX_EXEC_OUTPUT_DELTAS_PER_CALL: usize = 10_000;
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
#[derive(Debug)]
|
||||
pub struct ExecParams {
|
||||
pub command: Vec<String>,
|
||||
pub cwd: PathBuf,
|
||||
pub timeout_ms: Option<u64>,
|
||||
pub expiration: ExecExpiration,
|
||||
pub env: HashMap<String, String>,
|
||||
pub with_escalated_permissions: Option<bool>,
|
||||
pub justification: Option<String>,
|
||||
pub arg0: Option<String>,
|
||||
}
|
||||
|
||||
impl ExecParams {
|
||||
pub fn timeout_duration(&self) -> Duration {
|
||||
Duration::from_millis(self.timeout_ms.unwrap_or(DEFAULT_TIMEOUT_MS))
|
||||
/// Mechanism to terminate an exec invocation before it finishes naturally.
|
||||
#[derive(Debug)]
|
||||
pub enum ExecExpiration {
|
||||
Timeout(Duration),
|
||||
DefaultTimeout,
|
||||
Cancellation(CancellationToken),
|
||||
}
|
||||
|
||||
impl From<Option<u64>> for ExecExpiration {
|
||||
fn from(timeout_ms: Option<u64>) -> Self {
|
||||
timeout_ms.map_or(ExecExpiration::DefaultTimeout, |timeout_ms| {
|
||||
ExecExpiration::Timeout(Duration::from_millis(timeout_ms))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl From<u64> for ExecExpiration {
|
||||
fn from(timeout_ms: u64) -> Self {
|
||||
ExecExpiration::Timeout(Duration::from_millis(timeout_ms))
|
||||
}
|
||||
}
|
||||
|
||||
impl ExecExpiration {
|
||||
async fn wait(self) {
|
||||
match self {
|
||||
ExecExpiration::Timeout(duration) => tokio::time::sleep(duration).await,
|
||||
ExecExpiration::DefaultTimeout => {
|
||||
tokio::time::sleep(Duration::from_millis(DEFAULT_EXEC_COMMAND_TIMEOUT_MS)).await
|
||||
}
|
||||
ExecExpiration::Cancellation(cancel) => {
|
||||
cancel.cancelled().await;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// If ExecExpiration is a timeout, returns the timeout in milliseconds.
|
||||
pub(crate) fn timeout_ms(&self) -> Option<u64> {
|
||||
match self {
|
||||
ExecExpiration::Timeout(duration) => Some(duration.as_millis() as u64),
|
||||
ExecExpiration::DefaultTimeout => Some(DEFAULT_EXEC_COMMAND_TIMEOUT_MS),
|
||||
ExecExpiration::Cancellation(_) => None,
|
||||
}
|
||||
}
|
||||
}
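
Taken together, the `From` impls and the `Cancellation` variant give callers three ways to bound an exec. A minimal illustrative sketch, not itself part of the diff, using only the APIs above:

    // Explicit timeout: From<u64> wraps milliseconds.
    let explicit: ExecExpiration = 500u64.into(); // Timeout(500ms)
    // Optional timeout from a tool call: None falls back to the default.
    let fallback: ExecExpiration = Option::<u64>::None.into(); // DefaultTimeout (10s)
    // Cooperative cancellation: no deadline at all.
    let token = tokio_util::sync::CancellationToken::new();
    let cancellable = ExecExpiration::Cancellation(token.clone());
    // Later, token.cancel() resolves `expiration.wait()` and the child is killed.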

@@ -95,7 +136,7 @@ pub async fn process_exec_tool_call(
    let ExecParams {
        command,
        cwd,
        timeout_ms,
        expiration,
        env,
        with_escalated_permissions,
        justification,
@@ -114,7 +155,7 @@ pub async fn process_exec_tool_call(
        args: args.to_vec(),
        cwd,
        env,
        timeout_ms,
        expiration,
        with_escalated_permissions,
        justification,
    };
@@ -122,7 +163,7 @@ pub async fn process_exec_tool_call(
    let manager = SandboxManager::new();
    let exec_env = manager
        .transform(
            &spec,
            spec,
            sandbox_policy,
            sandbox_type,
            sandbox_cwd,
@@ -131,7 +172,7 @@ pub async fn process_exec_tool_call(
        .map_err(CodexErr::from)?;

    // Route through the sandboxing module for a single, unified execution path.
    crate::sandboxing::execute_env(&exec_env, sandbox_policy, stdout_stream).await
    crate::sandboxing::execute_env(exec_env, sandbox_policy, stdout_stream).await
}

pub(crate) async fn execute_exec_env(
@@ -143,7 +184,7 @@ pub(crate) async fn execute_exec_env(
        command,
        cwd,
        env,
        timeout_ms,
        expiration,
        sandbox,
        with_escalated_permissions,
        justification,
@@ -153,7 +194,7 @@ pub(crate) async fn execute_exec_env(
    let params = ExecParams {
        command,
        cwd,
        timeout_ms,
        expiration,
        env,
        with_escalated_permissions,
        justification,
@@ -178,9 +219,12 @@ async fn exec_windows_sandbox(
        command,
        cwd,
        env,
        timeout_ms,
        expiration,
        ..
    } = params;
    // TODO(iceweasel-oai): run_windows_sandbox_capture should support all
    // variants of ExecExpiration, not just timeout.
    let timeout_ms = expiration.timeout_ms();

    let policy_str = serde_json::to_string(sandbox_policy).map_err(|err| {
        CodexErr::Io(io::Error::other(format!(
@@ -414,7 +458,7 @@ impl StreamOutput<String> {
impl StreamOutput<Vec<u8>> {
    pub fn from_utf8_lossy(&self) -> StreamOutput<String> {
        StreamOutput {
            text: String::from_utf8_lossy(&self.text).to_string(),
            text: bytes_to_string_smart(&self.text),
            truncated_after_lines: self.truncated_after_lines,
        }
    }
@@ -448,12 +492,12 @@ async fn exec(
    {
        return exec_windows_sandbox(params, sandbox_policy).await;
    }
    let timeout = params.timeout_duration();
    let ExecParams {
        command,
        cwd,
        env,
        arg0,
        expiration,
        ..
    } = params;

@@ -474,14 +518,14 @@ async fn exec(
        env,
    )
    .await?;
    consume_truncated_output(child, timeout, stdout_stream).await
    consume_truncated_output(child, expiration, stdout_stream).await
}

/// Consumes the output of a child process, truncating it so it is suitable for
/// use as the output of a `shell` tool call. Also enforces specified timeout.
async fn consume_truncated_output(
    mut child: Child,
    timeout: Duration,
    expiration: ExecExpiration,
    stdout_stream: Option<StdoutStream>,
) -> Result<RawExecToolCallOutput> {
    // Both stdout and stderr were configured with `Stdio::piped()`
@@ -515,20 +559,14 @@ async fn consume_truncated_output(
    ));

    let (exit_status, timed_out) = tokio::select! {
        result = tokio::time::timeout(timeout, child.wait()) => {
            match result {
                Ok(status_result) => {
                    let exit_status = status_result?;
                    (exit_status, false)
                }
                Err(_) => {
                    // timeout
                    kill_child_process_group(&mut child)?;
                    child.start_kill()?;
                    // Debatable whether `child.wait().await` should be called here.
                    (synthetic_exit_status(EXIT_CODE_SIGNAL_BASE + TIMEOUT_CODE), true)
                }
            }
        status_result = child.wait() => {
            let exit_status = status_result?;
            (exit_status, false)
        }
        _ = expiration.wait() => {
            kill_child_process_group(&mut child)?;
            child.start_kill()?;
            (synthetic_exit_status(EXIT_CODE_SIGNAL_BASE + TIMEOUT_CODE), true)
        }
        _ = tokio::signal::ctrl_c() => {
            kill_child_process_group(&mut child)?;
@@ -780,6 +818,15 @@ mod tests {
    #[cfg(unix)]
    #[tokio::test]
    async fn kill_child_process_group_kills_grandchildren_on_timeout() -> Result<()> {
        // On Linux/macOS, /bin/bash is typically present; on FreeBSD/OpenBSD,
        // prefer /bin/sh to avoid NotFound errors.
        #[cfg(any(target_os = "freebsd", target_os = "openbsd"))]
        let command = vec![
            "/bin/sh".to_string(),
            "-c".to_string(),
            "sleep 60 & echo $!; sleep 60".to_string(),
        ];
        #[cfg(all(unix, not(any(target_os = "freebsd", target_os = "openbsd"))))]
        let command = vec![
            "/bin/bash".to_string(),
            "-c".to_string(),
@@ -789,7 +836,7 @@ mod tests {
        let params = ExecParams {
            command,
            cwd: std::env::current_dir()?,
            timeout_ms: Some(500),
            expiration: 500.into(),
            env,
            with_escalated_permissions: None,
            justification: None,
@@ -823,4 +870,62 @@ mod tests {
        assert!(killed, "grandchild process with pid {pid} is still alive");
        Ok(())
    }

    #[tokio::test]
    async fn process_exec_tool_call_respects_cancellation_token() -> Result<()> {
        let command = long_running_command();
        let cwd = std::env::current_dir()?;
        let env: HashMap<String, String> = std::env::vars().collect();
        let cancel_token = CancellationToken::new();
        let cancel_tx = cancel_token.clone();
        let params = ExecParams {
            command,
            cwd: cwd.clone(),
            expiration: ExecExpiration::Cancellation(cancel_token),
            env,
            with_escalated_permissions: None,
            justification: None,
            arg0: None,
        };
        tokio::spawn(async move {
            tokio::time::sleep(Duration::from_millis(1_000)).await;
            cancel_tx.cancel();
        });
        let result = process_exec_tool_call(
            params,
            SandboxType::None,
            &SandboxPolicy::DangerFullAccess,
            cwd.as_path(),
            &None,
            None,
        )
        .await;
        let output = match result {
            Err(CodexErr::Sandbox(SandboxErr::Timeout { output })) => output,
            other => panic!("expected timeout error, got {other:?}"),
        };
        assert!(output.timed_out);
        assert_eq!(output.exit_code, EXEC_TIMEOUT_EXIT_CODE);
        Ok(())
    }

    #[cfg(unix)]
    fn long_running_command() -> Vec<String> {
        vec![
            "/bin/sh".to_string(),
            "-c".to_string(),
            "sleep 30".to_string(),
        ]
    }

    #[cfg(windows)]
    fn long_running_command() -> Vec<String> {
        vec![
            "powershell.exe".to_string(),
            "-NonInteractive".to_string(),
            "-NoLogo".to_string(),
            "-Command".to_string(),
            "Start-Sleep -Seconds 30".to_string(),
        ]
    }
}
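
The `tokio::select!` rewrite above is the heart of the change: the child's exit now races a generic `expiration.wait()` future instead of being wrapped in `tokio::time::timeout`. A standalone sketch of that race, with hypothetical names, runnable on any tokio runtime:

    use tokio_util::sync::CancellationToken;

    #[tokio::main]
    async fn main() {
        let cancel = CancellationToken::new();
        // Stand-in for `child.wait()`.
        let work = tokio::time::sleep(std::time::Duration::from_secs(60));
        let trigger = cancel.clone();
        tokio::spawn(async move { trigger.cancel() });
        tokio::select! {
            _ = work => println!("child exited first"),
            _ = cancel.cancelled() => println!("expiration fired; kill the process group"),
        }
    }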

@@ -109,7 +109,7 @@ fn evaluate_with_policy(
            }
            Decision::Allow => Some(ApprovalRequirement::Skip),
        },
        Evaluation::NoMatch => None,
        Evaluation::NoMatch { .. } => None,
    }
}

@@ -206,7 +206,7 @@ mod tests {
        let commands = [vec!["rm".to_string()]];
        assert!(matches!(
            policy.check_multiple(commands.iter()),
            Evaluation::NoMatch
            Evaluation::NoMatch { .. }
        ));
        assert!(!temp_dir.path().join(POLICY_DIR_NAME).exists());
    }
@@ -259,7 +259,7 @@ mod tests {
        let command = [vec!["ls".to_string()]];
        assert!(matches!(
            policy.check_multiple(command.iter()),
            Evaluation::NoMatch
            Evaluation::NoMatch { .. }
        ));
    }

@@ -31,9 +31,6 @@ pub enum Feature {
    GhostCommit,
    /// Use the single unified PTY-backed exec tool.
    UnifiedExec,
    /// Use the shell command tool that takes `command` as a single string of
    /// shell instead of an array of args passed to `execvp(3)`.
    ShellCommandTool,
    /// Enable experimental RMCP features such as OAuth login.
    RmcpClient,
    /// Include the freeform apply_patch tool.
@@ -275,12 +272,6 @@ pub const FEATURES: &[FeatureSpec] = &[
        stage: Stage::Experimental,
        default_enabled: false,
    },
    FeatureSpec {
        id: Feature::ShellCommandTool,
        key: "shell_command_tool",
        stage: Stage::Experimental,
        default_enabled: false,
    },
    FeatureSpec {
        id: Feature::RmcpClient,
        key: "rmcp_client",

@@ -825,11 +825,21 @@ mod tests {
        .await
        .expect("Should collect git info from repo");

        let remote_url_output = Command::new("git")
            .args(["remote", "get-url", "origin"])
            .current_dir(&repo_path)
            .output()
            .await
            .expect("Failed to read remote url");
        // Some dev environments rewrite remotes (e.g., force SSH), so compare against
        // whatever URL Git reports instead of a fixed placeholder.
        let expected_remote = String::from_utf8(remote_url_output.stdout)
            .unwrap()
            .trim()
            .to_string();

        // Should have repository URL
        assert_eq!(
            git_info.repository_url,
            Some("https://github.com/example/repo.git".to_string())
        );
        assert_eq!(git_info.repository_url, Some(expected_remote));
    }

    #[tokio::test]

@@ -39,6 +39,7 @@ pub mod parse_command;
pub mod powershell;
mod response_processing;
pub mod sandboxing;
mod text_encoding;
pub mod token_data;
mod truncate;
mod unified_exec;

@@ -137,6 +137,19 @@ pub fn find_family_for_model(slug: &str) -> Option<ModelFamily> {
        model_family!(slug, "gpt-4o", needs_special_apply_patch_instructions: true)
    } else if slug.starts_with("gpt-3.5") {
        model_family!(slug, "gpt-3.5", needs_special_apply_patch_instructions: true)
    } else if slug.starts_with("robin") {
        model_family!(
            slug, "gpt-5.1",
            supports_reasoning_summaries: true,
            apply_patch_tool_type: Some(ApplyPatchToolType::Freeform),
            support_verbosity: true,
            default_verbosity: Some(Verbosity::Low),
            base_instructions: GPT_5_1_INSTRUCTIONS.to_string(),
            default_reasoning_effort: Some(ReasoningEffort::Medium),
            truncation_policy: TruncationPolicy::Bytes(10_000),
            shell_type: ConfigShellToolType::ShellCommand,
            supports_parallel_tool_calls: true,
        )
    } else if slug.starts_with("test-gpt-5") {
        model_family!(
            slug, slug,

@@ -2,7 +2,6 @@ use crate::model_family::ModelFamily;

// Shared constants for commonly used window/token sizes.
pub(crate) const CONTEXT_WINDOW_272K: i64 = 272_000;
pub(crate) const MAX_OUTPUT_TOKENS_128K: i64 = 128_000;

/// Metadata about a model, particularly OpenAI models.
/// We may want to consider including details like the pricing for
@@ -14,19 +13,15 @@ pub(crate) struct ModelInfo {
    /// Size of the context window in tokens. This is the maximum size of the input context.
    pub(crate) context_window: i64,

    /// Maximum number of output tokens that can be generated for the model.
    pub(crate) max_output_tokens: i64,

    /// Token threshold where we should automatically compact conversation history. This considers
    /// input tokens + output tokens of this turn.
    pub(crate) auto_compact_token_limit: Option<i64>,
}

impl ModelInfo {
    const fn new(context_window: i64, max_output_tokens: i64) -> Self {
    const fn new(context_window: i64) -> Self {
        Self {
            context_window,
            max_output_tokens,
            auto_compact_token_limit: Some(Self::default_auto_compact_limit(context_window)),
        }
    }
@@ -42,48 +37,44 @@ pub(crate) fn get_model_info(model_family: &ModelFamily) -> Option<ModelInfo> {
        // OSS models have a 128k shared token pool.
        // Arbitrarily splitting it: 3/4 input context, 1/4 output.
        // https://openai.com/index/gpt-oss-model-card/
        "gpt-oss-20b" => Some(ModelInfo::new(96_000, 32_000)),
        "gpt-oss-120b" => Some(ModelInfo::new(96_000, 32_000)),
        "gpt-oss-20b" => Some(ModelInfo::new(96_000)),
        "gpt-oss-120b" => Some(ModelInfo::new(96_000)),
        // https://platform.openai.com/docs/models/o3
        "o3" => Some(ModelInfo::new(200_000, 100_000)),
        "o3" => Some(ModelInfo::new(200_000)),

        // https://platform.openai.com/docs/models/o4-mini
        "o4-mini" => Some(ModelInfo::new(200_000, 100_000)),
        "o4-mini" => Some(ModelInfo::new(200_000)),

        // https://platform.openai.com/docs/models/codex-mini-latest
        "codex-mini-latest" => Some(ModelInfo::new(200_000, 100_000)),
        "codex-mini-latest" => Some(ModelInfo::new(200_000)),

        // As of Jun 25, 2025, gpt-4.1 defaults to gpt-4.1-2025-04-14.
        // https://platform.openai.com/docs/models/gpt-4.1
        "gpt-4.1" | "gpt-4.1-2025-04-14" => Some(ModelInfo::new(1_047_576, 32_768)),
        "gpt-4.1" | "gpt-4.1-2025-04-14" => Some(ModelInfo::new(1_047_576)),

        // As of Jun 25, 2025, gpt-4o defaults to gpt-4o-2024-08-06.
        // https://platform.openai.com/docs/models/gpt-4o
        "gpt-4o" | "gpt-4o-2024-08-06" => Some(ModelInfo::new(128_000, 16_384)),
        "gpt-4o" | "gpt-4o-2024-08-06" => Some(ModelInfo::new(128_000)),

        // https://platform.openai.com/docs/models/gpt-4o?snapshot=gpt-4o-2024-05-13
        "gpt-4o-2024-05-13" => Some(ModelInfo::new(128_000, 4_096)),
        "gpt-4o-2024-05-13" => Some(ModelInfo::new(128_000)),

        // https://platform.openai.com/docs/models/gpt-4o?snapshot=gpt-4o-2024-11-20
        "gpt-4o-2024-11-20" => Some(ModelInfo::new(128_000, 16_384)),
        "gpt-4o-2024-11-20" => Some(ModelInfo::new(128_000)),

        // https://platform.openai.com/docs/models/gpt-3.5-turbo
        "gpt-3.5-turbo" => Some(ModelInfo::new(16_385, 4_096)),
        "gpt-3.5-turbo" => Some(ModelInfo::new(16_385)),

        _ if slug.starts_with("gpt-5-codex")
            || slug.starts_with("gpt-5.1-codex")
            || slug.starts_with("gpt-5.1-codex-max") =>
        {
            Some(ModelInfo::new(CONTEXT_WINDOW_272K, MAX_OUTPUT_TOKENS_128K))
            Some(ModelInfo::new(CONTEXT_WINDOW_272K))
        }

        _ if slug.starts_with("gpt-5") => {
            Some(ModelInfo::new(CONTEXT_WINDOW_272K, MAX_OUTPUT_TOKENS_128K))
        }
        _ if slug.starts_with("gpt-5") => Some(ModelInfo::new(CONTEXT_WINDOW_272K)),

        _ if slug.starts_with("codex-") => {
            Some(ModelInfo::new(CONTEXT_WINDOW_272K, MAX_OUTPUT_TOKENS_128K))
        }
        _ if slug.starts_with("codex-") => Some(ModelInfo::new(CONTEXT_WINDOW_272K)),

        _ => None,
    }
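
The 96_000 figure in the OSS arms follows from the comment's 3/4 split of the 128k shared pool; as a quick check (illustrative only):

    // 128k shared pool, 3/4 reserved for input context, 1/4 for output:
    assert_eq!(128_000 * 3 / 4, 96_000);
    assert_eq!(128_000 / 4, 32_000); // the old max_output_tokens value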

@@ -8,6 +8,7 @@ ready‑to‑spawn environment.

pub mod assessment;

use crate::exec::ExecExpiration;
use crate::exec::ExecToolCallOutput;
use crate::exec::SandboxType;
use crate::exec::StdoutStream;
@@ -48,23 +49,23 @@ impl From<bool> for SandboxPermissions {
    }
}

#[derive(Clone, Debug)]
#[derive(Debug)]
pub struct CommandSpec {
    pub program: String,
    pub args: Vec<String>,
    pub cwd: PathBuf,
    pub env: HashMap<String, String>,
    pub timeout_ms: Option<u64>,
    pub expiration: ExecExpiration,
    pub with_escalated_permissions: Option<bool>,
    pub justification: Option<String>,
}

#[derive(Clone, Debug)]
#[derive(Debug)]
pub struct ExecEnv {
    pub command: Vec<String>,
    pub cwd: PathBuf,
    pub env: HashMap<String, String>,
    pub timeout_ms: Option<u64>,
    pub expiration: ExecExpiration,
    pub sandbox: SandboxType,
    pub with_escalated_permissions: Option<bool>,
    pub justification: Option<String>,
@@ -115,13 +116,13 @@ impl SandboxManager {

    pub(crate) fn transform(
        &self,
        spec: &CommandSpec,
        mut spec: CommandSpec,
        policy: &SandboxPolicy,
        sandbox: SandboxType,
        sandbox_policy_cwd: &Path,
        codex_linux_sandbox_exe: Option<&PathBuf>,
    ) -> Result<ExecEnv, SandboxTransformError> {
        let mut env = spec.env.clone();
        let mut env = spec.env;
        if !policy.has_full_network_access() {
            env.insert(
                CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR.to_string(),
@@ -130,8 +131,8 @@ impl SandboxManager {
        }

        let mut command = Vec::with_capacity(1 + spec.args.len());
        command.push(spec.program.clone());
        command.extend(spec.args.iter().cloned());
        command.push(spec.program);
        command.append(&mut spec.args);

        let (command, sandbox_env, arg0_override) = match sandbox {
            SandboxType::None => (command, HashMap::new(), None),
@@ -176,12 +177,12 @@ impl SandboxManager {

        Ok(ExecEnv {
            command,
            cwd: spec.cwd.clone(),
            cwd: spec.cwd,
            env,
            timeout_ms: spec.timeout_ms,
            expiration: spec.expiration,
            sandbox,
            with_escalated_permissions: spec.with_escalated_permissions,
            justification: spec.justification.clone(),
            justification: spec.justification,
            arg0: arg0_override,
        })
    }
@@ -192,9 +193,9 @@ impl SandboxManager {
    }
}

pub async fn execute_env(
    env: &ExecEnv,
    env: ExecEnv,
    policy: &SandboxPolicy,
    stdout_stream: Option<StdoutStream>,
) -> crate::error::Result<ExecToolCallOutput> {
    execute_exec_env(env.clone(), policy, stdout_stream).await
    execute_exec_env(env, policy, stdout_stream).await
}

@@ -7,61 +7,41 @@ pub enum ShellType {
    Zsh,
    Bash,
    PowerShell,
    Sh,
    Cmd,
}

#[derive(Debug, PartialEq, Eq, Clone, Serialize, Deserialize)]
pub struct ZshShell {
pub struct Shell {
    pub(crate) shell_type: ShellType,
    pub(crate) shell_path: PathBuf,
}

#[derive(Debug, PartialEq, Eq, Clone, Serialize, Deserialize)]
pub struct BashShell {
    pub(crate) shell_path: PathBuf,
}

#[derive(Debug, PartialEq, Eq, Clone, Serialize, Deserialize)]
pub struct PowerShellConfig {
    pub(crate) shell_path: PathBuf, // Executable name or path, e.g. "pwsh" or "powershell.exe".
}

#[derive(Debug, PartialEq, Eq, Clone, Serialize, Deserialize)]
pub enum Shell {
    Zsh(ZshShell),
    Bash(BashShell),
    PowerShell(PowerShellConfig),
    Unknown,
}

impl Shell {
    pub fn name(&self) -> Option<String> {
        match self {
            Shell::Zsh(ZshShell { shell_path, .. }) | Shell::Bash(BashShell { shell_path, .. }) => {
                std::path::Path::new(shell_path)
                    .file_name()
                    .map(|s| s.to_string_lossy().to_string())
            }
            Shell::PowerShell(ps) => ps
                .shell_path
                .file_stem()
                .map(|s| s.to_string_lossy().to_string()),
            Shell::Unknown => None,
    pub fn name(&self) -> &'static str {
        match self.shell_type {
            ShellType::Zsh => "zsh",
            ShellType::Bash => "bash",
            ShellType::PowerShell => "powershell",
            ShellType::Sh => "sh",
            ShellType::Cmd => "cmd",
        }
    }

    /// Takes a string of shell and returns the full list of command args to
    /// use with `exec()` to run the shell command.
    pub fn derive_exec_args(&self, command: &str, use_login_shell: bool) -> Vec<String> {
        match self {
            Shell::Zsh(ZshShell { shell_path, .. }) | Shell::Bash(BashShell { shell_path, .. }) => {
        match self.shell_type {
            ShellType::Zsh | ShellType::Bash | ShellType::Sh => {
                let arg = if use_login_shell { "-lc" } else { "-c" };
                vec![
                    shell_path.to_string_lossy().to_string(),
                    self.shell_path.to_string_lossy().to_string(),
                    arg.to_string(),
                    command.to_string(),
                ]
            }
            Shell::PowerShell(ps) => {
                let mut args = vec![ps.shell_path.to_string_lossy().to_string()];
            ShellType::PowerShell => {
                let mut args = vec![self.shell_path.to_string_lossy().to_string()];
                if !use_login_shell {
                    args.push("-NoProfile".to_string());
                }
@@ -70,7 +50,12 @@ impl Shell {
                args.push(command.to_string());
                args
            }
            Shell::Unknown => shlex::split(command).unwrap_or_else(|| vec![command.to_string()]),
            ShellType::Cmd => {
                let mut args = vec![self.shell_path.to_string_lossy().to_string()];
                args.push("/c".to_string());
                args.push(command.to_string());
                args
            }
        }
    }
}
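
With the enum variants collapsed into one struct, constructing and using a shell is uniform across shell types. An illustrative sketch, not part of the diff, using only the APIs above:

    let bash = Shell {
        shell_type: ShellType::Bash,
        shell_path: PathBuf::from("/bin/bash"),
    };
    assert_eq!(bash.name(), "bash");
    // Non-login invocation: ["/bin/bash", "-c", "echo hi"]
    assert_eq!(
        bash.derive_exec_args("echo hi", false),
        vec!["/bin/bash".to_string(), "-c".to_string(), "echo hi".to_string()]
    );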
@@ -143,19 +128,34 @@ fn get_shell_path(
    None
}

fn get_zsh_shell(path: Option<&PathBuf>) -> Option<ZshShell> {
fn get_zsh_shell(path: Option<&PathBuf>) -> Option<Shell> {
    let shell_path = get_shell_path(ShellType::Zsh, path, "zsh", vec!["/bin/zsh"]);

    shell_path.map(|shell_path| ZshShell { shell_path })
    shell_path.map(|shell_path| Shell {
        shell_type: ShellType::Zsh,
        shell_path,
    })
}

fn get_bash_shell(path: Option<&PathBuf>) -> Option<BashShell> {
fn get_bash_shell(path: Option<&PathBuf>) -> Option<Shell> {
    let shell_path = get_shell_path(ShellType::Bash, path, "bash", vec!["/bin/bash"]);

    shell_path.map(|shell_path| BashShell { shell_path })
    shell_path.map(|shell_path| Shell {
        shell_type: ShellType::Bash,
        shell_path,
    })
}

fn get_powershell_shell(path: Option<&PathBuf>) -> Option<PowerShellConfig> {
fn get_sh_shell(path: Option<&PathBuf>) -> Option<Shell> {
    let shell_path = get_shell_path(ShellType::Sh, path, "sh", vec!["/bin/sh"]);

    shell_path.map(|shell_path| Shell {
        shell_type: ShellType::Sh,
        shell_path,
    })
}

fn get_powershell_shell(path: Option<&PathBuf>) -> Option<Shell> {
    let shell_path = get_shell_path(
        ShellType::PowerShell,
        path,
@@ -164,26 +164,56 @@ fn get_powershell_shell(path: Option<&PathBuf>) -> Option<Shell> {
    )
    .or_else(|| get_shell_path(ShellType::PowerShell, path, "powershell", vec![]));

    shell_path.map(|shell_path| PowerShellConfig { shell_path })
    shell_path.map(|shell_path| Shell {
        shell_type: ShellType::PowerShell,
        shell_path,
    })
}

fn get_cmd_shell(path: Option<&PathBuf>) -> Option<Shell> {
    let shell_path = get_shell_path(ShellType::Cmd, path, "cmd", vec![]);

    shell_path.map(|shell_path| Shell {
        shell_type: ShellType::Cmd,
        shell_path,
    })
}

fn ultimate_fallback_shell() -> Shell {
    if cfg!(windows) {
        Shell {
            shell_type: ShellType::Cmd,
            shell_path: PathBuf::from("cmd.exe"),
        }
    } else {
        Shell {
            shell_type: ShellType::Sh,
            shell_path: PathBuf::from("/bin/sh"),
        }
    }
}

pub fn get_shell_by_model_provided_path(shell_path: &PathBuf) -> Shell {
    detect_shell_type(shell_path)
        .and_then(|shell_type| get_shell(shell_type, Some(shell_path)))
        .unwrap_or(Shell::Unknown)
        .unwrap_or(ultimate_fallback_shell())
}
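
In practice (a hedged sketch, assuming the detector has no match for fish), a model-provided path that is not a recognized shell now degrades to a real, spawnable shell rather than the removed `Shell::Unknown`:

    let shell = get_shell_by_model_provided_path(&PathBuf::from("/bin/fish"));
    // On Unix this should be the /bin/sh fallback (cmd.exe on Windows).
    assert_eq!(shell, ultimate_fallback_shell());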

pub fn get_shell(shell_type: ShellType, path: Option<&PathBuf>) -> Option<Shell> {
    match shell_type {
        ShellType::Zsh => get_zsh_shell(path).map(Shell::Zsh),
        ShellType::Bash => get_bash_shell(path).map(Shell::Bash),
        ShellType::PowerShell => get_powershell_shell(path).map(Shell::PowerShell),
        ShellType::Zsh => get_zsh_shell(path),
        ShellType::Bash => get_bash_shell(path),
        ShellType::PowerShell => get_powershell_shell(path),
        ShellType::Sh => get_sh_shell(path),
        ShellType::Cmd => get_cmd_shell(path),
    }
}

pub fn detect_shell_type(shell_path: &PathBuf) -> Option<ShellType> {
    match shell_path.as_os_str().to_str() {
        Some("zsh") => Some(ShellType::Zsh),
        Some("sh") => Some(ShellType::Sh),
        Some("cmd") => Some(ShellType::Cmd),
        Some("bash") => Some(ShellType::Bash),
        Some("pwsh") => Some(ShellType::PowerShell),
        Some("powershell") => Some(ShellType::PowerShell),
@@ -200,11 +230,15 @@ pub fn detect_shell_type(shell_path: &PathBuf) -> Option<ShellType> {
    }
}

pub async fn default_user_shell() -> Shell {
pub fn default_user_shell() -> Shell {
    default_user_shell_from_path(get_user_shell_path())
}

fn default_user_shell_from_path(user_shell_path: Option<PathBuf>) -> Shell {
    if cfg!(windows) {
        get_shell(ShellType::PowerShell, None).unwrap_or(Shell::Unknown)
        get_shell(ShellType::PowerShell, None).unwrap_or(ultimate_fallback_shell())
    } else {
        let user_default_shell = get_user_shell_path()
        let user_default_shell = user_shell_path
            .and_then(|shell| detect_shell_type(&shell))
            .and_then(|shell_type| get_shell(shell_type, None));

@@ -218,7 +252,7 @@ pub async fn default_user_shell() -> Shell {
            .or_else(|| get_shell(ShellType::Zsh, None))
        };

        shell_with_fallback.unwrap_or(Shell::Unknown)
        shell_with_fallback.unwrap_or(ultimate_fallback_shell())
    }
}

@@ -274,6 +308,19 @@ mod detect_shell_type_tests {
            detect_shell_type(&PathBuf::from("/usr/local/bin/pwsh")),
            Some(ShellType::PowerShell)
        );
        assert_eq!(
            detect_shell_type(&PathBuf::from("/bin/sh")),
            Some(ShellType::Sh)
        );
        assert_eq!(detect_shell_type(&PathBuf::from("sh")), Some(ShellType::Sh));
        assert_eq!(
            detect_shell_type(&PathBuf::from("cmd")),
            Some(ShellType::Cmd)
        );
        assert_eq!(
            detect_shell_type(&PathBuf::from("cmd.exe")),
            Some(ShellType::Cmd)
        );
    }
}

@@ -289,10 +336,17 @@ mod tests {
    fn detects_zsh() {
        let zsh_shell = get_shell(ShellType::Zsh, None).unwrap();

        let ZshShell { shell_path } = match zsh_shell {
            Shell::Zsh(zsh_shell) => zsh_shell,
            _ => panic!("expected zsh shell"),
        };
        let shell_path = zsh_shell.shell_path;

        assert_eq!(shell_path, PathBuf::from("/bin/zsh"));
    }

    #[test]
    #[cfg(target_os = "macos")]
    fn fish_fallback_to_zsh() {
        let zsh_shell = default_user_shell_from_path(Some(PathBuf::from("/bin/fish")));

        let shell_path = zsh_shell.shell_path;

        assert_eq!(shell_path, PathBuf::from("/bin/zsh"));
    }
@@ -300,18 +354,60 @@ mod tests {
    #[test]
    fn detects_bash() {
        let bash_shell = get_shell(ShellType::Bash, None).unwrap();
        let BashShell { shell_path } = match bash_shell {
            Shell::Bash(bash_shell) => bash_shell,
            _ => panic!("expected bash shell"),
        };
        let shell_path = bash_shell.shell_path;

        assert!(
            shell_path == PathBuf::from("/bin/bash")
                || shell_path == PathBuf::from("/usr/bin/bash"),
                || shell_path == PathBuf::from("/usr/bin/bash")
                || shell_path == PathBuf::from("/usr/local/bin/bash"),
            "shell path: {shell_path:?}",
        );
    }

    #[test]
    fn detects_sh() {
        let sh_shell = get_shell(ShellType::Sh, None).unwrap();
        let shell_path = sh_shell.shell_path;
        assert!(
            shell_path == PathBuf::from("/bin/sh") || shell_path == PathBuf::from("/usr/bin/sh"),
            "shell path: {shell_path:?}",
        );
    }

    #[test]
    fn can_run_on_shell_test() {
        let cmd = "echo \"Works\"";
        if cfg!(windows) {
            assert!(shell_works(
                get_shell(ShellType::PowerShell, None),
                "Out-String 'Works'",
                true,
            ));
            assert!(shell_works(get_shell(ShellType::Cmd, None), cmd, true,));
            assert!(shell_works(Some(ultimate_fallback_shell()), cmd, true));
        } else {
            assert!(shell_works(Some(ultimate_fallback_shell()), cmd, true));
            assert!(shell_works(get_shell(ShellType::Zsh, None), cmd, false));
            assert!(shell_works(get_shell(ShellType::Bash, None), cmd, true));
            assert!(shell_works(get_shell(ShellType::Sh, None), cmd, true));
        }
    }

    fn shell_works(shell: Option<Shell>, command: &str, required: bool) -> bool {
        if let Some(shell) = shell {
            let args = shell.derive_exec_args(command, false);
            let output = Command::new(args[0].clone())
                .args(&args[1..])
                .output()
                .unwrap();
            assert!(output.status.success());
            assert!(String::from_utf8_lossy(&output.stdout).contains("Works"));
            true
        } else {
            !required
        }
    }

    #[tokio::test]
    async fn test_current_shell_detects_zsh() {
        let shell = Command::new("sh")
@@ -323,10 +419,11 @@ mod tests {
        let shell_path = String::from_utf8_lossy(&shell.stdout).trim().to_string();
        if shell_path.ends_with("/zsh") {
            assert_eq!(
                default_user_shell().await,
                Shell::Zsh(ZshShell {
                default_user_shell(),
                Shell {
                    shell_type: ShellType::Zsh,
                    shell_path: PathBuf::from(shell_path),
                })
                }
            );
        }
    }
@@ -337,11 +434,8 @@ mod tests {
            return;
        }

        let powershell_shell = default_user_shell().await;
        let PowerShellConfig { shell_path } = match powershell_shell {
            Shell::PowerShell(powershell_shell) => powershell_shell,
            _ => panic!("expected powershell shell"),
        };
        let powershell_shell = default_user_shell();
        let shell_path = powershell_shell.shell_path;

        assert!(shell_path.ends_with("pwsh.exe") || shell_path.ends_with("powershell.exe"));
    }
@@ -353,10 +447,7 @@ mod tests {
        }

        let powershell_shell = get_shell(ShellType::PowerShell, None).unwrap();
        let PowerShellConfig { shell_path } = match powershell_shell {
            Shell::PowerShell(powershell_shell) => powershell_shell,
            _ => panic!("expected powershell shell"),
        };
        let shell_path = powershell_shell.shell_path;

        assert!(shell_path.ends_with("pwsh.exe") || shell_path.ends_with("powershell.exe"));
    }

@@ -31,6 +31,8 @@ use crate::user_shell_command::user_shell_command_record_item;
use super::SessionTask;
use super::SessionTaskContext;

const USER_SHELL_TIMEOUT_MS: u64 = 60 * 60 * 1000; // 1 hour

#[derive(Clone)]
pub(crate) struct UserShellCommandTask {
    command: String,
@@ -93,7 +95,9 @@ impl SessionTask for UserShellCommandTask {
            command: command.clone(),
            cwd: cwd.clone(),
            env: create_env(&turn_context.shell_environment_policy),
            timeout_ms: None,
            // TODO(zhao-oai): Now that we have ExecExpiration::Cancellation, we
            // should use that instead of an "arbitrarily large" timeout here.
            expiration: USER_SHELL_TIMEOUT_MS.into(),
            sandbox: SandboxType::None,
            with_escalated_permissions: None,
            justification: None,

codex-rs/core/src/text_encoding.rs (new file, 461 lines)
@@ -0,0 +1,461 @@
//! Text encoding detection and conversion utilities for shell output.
//!
//! Windows users frequently run into code pages such as CP1251 or CP866 when invoking commands
//! through VS Code. Those bytes show up as invalid UTF-8 and used to be replaced with the standard
//! Unicode replacement character. We now lean on `chardetng` and `encoding_rs` so we can
//! automatically detect and decode the vast majority of legacy encodings before falling back to
//! lossy UTF-8 decoding.

use chardetng::EncodingDetector;
use encoding_rs::Encoding;
use encoding_rs::IBM866;
use encoding_rs::WINDOWS_1252;

/// Attempts to convert arbitrary bytes to UTF-8 with best-effort encoding detection.
pub fn bytes_to_string_smart(bytes: &[u8]) -> String {
    if bytes.is_empty() {
        return String::new();
    }

    if let Ok(utf8_str) = std::str::from_utf8(bytes) {
        return utf8_str.to_owned();
    }

    let encoding = detect_encoding(bytes);
    decode_bytes(bytes, encoding)
}
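
A minimal sketch of the decode flow above (illustrative; the CP1251 byte literal mirrors a test further down):

    // Valid UTF-8 takes the fast path and is returned unchanged.
    assert_eq!(bytes_to_string_smart("héllo".as_bytes()), "héllo");
    // Invalid UTF-8 goes through chardetng: CP1251 bytes for "пример" decode cleanly.
    assert_eq!(bytes_to_string_smart(b"\xEF\xF0\xE8\xEC\xE5\xF0"), "пример");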
|
||||
|
||||
// Windows-1252 reassigns a handful of 0x80-0x9F slots to smart punctuation (curly quotes, dashes,
|
||||
// ™). CP866 uses those *same byte values* for uppercase Cyrillic letters. When chardetng sees shell
|
||||
// snippets that mix these bytes with ASCII it sometimes guesses IBM866, so “smart quotes” render as
|
||||
// Cyrillic garbage (“УФЦ”) in VS Code. However, CP866 uppercase tokens are perfectly valid output
|
||||
// (e.g., `ПРИ test`) so we cannot flip every 0x80-0x9F byte to Windows-1252 either. The compromise
|
||||
// is to only coerce IBM866 to Windows-1252 when (a) the high bytes are exclusively the punctuation
|
||||
// values listed below and (b) we spot adjacent ASCII. This targets the real failure case without
|
||||
// clobbering legitimate Cyrillic text. If another code page has a similar collision, introduce a
|
||||
// dedicated allowlist (like this one) plus unit tests that capture the actual shell output we want
|
||||
// to preserve. Windows-1252 byte values for smart punctuation.
|
||||
const WINDOWS_1252_PUNCT_BYTES: [u8; 8] = [
|
||||
0x91, // ‘ (left single quotation mark)
|
||||
0x92, // ’ (right single quotation mark)
|
||||
0x93, // “ (left double quotation mark)
|
||||
0x94, // ” (right double quotation mark)
|
||||
0x95, // • (bullet)
|
||||
0x96, // – (en dash)
|
||||
0x97, // — (em dash)
|
||||
0x99, // ™ (trade mark sign)
|
||||
];
|
||||
|
||||
fn detect_encoding(bytes: &[u8]) -> &'static Encoding {
|
||||
let mut detector = EncodingDetector::new();
|
||||
detector.feed(bytes, true);
|
||||
let (encoding, _is_confident) = detector.guess_assess(None, true);
|
||||
|
||||
// chardetng occasionally reports IBM866 for short strings that only contain Windows-1252 “smart
|
||||
// punctuation” bytes (0x80-0x9F) because that range maps to Cyrillic letters in IBM866. When
|
||||
// those bytes show up alongside an ASCII word (typical shell output: `"“`test), we know the
|
||||
// intent was likely CP1252 quotes/dashes. Prefer WINDOWS_1252 in that specific situation so we
|
||||
// render the characters users expect instead of Cyrillic junk. References:
|
||||
// - Windows-1252 reserving 0x80-0x9F for curly quotes/dashes:
|
||||
// https://en.wikipedia.org/wiki/Windows-1252
|
||||
// - CP866 mapping 0x93/0x94/0x96 to Cyrillic letters, so the same bytes show up as “УФЦ” when
|
||||
// mis-decoded: https://www.unicode.org/Public/MAPPINGS/VENDORS/MICSFT/PC/CP866.TXT
|
||||
if encoding == IBM866 && looks_like_windows_1252_punctuation(bytes) {
|
||||
return WINDOWS_1252;
|
||||
}
|
||||
|
||||
encoding
|
||||
}
|
||||
|
||||
fn decode_bytes(bytes: &[u8], encoding: &'static Encoding) -> String {
|
||||
let (decoded, _, had_errors) = encoding.decode(bytes);
|
||||
|
||||
if had_errors {
|
||||
return String::from_utf8_lossy(bytes).into_owned();
|
||||
}
|
||||
|
||||
decoded.into_owned()
|
||||
}
|
||||
|
||||
/// Detect whether the byte stream looks like Windows-1252 “smart punctuation” wrapped around
|
||||
/// otherwise-ASCII text.
|
||||
///
|
||||
/// Context: IBM866 and Windows-1252 share the 0x80-0x9F slot range. In IBM866 these bytes decode to
|
||||
/// Cyrillic letters, whereas Windows-1252 maps them to curly quotes and dashes. chardetng can guess
|
||||
/// IBM866 for short snippets that only contain those bytes, which turns shell output such as
|
||||
/// `“test”` into unreadable Cyrillic. To avoid that, we treat inputs comprising a handful of bytes
|
||||
/// from the problematic range plus ASCII letters as CP1252 punctuation. We deliberately do *not*
|
||||
/// cap how many of those punctuation bytes we accept: VS Code frequently prints several quoted
|
||||
/// phrases (e.g., `"foo" – "bar"`), and truncating the count would once again mis-decode those as
|
||||
/// Cyrillic. If we discover additional encodings with overlapping byte ranges, prefer adding
|
||||
/// encoding-specific byte allowlists like `WINDOWS_1252_PUNCT` and tests that exercise real-world
|
||||
/// shell snippets.
|
||||
fn looks_like_windows_1252_punctuation(bytes: &[u8]) -> bool {
|
||||
let mut saw_extended_punctuation = false;
|
||||
let mut saw_ascii_word = false;
|
||||
|
||||
for &byte in bytes {
|
||||
if byte >= 0xA0 {
|
||||
return false;
|
||||
}
|
||||
if (0x80..=0x9F).contains(&byte) {
|
||||
if !is_windows_1252_punct(byte) {
|
||||
return false;
|
||||
}
|
||||
saw_extended_punctuation = true;
|
||||
}
|
||||
if byte.is_ascii_alphabetic() {
|
||||
saw_ascii_word = true;
|
||||
}
|
||||
}
|
||||
|
||||
saw_extended_punctuation && saw_ascii_word
|
||||
}
|
||||
|
||||
fn is_windows_1252_punct(byte: u8) -> bool {
|
||||
WINDOWS_1252_PUNCT_BYTES.contains(&byte)
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use encoding_rs::BIG5;
|
||||
use encoding_rs::EUC_KR;
|
||||
use encoding_rs::GBK;
|
||||
use encoding_rs::ISO_8859_2;
|
||||
use encoding_rs::ISO_8859_3;
|
||||
use encoding_rs::ISO_8859_4;
|
||||
use encoding_rs::ISO_8859_5;
|
||||
use encoding_rs::ISO_8859_6;
|
||||
use encoding_rs::ISO_8859_7;
|
||||
use encoding_rs::ISO_8859_8;
|
||||
use encoding_rs::ISO_8859_10;
|
||||
use encoding_rs::ISO_8859_13;
|
||||
use encoding_rs::SHIFT_JIS;
|
||||
use encoding_rs::WINDOWS_874;
|
||||
use encoding_rs::WINDOWS_1250;
|
||||
use encoding_rs::WINDOWS_1251;
|
||||
use encoding_rs::WINDOWS_1253;
|
||||
use encoding_rs::WINDOWS_1254;
|
||||
use encoding_rs::WINDOWS_1255;
|
||||
use encoding_rs::WINDOWS_1256;
|
||||
use encoding_rs::WINDOWS_1257;
|
||||
use encoding_rs::WINDOWS_1258;
|
||||
use pretty_assertions::assert_eq;
|
||||
|
||||
#[test]
|
||||
fn test_utf8_passthrough() {
|
||||
// Fast path: when UTF-8 is valid we should avoid copies and return as-is.
|
||||
let utf8_text = "Hello, мир! 世界";
|
||||
let bytes = utf8_text.as_bytes();
|
||||
assert_eq!(bytes_to_string_smart(bytes), utf8_text);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_cp1251_russian_text() {
|
||||
// Cyrillic text emitted by PowerShell/WSL in CP1251 should decode cleanly.
|
||||
let bytes = b"\xEF\xF0\xE8\xEC\xE5\xF0"; // "пример" encoded with Windows-1251
|
||||
assert_eq!(bytes_to_string_smart(bytes), "пример");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_cp1251_privet_word() {
|
||||
// Regression: CP1251 words like "Привет" must not be mis-identified as Windows-1252.
|
||||
let bytes = b"\xCF\xF0\xE8\xE2\xE5\xF2"; // "Привет" encoded with Windows-1251
|
||||
assert_eq!(bytes_to_string_smart(bytes), "Привет");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_koi8_r_privet_word() {
|
||||
// KOI8-R output should decode to the original Cyrillic as well.
|
||||
let bytes = b"\xF0\xD2\xC9\xD7\xC5\xD4"; // "Привет" encoded with KOI8-R
|
||||
assert_eq!(bytes_to_string_smart(bytes), "Привет");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_cp866_russian_text() {
|
||||
// Legacy consoles (cmd.exe) commonly emit CP866 bytes for Cyrillic content.
|
||||
let bytes = b"\xAF\xE0\xA8\xAC\xA5\xE0"; // "пример" encoded with CP866
|
||||
assert_eq!(bytes_to_string_smart(bytes), "пример");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_cp866_uppercase_text() {
|
||||
// Ensure the IBM866 heuristic still returns IBM866 for uppercase-only words.
|
||||
let bytes = b"\x8F\x90\x88"; // "ПРИ" encoded with CP866 uppercase letters
|
||||
assert_eq!(bytes_to_string_smart(bytes), "ПРИ");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_cp866_uppercase_followed_by_ascii() {
|
||||
// Regression test: uppercase CP866 tokens next to ASCII text should not be treated as
|
||||
// CP1252.
|
||||
let bytes = b"\x8F\x90\x88 test"; // "ПРИ test" encoded with CP866 uppercase letters followed by ASCII
|
||||
assert_eq!(bytes_to_string_smart(bytes), "ПРИ test");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_windows_1252_quotes() {
|
||||
// Smart detection should map Windows-1252 punctuation into proper Unicode.
|
||||
let bytes = b"\x93\x94test";
|
||||
assert_eq!(bytes_to_string_smart(bytes), "\u{201C}\u{201D}test");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_windows_1252_multiple_quotes() {
|
||||
// Longer snippets of punctuation (e.g., “foo” – “bar”) should still flip to CP1252.
|
||||
let bytes = b"\x93foo\x94 \x96 \x93bar\x94";
|
||||
assert_eq!(
|
||||
bytes_to_string_smart(bytes),
|
||||
"\u{201C}foo\u{201D} \u{2013} \u{201C}bar\u{201D}"
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_windows_1252_privet_gibberish_is_preserved() {
|
||||
// Windows-1252 cannot encode Cyrillic; if the input literally contains "ПÑ..." we should not "fix" it.
|
||||
let bytes = "Привет".as_bytes();
|
||||
assert_eq!(bytes_to_string_smart(bytes), "Привет");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_iso8859_1_latin_text() {
|
||||
// ISO-8859-1 (code page 28591) is the Latin segment used by LatArCyrHeb.
|
||||
// encoding_rs unifies ISO-8859-1 with Windows-1252, so reuse that constant here.
|
||||
let (encoded, _, had_errors) = WINDOWS_1252.encode("Hello");
|
||||
assert!(!had_errors, "failed to encode Latin sample");
|
||||
assert_eq!(bytes_to_string_smart(encoded.as_ref()), "Hello");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_iso8859_2_central_european_text() {
|
||||
// ISO-8859-2 (code page 28592) covers additional Central European glyphs.
|
||||
let (encoded, _, had_errors) = ISO_8859_2.encode("Příliš žluťoučký kůň");
|
||||
assert!(!had_errors, "failed to encode ISO-8859-2 sample");
|
||||
assert_eq!(
|
||||
bytes_to_string_smart(encoded.as_ref()),
|
||||
"Příliš žluťoučký kůň"
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_iso8859_3_south_europe_text() {
|
||||
// ISO-8859-3 (code page 28593) adds support for Maltese/Esperanto letters.
|
||||
// chardetng rarely distinguishes ISO-8859-3 from neighboring Latin code pages, so we rely on
|
||||
// an ASCII-only sample to ensure round-tripping still succeeds.
|
||||
let (encoded, _, had_errors) = ISO_8859_3.encode("Esperanto and Maltese");
|
||||
assert!(!had_errors, "failed to encode ISO-8859-3 sample");
|
||||
assert_eq!(
|
||||
bytes_to_string_smart(encoded.as_ref()),
|
||||
"Esperanto and Maltese"
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_iso8859_4_baltic_text() {
|
||||
// ISO-8859-4 (code page 28594) targets the Baltic/Nordic repertoire.
|
||||
let sample = "Šis ir rakstzīmju kodēšanas tests. Dažās valodās, kurās tiek \
|
||||
izmantotas latīņu valodas burti, lēmuma pieņemšanai mums ir nepieciešams \
|
||||
vairāk ieguldījuma.";
|
||||
let (encoded, _, had_errors) = ISO_8859_4.encode(sample);
|
||||
assert!(!had_errors, "failed to encode ISO-8859-4 sample");
|
||||
assert_eq!(bytes_to_string_smart(encoded.as_ref()), sample);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_iso8859_5_cyrillic_text() {
|
||||
// ISO-8859-5 (code page 28595) covers the Cyrillic portion.
|
||||
let (encoded, _, had_errors) = ISO_8859_5.encode("Привет");
|
||||
assert!(!had_errors, "failed to encode Cyrillic sample");
|
||||
assert_eq!(bytes_to_string_smart(encoded.as_ref()), "Привет");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_iso8859_6_arabic_text() {
|
||||
// ISO-8859-6 (code page 28596) covers the Arabic glyphs.
|
||||
let (encoded, _, had_errors) = ISO_8859_6.encode("مرحبا");
|
||||
assert!(!had_errors, "failed to encode Arabic sample");
|
||||
assert_eq!(bytes_to_string_smart(encoded.as_ref()), "مرحبا");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_iso8859_7_greek_text() {
|
||||
// ISO-8859-7 (code page 28597) is used for Greek locales.
|
||||
let (encoded, _, had_errors) = ISO_8859_7.encode("Καλημέρα");
|
||||
assert!(!had_errors, "failed to encode ISO-8859-7 sample");
|
||||
assert_eq!(bytes_to_string_smart(encoded.as_ref()), "Καλημέρα");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_iso8859_8_hebrew_text() {
|
||||
// ISO-8859-8 (code page 28598) covers the Hebrew glyphs.
|
||||
let (encoded, _, had_errors) = ISO_8859_8.encode("שלום");
|
||||
assert!(!had_errors, "failed to encode Hebrew sample");
|
||||
assert_eq!(bytes_to_string_smart(encoded.as_ref()), "שלום");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_iso8859_9_turkish_text() {
|
||||
// ISO-8859-9 (code page 28599) mirrors Latin-1 but inserts Turkish letters.
|
||||
// encoding_rs exposes the equivalent Windows-1254 mapping.
|
||||
let (encoded, _, had_errors) = WINDOWS_1254.encode("İstanbul");
|
||||
assert!(!had_errors, "failed to encode ISO-8859-9 sample");
|
||||
assert_eq!(bytes_to_string_smart(encoded.as_ref()), "İstanbul");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_iso8859_10_nordic_text() {
|
||||
// ISO-8859-10 (code page 28600) adds additional Nordic letters.
|
||||
let sample = "Þetta er prófun fyrir Ægir og Øystein.";
|
||||
let (encoded, _, had_errors) = ISO_8859_10.encode(sample);
|
||||
assert!(!had_errors, "failed to encode ISO-8859-10 sample");
|
||||
assert_eq!(bytes_to_string_smart(encoded.as_ref()), sample);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_iso8859_11_thai_text() {
|
||||
// ISO-8859-11 (code page 28601) mirrors TIS-620 / Windows-874 for Thai.
|
||||
let sample = "ภาษาไทยสำหรับการทดสอบ ISO-8859-11";
|
||||
// encoding_rs exposes the equivalent Windows-874 encoding, so use that constant.
|
||||
let (encoded, _, had_errors) = WINDOWS_874.encode(sample);
|
||||
assert!(!had_errors, "failed to encode ISO-8859-11 sample");
|
||||
assert_eq!(bytes_to_string_smart(encoded.as_ref()), sample);
|
||||
}
|
||||
|
||||
// ISO-8859-12 was never standardized, and encodings 14–16 cannot be distinguished reliably
|
||||
// without the heuristics we removed (chardetng generally reports neighboring Latin pages), so
|
||||
// we intentionally omit coverage for those slots until the detector can identify them.
|
||||
|
||||
#[test]
|
||||
fn test_iso8859_13_baltic_text() {
|
||||
// ISO-8859-13 (code page 28603) is common across Baltic languages.
|
||||
let (encoded, _, had_errors) = ISO_8859_13.encode("Sveiki");
|
||||
assert!(!had_errors, "failed to encode ISO-8859-13 sample");
|
||||
assert_eq!(bytes_to_string_smart(encoded.as_ref()), "Sveiki");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_windows_1250_central_european_text() {
|
||||
let (encoded, _, had_errors) = WINDOWS_1250.encode("Příliš žluťoučký kůň");
|
||||
assert!(!had_errors, "failed to encode Central European sample");
|
||||
assert_eq!(
|
||||
bytes_to_string_smart(encoded.as_ref()),
|
||||
"Příliš žluťoučký kůň"
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_windows_1251_encoded_text() {
|
||||
let (encoded, _, had_errors) = WINDOWS_1251.encode("Привет из Windows-1251");
|
||||
assert!(!had_errors, "failed to encode Windows-1251 sample");
|
||||
assert_eq!(
|
||||
bytes_to_string_smart(encoded.as_ref()),
|
||||
"Привет из Windows-1251"
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_windows_1253_greek_text() {
|
||||
let (encoded, _, had_errors) = WINDOWS_1253.encode("Γειά σου");
|
||||
assert!(!had_errors, "failed to encode Greek sample");
|
||||
assert_eq!(bytes_to_string_smart(encoded.as_ref()), "Γειά σου");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_windows_1254_turkish_text() {
|
||||
let (encoded, _, had_errors) = WINDOWS_1254.encode("İstanbul");
|
||||
assert!(!had_errors, "failed to encode Turkish sample");
|
||||
assert_eq!(bytes_to_string_smart(encoded.as_ref()), "İstanbul");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_windows_1255_hebrew_text() {
|
||||
let (encoded, _, had_errors) = WINDOWS_1255.encode("שלום");
|
||||
assert!(!had_errors, "failed to encode Windows-1255 Hebrew sample");
|
||||
assert_eq!(bytes_to_string_smart(encoded.as_ref()), "שלום");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_windows_1256_arabic_text() {
|
||||
let (encoded, _, had_errors) = WINDOWS_1256.encode("مرحبا");
|
||||
assert!(!had_errors, "failed to encode Windows-1256 Arabic sample");
|
||||
assert_eq!(bytes_to_string_smart(encoded.as_ref()), "مرحبا");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_windows_1257_baltic_text() {
|
||||
let (encoded, _, had_errors) = WINDOWS_1257.encode("Pērkons");
|
||||
assert!(!had_errors, "failed to encode Baltic sample");
|
||||
assert_eq!(bytes_to_string_smart(encoded.as_ref()), "Pērkons");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_windows_1258_vietnamese_text() {
|
||||
let (encoded, _, had_errors) = WINDOWS_1258.encode("Xin chào");
|
||||
assert!(!had_errors, "failed to encode Vietnamese sample");
|
||||
assert_eq!(bytes_to_string_smart(encoded.as_ref()), "Xin chào");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_windows_874_thai_text() {
|
||||
let (encoded, _, had_errors) = WINDOWS_874.encode("สวัสดีครับ นี่คือการทดสอบภาษาไทย");
|
||||
assert!(!had_errors, "failed to encode Thai sample");
|
||||
assert_eq!(
|
||||
bytes_to_string_smart(encoded.as_ref()),
|
||||
"สวัสดีครับ นี่คือการทดสอบภาษาไทย"
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_windows_932_shift_jis_text() {
|
||||
let (encoded, _, had_errors) = SHIFT_JIS.encode("こんにちは");
|
||||
assert!(!had_errors, "failed to encode Shift-JIS sample");
|
||||
assert_eq!(bytes_to_string_smart(encoded.as_ref()), "こんにちは");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_windows_936_gbk_text() {
|
||||
let (encoded, _, had_errors) = GBK.encode("你好,世界,这是一个测试");
|
||||
assert!(!had_errors, "failed to encode GBK sample");
|
||||
assert_eq!(
|
||||
bytes_to_string_smart(encoded.as_ref()),
|
||||
"你好,世界,这是一个测试"
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_windows_949_korean_text() {
|
||||
let (encoded, _, had_errors) = EUC_KR.encode("안녕하세요");
|
||||
assert!(!had_errors, "failed to encode Korean sample");
|
||||
assert_eq!(bytes_to_string_smart(encoded.as_ref()), "안녕하세요");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_windows_950_big5_text() {
|
||||
let (encoded, _, had_errors) = BIG5.encode("繁體");
|
||||
assert!(!had_errors, "failed to encode Big5 sample");
|
||||
assert_eq!(bytes_to_string_smart(encoded.as_ref()), "繁體");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_latin1_cafe() {
|
||||
// Latin-1 bytes remain common in Western-European locales; decode them directly.
let bytes = b"caf\xE9"; // codespell:ignore caf
assert_eq!(bytes_to_string_smart(bytes), "café");
}

#[test]
fn test_preserves_ansi_sequences() {
// ANSI escape sequences should survive regardless of the detected encoding.
let bytes = b"\x1b[31mred\x1b[0m";
assert_eq!(bytes_to_string_smart(bytes), "\x1b[31mred\x1b[0m");
}

#[test]
fn test_fallback_to_lossy() {
// Completely invalid sequences fall back to the old lossy behavior.
let invalid_bytes = [0xFF, 0xFE, 0xFD];
let result = bytes_to_string_smart(&invalid_bytes);
assert_eq!(result, String::from_utf8_lossy(&invalid_bytes));
}
}

@@ -37,7 +37,7 @@ impl ShellHandler {
ExecParams {
command: params.command,
cwd: turn_context.resolve_path(params.workdir.clone()),
timeout_ms: params.timeout_ms,
expiration: params.timeout_ms.into(),
env: create_env(&turn_context.shell_environment_policy),
with_escalated_permissions: params.with_escalated_permissions,
justification: params.justification,
@@ -59,7 +59,7 @@ impl ShellCommandHandler {
ExecParams {
command,
cwd: turn_context.resolve_path(params.workdir.clone()),
timeout_ms: params.timeout_ms,
expiration: params.timeout_ms.into(),
env: create_env(&turn_context.shell_environment_policy),
with_escalated_permissions: params.with_escalated_permissions,
justification: params.justification,
@@ -243,7 +243,7 @@ impl ShellHandler {
let req = ApplyPatchRequest {
patch: apply.action.patch.clone(),
cwd: apply.action.cwd.clone(),
timeout_ms: exec_params.timeout_ms,
timeout_ms: exec_params.expiration.timeout_ms(),
user_explicitly_approved: apply.user_explicitly_approved_this_action,
codex_exe: turn.codex_linux_sandbox_exe.clone(),
};
@@ -300,7 +300,7 @@ impl ShellHandler {
let req = ShellRequest {
command: exec_params.command.clone(),
cwd: exec_params.cwd.clone(),
timeout_ms: exec_params.timeout_ms,
timeout_ms: exec_params.expiration.timeout_ms(),
env: exec_params.env.clone(),
with_escalated_permissions: exec_params.with_escalated_permissions,
justification: exec_params.justification.clone(),
@@ -338,29 +338,30 @@ mod tests {
use std::path::PathBuf;

use crate::is_safe_command::is_known_safe_command;
use crate::shell::BashShell;
use crate::shell::PowerShellConfig;
use crate::shell::Shell;
use crate::shell::ZshShell;
use crate::shell::ShellType;

/// The logic for is_known_safe_command() has heuristics for known shells,
/// so we must ensure the commands generated by [ShellCommandHandler] can be
/// recognized as safe if the `command` is safe.
#[test]
fn commands_generated_by_shell_command_handler_can_be_matched_by_is_known_safe_command() {
let bash_shell = Shell::Bash(BashShell {
let bash_shell = Shell {
shell_type: ShellType::Bash,
shell_path: PathBuf::from("/bin/bash"),
});
};
assert_safe(&bash_shell, "ls -la");

let zsh_shell = Shell::Zsh(ZshShell {
let zsh_shell = Shell {
shell_type: ShellType::Zsh,
shell_path: PathBuf::from("/bin/zsh"),
});
};
assert_safe(&zsh_shell, "ls -la");

let powershell = Shell::PowerShell(PowerShellConfig {
let powershell = Shell {
shell_type: ShellType::PowerShell,
shell_path: PathBuf::from("pwsh.exe"),
});
};
assert_safe(&powershell, "ls -Name");
}

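The `assert_safe` helper this test calls sits outside the hunk. As a rough sketch, assuming `Shell::derive_exec_args` (used elsewhere in this change set) wraps a command string into the shell's argv, the helper presumably does something like:

// Hypothetical sketch of the assert_safe helper, which the hunk above calls
// but does not show. Assumes Shell::derive_exec_args(command, login) yields
// e.g. ["/bin/bash", "-lc", "ls -la"] for the bash shell.
fn assert_safe(shell: &Shell, command: &str) {
    let argv = shell.derive_exec_args(command, true);
    assert!(
        is_known_safe_command(&argv),
        "expected {argv:?} to be recognized as safe"
    );
}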
@@ -67,7 +67,7 @@ impl ApplyPatchRuntime {
program,
args: vec![CODEX_APPLY_PATCH_ARG1.to_string(), req.patch.clone()],
cwd: req.cwd.clone(),
timeout_ms: req.timeout_ms,
expiration: req.timeout_ms.into(),
// Run apply_patch with a minimal environment for determinism and to avoid leaks.
env: HashMap::new(),
with_escalated_permissions: None,
@@ -153,9 +153,9 @@ impl ToolRuntime<ApplyPatchRequest, ExecToolCallOutput> for ApplyPatchRuntime {
) -> Result<ExecToolCallOutput, ToolError> {
let spec = Self::build_command_spec(req)?;
let env = attempt
.env_for(&spec)
.env_for(spec)
.map_err(|err| ToolError::Codex(err.into()))?;
let out = execute_env(&env, attempt.policy, Self::stdout_stream(ctx))
let out = execute_env(env, attempt.policy, Self::stdout_stream(ctx))
.await
.map_err(ToolError::Codex)?;
Ok(out)

@@ -4,6 +4,7 @@ Module: runtimes
Concrete ToolRuntime implementations for specific tools. Each runtime stays
small and focused and reuses the orchestrator for approvals + sandbox + retry.
*/
use crate::exec::ExecExpiration;
use crate::sandboxing::CommandSpec;
use crate::tools::sandboxing::ToolError;
use std::collections::HashMap;
@@ -19,7 +20,7 @@ pub(crate) fn build_command_spec(
command: &[String],
cwd: &Path,
env: &HashMap<String, String>,
timeout_ms: Option<u64>,
expiration: ExecExpiration,
with_escalated_permissions: Option<bool>,
justification: Option<String>,
) -> Result<CommandSpec, ToolError> {
@@ -31,7 +32,7 @@ pub(crate) fn build_command_spec(
args: args.to_vec(),
cwd: cwd.to_path_buf(),
env: env.clone(),
timeout_ms,
expiration,
with_escalated_permissions,
justification,
})
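These hunks swap the raw `timeout_ms: Option<u64>` field for an `ExecExpiration` value. The type's definition is not part of the diff; judging by the `timeout_ms.into()`, `expiration.timeout_ms()`, and `ExecExpiration::DefaultTimeout` call sites, a minimal shape consistent with them might be:

// Hypothetical reconstruction of crate::exec::ExecExpiration; the real
// definition is not shown in this diff.
#[derive(Clone, Copy, Debug)]
pub enum ExecExpiration {
    /// Fall back to the built-in default timeout.
    DefaultTimeout,
    /// Expire after an explicit number of milliseconds.
    TimeoutMs(u64),
}

impl From<Option<u64>> for ExecExpiration {
    fn from(timeout_ms: Option<u64>) -> Self {
        timeout_ms.map_or(ExecExpiration::DefaultTimeout, ExecExpiration::TimeoutMs)
    }
}

// A From<u64> impl would also be needed for the `expiration: 1000.into()`
// call site that appears in a test further down.
impl From<u64> for ExecExpiration {
    fn from(ms: u64) -> Self {
        ExecExpiration::TimeoutMs(ms)
    }
}

impl ExecExpiration {
    /// Recover the explicit millisecond timeout, if one was set.
    pub fn timeout_ms(self) -> Option<u64> {
        match self {
            ExecExpiration::TimeoutMs(ms) => Some(ms),
            ExecExpiration::DefaultTimeout => None,
        }
    }
}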
@@ -133,14 +133,14 @@ impl ToolRuntime<ShellRequest, ExecToolCallOutput> for ShellRuntime {
&req.command,
&req.cwd,
&req.env,
req.timeout_ms,
req.timeout_ms.into(),
req.with_escalated_permissions,
req.justification.clone(),
)?;
let env = attempt
.env_for(&spec)
.env_for(spec)
.map_err(|err| ToolError::Codex(err.into()))?;
let out = execute_env(&env, attempt.policy, Self::stdout_stream(ctx))
let out = execute_env(env, attempt.policy, Self::stdout_stream(ctx))
.await
.map_err(ToolError::Codex)?;
Ok(out)

@@ -6,6 +6,7 @@ the session manager to spawn PTYs once an ExecEnv is prepared.
*/
use crate::error::CodexErr;
use crate::error::SandboxErr;
use crate::exec::ExecExpiration;
use crate::tools::runtimes::build_command_spec;
use crate::tools::sandboxing::Approvable;
use crate::tools::sandboxing::ApprovalCtx;
@@ -150,13 +151,13 @@ impl<'a> ToolRuntime<UnifiedExecRequest, UnifiedExecSession> for UnifiedExecRunt
&req.command,
&req.cwd,
&req.env,
None,
ExecExpiration::DefaultTimeout,
req.with_escalated_permissions,
req.justification.clone(),
)
.map_err(|_| ToolError::Rejected("missing command line for PTY".to_string()))?;
let exec_env = attempt
.env_for(&spec)
.env_for(spec)
.map_err(|err| ToolError::Codex(err.into()))?;
self.manager
.open_session_with_exec_env(&exec_env)

@@ -216,7 +216,7 @@ pub(crate) struct SandboxAttempt<'a> {
impl<'a> SandboxAttempt<'a> {
pub fn env_for(
&self,
spec: &CommandSpec,
spec: CommandSpec,
) -> Result<crate::sandboxing::ExecEnv, SandboxTransformError> {
self.manager.transform(
spec,

@@ -57,8 +57,6 @@ impl ToolsConfig {
ConfigShellToolType::Disabled
} else if features.enabled(Feature::UnifiedExec) {
ConfigShellToolType::UnifiedExec
} else if features.enabled(Feature::ShellCommandTool) {
ConfigShellToolType::ShellCommand
} else {
model_family.shell_type.clone()
};
@@ -1468,22 +1466,6 @@ mod tests {
assert_contains_tool_names(&tools, &subset);
}

#[test]
fn test_build_specs_shell_command_present() {
assert_model_tools(
"codex-mini-latest",
Features::with_defaults().enable(Feature::ShellCommandTool),
&[
"shell_command",
"list_mcp_resources",
"list_mcp_resource_templates",
"read_mcp_resource",
"update_plan",
"view_image",
],
);
}

#[test]
#[ignore]
fn test_parallel_support_flags() {

@@ -2,13 +2,13 @@

use std::collections::VecDeque;
use std::sync::Arc;

use tokio::sync::Mutex;
use tokio::sync::Notify;
use tokio::sync::mpsc;
use tokio::sync::oneshot::error::TryRecvError;
use tokio::task::JoinHandle;
use tokio::time::Duration;
use tokio_util::sync::CancellationToken;

use crate::exec::ExecToolCallOutput;
use crate::exec::SandboxType;
@@ -67,13 +67,18 @@ impl OutputBufferState {
}

pub(crate) type OutputBuffer = Arc<Mutex<OutputBufferState>>;
pub(crate) type OutputHandles = (OutputBuffer, Arc<Notify>);
pub(crate) struct OutputHandles {
pub(crate) output_buffer: OutputBuffer,
pub(crate) output_notify: Arc<Notify>,
pub(crate) cancellation_token: CancellationToken,
}

#[derive(Debug)]
pub(crate) struct UnifiedExecSession {
session: ExecCommandSession,
output_buffer: OutputBuffer,
output_notify: Arc<Notify>,
cancellation_token: CancellationToken,
output_task: JoinHandle<()>,
sandbox_type: SandboxType,
}
@@ -86,9 +91,11 @@ impl UnifiedExecSession {
) -> Self {
let output_buffer = Arc::new(Mutex::new(OutputBufferState::default()));
let output_notify = Arc::new(Notify::new());
let cancellation_token = CancellationToken::new();
let mut receiver = initial_output_rx;
let buffer_clone = Arc::clone(&output_buffer);
let notify_clone = Arc::clone(&output_notify);
let cancellation_token_clone = cancellation_token.clone();
let output_task = tokio::spawn(async move {
loop {
match receiver.recv().await {
@@ -99,7 +106,10 @@ impl UnifiedExecSession {
notify_clone.notify_waiters();
}
Err(tokio::sync::broadcast::error::RecvError::Lagged(_)) => continue,
Err(tokio::sync::broadcast::error::RecvError::Closed) => break,
Err(tokio::sync::broadcast::error::RecvError::Closed) => {
cancellation_token_clone.cancel();
break;
}
}
}
});
@@ -108,6 +118,7 @@ impl UnifiedExecSession {
session,
output_buffer,
output_notify,
cancellation_token,
output_task,
sandbox_type,
}
@@ -118,10 +129,11 @@ impl UnifiedExecSession {
}

pub(super) fn output_handles(&self) -> OutputHandles {
(
Arc::clone(&self.output_buffer),
Arc::clone(&self.output_notify),
)
OutputHandles {
output_buffer: Arc::clone(&self.output_buffer),
output_notify: Arc::clone(&self.output_notify),
cancellation_token: self.cancellation_token.clone(),
}
}

pub(super) fn has_exited(&self) -> bool {
@@ -199,20 +211,34 @@ impl UnifiedExecSession {
};

if exit_ready {
managed.signal_exit();
managed.check_for_sandbox_denial().await?;
return Ok(managed);
}

tokio::pin!(exit_rx);
if tokio::time::timeout(Duration::from_millis(50), &mut exit_rx)
.await
.is_ok()
{
managed.signal_exit();
managed.check_for_sandbox_denial().await?;
return Ok(managed);
}

tokio::spawn({
let cancellation_token = managed.cancellation_token.clone();
async move {
let _ = exit_rx.await;
cancellation_token.cancel();
}
});

Ok(managed)
}

fn signal_exit(&self) {
self.cancellation_token.cancel();
}
}

impl Drop for UnifiedExecSession {

@@ -5,6 +5,7 @@ use tokio::sync::Notify;
use tokio::sync::mpsc;
use tokio::time::Duration;
use tokio::time::Instant;
use tokio_util::sync::CancellationToken;

use crate::codex::Session;
use crate::codex::TurnContext;
@@ -40,8 +41,20 @@ use super::clamp_yield_time;
use super::generate_chunk_id;
use super::resolve_max_tokens;
use super::session::OutputBuffer;
use super::session::OutputHandles;
use super::session::UnifiedExecSession;

struct PreparedSessionHandles {
writer_tx: mpsc::Sender<Vec<u8>>,
output_buffer: OutputBuffer,
output_notify: Arc<Notify>,
cancellation_token: CancellationToken,
session_ref: Arc<Session>,
turn_ref: Arc<TurnContext>,
command: Vec<String>,
cwd: PathBuf,
}

impl UnifiedExecSessionManager {
pub(crate) async fn exec_command(
&self,
@@ -67,10 +80,19 @@ impl UnifiedExecSessionManager {
let yield_time_ms = clamp_yield_time(request.yield_time_ms);

let start = Instant::now();
let (output_buffer, output_notify) = session.output_handles();
let OutputHandles {
output_buffer,
output_notify,
cancellation_token,
} = session.output_handles();
let deadline = start + Duration::from_millis(yield_time_ms);
let collected =
Self::collect_output_until_deadline(&output_buffer, &output_notify, deadline).await;
let collected = Self::collect_output_until_deadline(
&output_buffer,
&output_notify,
&cancellation_token,
deadline,
)
.await;
let wall_time = Instant::now().saturating_duration_since(start);

let text = String::from_utf8_lossy(&collected).to_string();
@@ -129,15 +151,16 @@ impl UnifiedExecSessionManager {
) -> Result<UnifiedExecResponse, UnifiedExecError> {
let session_id = request.session_id;

let (
let PreparedSessionHandles {
writer_tx,
output_buffer,
output_notify,
cancellation_token,
session_ref,
turn_ref,
session_command,
session_cwd,
) = self.prepare_session_handles(session_id).await?;
command: session_command,
cwd: session_cwd,
} = self.prepare_session_handles(session_id).await?;

let interaction_emitter = ToolEmitter::unified_exec(
&session_command,
@@ -176,8 +199,13 @@ impl UnifiedExecSessionManager {
let yield_time_ms = clamp_yield_time(request.yield_time_ms);
let start = Instant::now();
let deadline = start + Duration::from_millis(yield_time_ms);
let collected =
Self::collect_output_until_deadline(&output_buffer, &output_notify, deadline).await;
let collected = Self::collect_output_until_deadline(
&output_buffer,
&output_notify,
&cancellation_token,
deadline,
)
.await;
let wall_time = Instant::now().saturating_duration_since(start);

let text = String::from_utf8_lossy(&collected).to_string();
@@ -265,44 +293,27 @@ impl UnifiedExecSessionManager {
async fn prepare_session_handles(
&self,
session_id: i32,
) -> Result<
(
mpsc::Sender<Vec<u8>>,
OutputBuffer,
Arc<Notify>,
Arc<Session>,
Arc<TurnContext>,
Vec<String>,
PathBuf,
),
UnifiedExecError,
> {
) -> Result<PreparedSessionHandles, UnifiedExecError> {
let sessions = self.sessions.lock().await;
let (output_buffer, output_notify, writer_tx, session, turn, command, cwd) =
if let Some(entry) = sessions.get(&session_id) {
let (buffer, notify) = entry.session.output_handles();
(
buffer,
notify,
entry.session.writer_sender(),
Arc::clone(&entry.session_ref),
Arc::clone(&entry.turn_ref),
entry.command.clone(),
entry.cwd.clone(),
)
} else {
return Err(UnifiedExecError::UnknownSessionId { session_id });
};

Ok((
writer_tx,
let entry = sessions
.get(&session_id)
.ok_or(UnifiedExecError::UnknownSessionId { session_id })?;
let OutputHandles {
output_buffer,
output_notify,
session,
turn,
command,
cwd,
))
cancellation_token,
} = entry.session.output_handles();

Ok(PreparedSessionHandles {
writer_tx: entry.session.writer_sender(),
output_buffer,
output_notify,
cancellation_token,
session_ref: Arc::clone(&entry.session_ref),
turn_ref: Arc::clone(&entry.turn_ref),
command: entry.command.clone(),
cwd: entry.cwd.clone(),
})
}

async fn send_input(
@@ -480,9 +491,13 @@ impl UnifiedExecSessionManager {
pub(super) async fn collect_output_until_deadline(
output_buffer: &OutputBuffer,
output_notify: &Arc<Notify>,
cancellation_token: &CancellationToken,
deadline: Instant,
) -> Vec<u8> {
const POST_EXIT_OUTPUT_GRACE: Duration = Duration::from_millis(25);

let mut collected: Vec<u8> = Vec::with_capacity(4096);
let mut exit_signal_received = cancellation_token.is_cancelled();
loop {
let drained_chunks;
let mut wait_for_output = None;
@@ -495,15 +510,27 @@ impl UnifiedExecSessionManager {
}

if drained_chunks.is_empty() {
exit_signal_received |= cancellation_token.is_cancelled();
let remaining = deadline.saturating_duration_since(Instant::now());
if remaining == Duration::ZERO {
break;
}

let notified = wait_for_output.unwrap_or_else(|| output_notify.notified());
if exit_signal_received {
let grace = remaining.min(POST_EXIT_OUTPUT_GRACE);
if tokio::time::timeout(grace, notified).await.is_err() {
break;
}
continue;
}

tokio::pin!(notified);
let exit_notified = cancellation_token.cancelled();
tokio::pin!(exit_notified);
tokio::select! {
_ = &mut notified => {}
_ = &mut exit_notified => exit_signal_received = true,
_ = tokio::time::sleep(remaining) => break,
}
continue;
@@ -513,6 +540,7 @@ impl UnifiedExecSessionManager {
collected.extend_from_slice(&chunk);
}

exit_signal_received |= cancellation_token.is_cancelled();
if Instant::now() >= deadline {
break;
}

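The CancellationToken wiring above lets collect_output_until_deadline stop waiting as soon as the PTY exits instead of sleeping out the full deadline, with a short grace period to drain trailing output. In isolation the pattern is plain tokio-util usage; a self-contained sketch of the same three-way race between output, cancellation, and deadline:

// Standalone illustration of the select-based wait the diff introduces;
// not code from this change set.
use std::sync::Arc;
use tokio::sync::Notify;
use tokio::time::{sleep, Duration};
use tokio_util::sync::CancellationToken;

async fn wait_for_output_or_exit(
    output_notify: Arc<Notify>,
    cancellation_token: CancellationToken,
    remaining: Duration,
) -> &'static str {
    tokio::select! {
        _ = output_notify.notified() => "output available",
        _ = cancellation_token.cancelled() => "session exited",
        _ = sleep(remaining) => "deadline reached",
    }
}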
@@ -18,3 +18,4 @@ tempfile = { workspace = true }
tokio = { workspace = true, features = ["time"] }
walkdir = { workspace = true }
wiremock = { workspace = true }
shlex = { workspace = true }

@@ -172,6 +172,15 @@ pub fn sandbox_network_env_var() -> &'static str {
codex_core::spawn::CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR
}

pub fn format_with_current_shell(command: &str) -> Vec<String> {
codex_core::shell::default_user_shell().derive_exec_args(command, true)
}

pub fn format_with_current_shell_display(command: &str) -> String {
let args = format_with_current_shell(command);
shlex::try_join(args.iter().map(String::as_str)).expect("serialize current shell command")
}

pub mod fs_wait {
use anyhow::Result;
use anyhow::anyhow;

@@ -462,8 +462,11 @@ pub fn ev_apply_patch_function_call(call_id: &str, patch: &str) -> Value {

pub fn ev_shell_command_call(call_id: &str, command: &str) -> Value {
let args = serde_json::json!({ "command": command });
let arguments = serde_json::to_string(&args).expect("serialize shell arguments");
ev_shell_command_call_with_args(call_id, &args)
}

pub fn ev_shell_command_call_with_args(call_id: &str, args: &serde_json::Value) -> Value {
let arguments = serde_json::to_string(args).expect("serialize shell command arguments");
ev_function_call(call_id, "shell_command", &arguments)
}

@@ -17,15 +17,11 @@ use core_test_support::wait_for_event;
use regex_lite::Regex;
use serde_json::json;

/// Integration test: spawn a long‑running shell tool via a mocked Responses SSE
/// Integration test: spawn a long‑running shell_command tool via a mocked Responses SSE
/// function call, then interrupt the session and expect TurnAborted.
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn interrupt_long_running_tool_emits_turn_aborted() {
let command = vec![
"bash".to_string(),
"-lc".to_string(),
"sleep 60".to_string(),
];
let command = "sleep 60";

let args = json!({
"command": command,
@@ -33,14 +29,19 @@ async fn interrupt_long_running_tool_emits_turn_aborted() {
})
.to_string();
let body = sse(vec![
ev_function_call("call_sleep", "shell", &args),
ev_function_call("call_sleep", "shell_command", &args),
ev_completed("done"),
]);

let server = start_mock_server().await;
mount_sse_once(&server, body).await;

let codex = test_codex().build(&server).await.unwrap().codex;
let codex = test_codex()
.with_model("gpt-5.1")
.build(&server)
.await
.unwrap()
.codex;

// Kick off a turn that triggers the function call.
codex
@@ -67,11 +68,7 @@ async fn interrupt_long_running_tool_emits_turn_aborted() {
/// responses server, and ensures the model receives the synthesized abort.
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn interrupt_tool_records_history_entries() {
let command = vec![
"bash".to_string(),
"-lc".to_string(),
"sleep 60".to_string(),
];
let command = "sleep 60";
let call_id = "call-history";

let args = json!({
@@ -81,7 +78,7 @@ async fn interrupt_tool_records_history_entries() {
.to_string();
let first_body = sse(vec![
ev_response_created("resp-history"),
ev_function_call(call_id, "shell", &args),
ev_function_call(call_id, "shell_command", &args),
ev_completed("resp-history"),
]);
let follow_up_body = sse(vec![
@@ -92,7 +89,11 @@ async fn interrupt_tool_records_history_entries() {
let server = start_mock_server().await;
let response_mock = mount_sse_sequence(&server, vec![first_body, follow_up_body]).await;

let fixture = test_codex().build(&server).await.unwrap();
let fixture = test_codex()
.with_model("gpt-5.1")
.build(&server)
.await
.unwrap();
let codex = Arc::clone(&fixture.codex);

codex

@@ -667,7 +667,7 @@ async fn apply_patch_cli_verification_failure_has_no_side_effects(
}

#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn apply_patch_shell_heredoc_with_cd_updates_relative_workdir() -> Result<()> {
async fn apply_patch_shell_command_heredoc_with_cd_updates_relative_workdir() -> Result<()> {
skip_if_no_network!(Ok(()));

let harness = apply_patch_harness_with(|config| {
@@ -684,14 +684,11 @@ async fn apply_patch_shell_heredoc_with_cd_updates_relative_workdir() -> Result<

let script = "cd sub && apply_patch <<'EOF'\n*** Begin Patch\n*** Update File: in_sub.txt\n@@\n-before\n+after\n*** End Patch\nEOF\n";
let call_id = "shell-heredoc-cd";
let args = json!({
"command": ["bash", "-lc", script],
"timeout_ms": 5_000,
});
let args = json!({ "command": script, "timeout_ms": 5_000 });
let bodies = vec![
sse(vec![
ev_response_created("resp-1"),
ev_function_call(call_id, "shell", &serde_json::to_string(&args)?),
ev_function_call(call_id, "shell_command", &serde_json::to_string(&args)?),
ev_completed("resp-1"),
]),
sse(vec![
@@ -706,14 +703,14 @@ async fn apply_patch_shell_heredoc_with_cd_updates_relative_workdir() -> Result<
let out = harness.function_call_stdout(call_id).await;
assert!(
out.contains("Success."),
"expected successful apply_patch invocation via shell: {out}"
"expected successful apply_patch invocation via shell_command: {out}"
);
assert_eq!(fs::read_to_string(&target)?, "after\n");
Ok(())
}

#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn apply_patch_shell_failure_propagates_error_and_skips_diff() -> Result<()> {
async fn apply_patch_shell_command_failure_propagates_error_and_skips_diff() -> Result<()> {
skip_if_no_network!(Ok(()));

let harness = apply_patch_harness_with(|config| {
@@ -730,14 +727,11 @@ async fn apply_patch_shell_failure_propagates_error_and_skips_diff() -> Result<(

let script = "apply_patch <<'EOF'\n*** Begin Patch\n*** Update File: invalid.txt\n@@\n-nope\n+changed\n*** End Patch\nEOF\n";
let call_id = "shell-apply-failure";
let args = json!({
"command": ["bash", "-lc", script],
"timeout_ms": 5_000,
});
let args = json!({ "command": script, "timeout_ms": 5_000 });
let bodies = vec![
sse(vec![
ev_response_created("resp-1"),
ev_function_call(call_id, "shell", &serde_json::to_string(&args)?),
ev_function_call(call_id, "shell_command", &serde_json::to_string(&args)?),
ev_completed("resp-1"),
]),
sse(vec![
@@ -780,10 +774,6 @@ async fn apply_patch_shell_failure_propagates_error_and_skips_diff() -> Result<(
);

let out = harness.function_call_stdout(call_id).await;
assert!(
out.contains("apply_patch verification failed"),
"expected verification failure message"
);
assert!(
out.contains("Failed to find expected lines in"),
"expected failure diagnostics: {out}"

@@ -71,7 +71,7 @@ enum ActionKind {
response_body: &'static str,
},
RunCommand {
command: &'static [&'static str],
command: &'static str,
},
RunUnifiedExecCommand {
command: &'static str,
@@ -97,20 +97,12 @@ impl ActionKind {
server: &MockServer,
call_id: &str,
with_escalated_permissions: bool,
) -> Result<(Value, Option<Vec<String>>)> {
) -> Result<(Value, Option<String>)> {
match self {
ActionKind::WriteFile { target, content } => {
let (path, _) = target.resolve_for_patch(test);
let _ = fs::remove_file(&path);
let command = vec![
"/bin/sh".to_string(),
"-c".to_string(),
format!(
"printf {content:?} > {path:?} && cat {path:?}",
content = content,
path = path
),
];
let command = format!("printf {content:?} > {path:?} && cat {path:?}");
let event = shell_event(call_id, &command, 1_000, with_escalated_permissions)?;
Ok((event, Some(command)))
}
@@ -127,21 +119,18 @@ impl ActionKind {
.await;

let url = format!("{}{}", server.uri(), endpoint);
let escaped_url = url.replace('\'', "\\'");
let script = format!(
"import sys\nimport urllib.request\nurl = {url:?}\ntry:\n    data = urllib.request.urlopen(url, timeout=2).read().decode()\n    print('OK:' + data.strip())\nexcept Exception as exc:\n    print('ERR:' + exc.__class__.__name__)\n    sys.exit(1)",
"import sys\nimport urllib.request\nurl = '{escaped_url}'\ntry:\n    data = urllib.request.urlopen(url, timeout=2).read().decode()\n    print('OK:' + data.strip())\nexcept Exception as exc:\n    print('ERR:' + exc.__class__.__name__)\n    sys.exit(1)",
);

let command = vec!["python3".to_string(), "-c".to_string(), script];
let command = format!("python3 -c \"{script}\"");
let event = shell_event(call_id, &command, 1_000, with_escalated_permissions)?;
Ok((event, Some(command)))
}
ActionKind::RunCommand { command } => {
let command: Vec<String> = command
.iter()
.map(std::string::ToString::to_string)
.collect();
let event = shell_event(call_id, &command, 1_000, with_escalated_permissions)?;
Ok((event, Some(command)))
let event = shell_event(call_id, command, 1_000, with_escalated_permissions)?;
Ok((event, Some(command.to_string())))
}
ActionKind::RunUnifiedExecCommand {
command,
@@ -154,14 +143,7 @@ impl ActionKind {
with_escalated_permissions,
*justification,
)?;
Ok((
event,
Some(vec![
"/bin/bash".to_string(),
"-lc".to_string(),
command.to_string(),
]),
))
Ok((event, Some(command.to_string())))
}
ActionKind::ApplyPatchFunction { target, content } => {
let (path, patch_path) = target.resolve_for_patch(test);
@@ -185,19 +167,19 @@ fn build_add_file_patch(patch_path: &str, content: &str) -> String {
format!("*** Begin Patch\n*** Add File: {patch_path}\n+{content}\n*** End Patch\n")
}

fn shell_apply_patch_command(patch: &str) -> Vec<String> {
fn shell_apply_patch_command(patch: &str) -> String {
let mut script = String::from("apply_patch <<'PATCH'\n");
script.push_str(patch);
if !patch.ends_with('\n') {
script.push('\n');
}
script.push_str("PATCH\n");
vec!["bash".to_string(), "-lc".to_string(), script]
script
}

fn shell_event(
call_id: &str,
command: &[String],
command: &str,
timeout_ms: u64,
with_escalated_permissions: bool,
) -> Result<Value> {
@@ -209,7 +191,7 @@ fn shell_event(
args["with_escalated_permissions"] = json!(true);
}
let args_str = serde_json::to_string(&args)?;
Ok(ev_function_call(call_id, "shell", &args_str))
Ok(ev_function_call(call_id, "shell_command", &args_str))
}

fn exec_command_event(
@@ -296,7 +278,10 @@ impl Expectation {
}
Expectation::FileCreatedNoExitCode { target, content } => {
let (path, _) = target.resolve_for_patch(test);
assert_eq!(result.exit_code, None, "expected no exit code for {path:?}");
assert!(
result.exit_code.is_none() || result.exit_code == Some(0),
"expected no exit code for {path:?}",
);
assert!(
result.stdout.contains(content),
"stdout missing {content:?}: {}",
@@ -385,8 +370,8 @@ impl Expectation {
);
}
Expectation::NetworkSuccessNoExitCode { body_contains } => {
assert_eq!(
result.exit_code, None,
assert!(
result.exit_code.is_none() || result.exit_code == Some(0),
"expected no exit code for successful network call: {}",
result.stdout
);
@@ -433,8 +418,8 @@ impl Expectation {
);
}
Expectation::CommandSuccessNoExitCode { stdout_contains } => {
assert_eq!(
result.exit_code, None,
assert!(
result.exit_code.is_none() || result.exit_code == Some(0),
"expected no exit code for trusted command: {}",
result.stdout
);
@@ -531,10 +516,18 @@ fn parse_result(item: &Value) -> CommandResult {
CommandResult { exit_code, stdout }
}
Err(_) => {
let structured = Regex::new(r"(?s)^Exit code:\s*(-?\d+).*?Output:\n(.*)$").unwrap();
let regex =
Regex::new(r"(?s)^.*?Process exited with code (\d+)\n.*?Output:\n(.*)$").unwrap();
// parse freeform output
if let Some(captures) = regex.captures(output_str) {
if let Some(captures) = structured.captures(output_str) {
let exit_code = captures.get(1).unwrap().as_str().parse::<i64>().unwrap();
let output = captures.get(2).unwrap().as_str();
CommandResult {
exit_code: Some(exit_code),
stdout: output.to_string(),
}
} else if let Some(captures) = regex.captures(output_str) {
let exit_code = captures.get(1).unwrap().as_str().parse::<i64>().unwrap();
let output = captures.get(2).unwrap().as_str();
CommandResult {
@@ -553,7 +546,7 @@ fn parse_result(item: &Value) -> CommandResult {

async fn expect_exec_approval(
test: &TestCodex,
expected_command: &[String],
expected_command: &str,
) -> ExecApprovalRequestEvent {
let event = wait_for_event(&test.codex, |event| {
matches!(
@@ -565,7 +558,12 @@ async fn expect_exec_approval(

match event {
EventMsg::ExecApprovalRequest(approval) => {
assert_eq!(approval.command, expected_command);
let last_arg = approval
.command
.last()
.map(std::string::String::as_str)
.unwrap_or_default();
assert_eq!(last_arg, expected_command);
approval
}
EventMsg::TaskComplete(_) => panic!("expected approval request before completion"),
@@ -660,7 +658,7 @@ fn scenarios() -> Vec<ScenarioSpec> {
features: vec![],
model_override: Some("gpt-5.1"),
outcome: Outcome::Auto,
expectation: Expectation::FileCreatedNoExitCode {
expectation: Expectation::FileCreated {
target: TargetPath::OutsideWorkspace("dfa_on_request_5_1.txt"),
content: "danger-on-request",
},
@@ -702,7 +700,7 @@ fn scenarios() -> Vec<ScenarioSpec> {
approval_policy: UnlessTrusted,
sandbox_policy: SandboxPolicy::DangerFullAccess,
action: ActionKind::RunCommand {
command: &["echo", "trusted-unless"],
command: "echo trusted-unless",
},
with_escalated_permissions: false,
features: vec![],
@@ -717,7 +715,7 @@ fn scenarios() -> Vec<ScenarioSpec> {
approval_policy: UnlessTrusted,
sandbox_policy: SandboxPolicy::DangerFullAccess,
action: ActionKind::RunCommand {
command: &["echo", "trusted-unless"],
command: "echo trusted-unless",
},
with_escalated_permissions: false,
features: vec![],
@@ -880,7 +878,7 @@ fn scenarios() -> Vec<ScenarioSpec> {
approval_policy: OnRequest,
sandbox_policy: SandboxPolicy::ReadOnly,
action: ActionKind::RunCommand {
command: &["echo", "trusted-read-only"],
command: "echo trusted-read-only",
},
with_escalated_permissions: false,
features: vec![],
@@ -895,7 +893,7 @@ fn scenarios() -> Vec<ScenarioSpec> {
approval_policy: OnRequest,
sandbox_policy: SandboxPolicy::ReadOnly,
action: ActionKind::RunCommand {
command: &["echo", "trusted-read-only"],
command: "echo trusted-read-only",
},
with_escalated_permissions: false,
features: vec![],
@@ -1020,7 +1018,7 @@ fn scenarios() -> Vec<ScenarioSpec> {
},
},
ScenarioSpec {
name: "apply_patch_shell_requires_patch_approval",
name: "apply_patch_shell_command_requires_patch_approval",
approval_policy: UnlessTrusted,
sandbox_policy: workspace_write(false),
action: ActionKind::ApplyPatchShell {
@@ -1114,7 +1112,7 @@ fn scenarios() -> Vec<ScenarioSpec> {
},
},
ScenarioSpec {
name: "apply_patch_shell_outside_requires_patch_approval",
name: "apply_patch_shell_command_outside_requires_patch_approval",
approval_policy: OnRequest,
sandbox_policy: workspace_write(false),
action: ActionKind::ApplyPatchShell {
@@ -1229,7 +1227,10 @@ fn scenarios() -> Vec<ScenarioSpec> {
message_contains: if cfg!(target_os = "linux") {
&["Permission denied"]
} else {
&["Permission denied|Operation not permitted|Read-only file system"]
&[
"Permission denied|Operation not permitted|operation not permitted|\
Read-only file system",
]
},
},
},
@@ -1238,7 +1239,7 @@ fn scenarios() -> Vec<ScenarioSpec> {
approval_policy: Never,
sandbox_policy: SandboxPolicy::ReadOnly,
action: ActionKind::RunCommand {
command: &["echo", "trusted-never"],
command: "echo trusted-never",
},
with_escalated_permissions: false,
features: vec![],
@@ -1373,7 +1374,10 @@ fn scenarios() -> Vec<ScenarioSpec> {
message_contains: if cfg!(target_os = "linux") {
&["Permission denied"]
} else {
&["Permission denied|Operation not permitted|Read-only file system"]
&[
"Permission denied|Operation not permitted|operation not permitted|\
Read-only file system",
]
},
},
},
@@ -1509,7 +1513,7 @@ async fn run_scenario(scenario: &ScenarioSpec) -> Result<()> {
expected_reason,
} => {
let command = expected_command
.as_ref()
.as_deref()
.expect("exec approval requires shell command");
let approval = expect_exec_approval(&test, command).await;
if let Some(expected_reason) = expected_reason {

@@ -499,9 +499,20 @@ async fn integration_git_info_unit_test() {
"Git info should contain repository_url"
);
let repo_url = git_info.repository_url.as_ref().unwrap();
// Some hosts rewrite remotes (e.g., github.com → git@github.com), so assert against
// the actual remote reported by git instead of a static URL.
let expected_remote_url = std::process::Command::new("git")
.args(["remote", "get-url", "origin"])
.current_dir(&git_repo)
.output()
.unwrap();
let expected_remote_url = String::from_utf8(expected_remote_url.stdout)
.unwrap()
.trim()
.to_string();
assert_eq!(
repo_url, "https://github.com/example/integration-test.git",
"Repository URL should match what we configured"
repo_url, &expected_remote_url,
"Repository URL should match git remote get-url output"
);

println!("✅ Git info collection test passed!");

@@ -992,7 +992,7 @@ async fn azure_responses_request_includes_store_and_reasoning_ids() {
id: Some("web-search-id".into()),
status: Some("completed".into()),
action: WebSearchAction::Search {
query: "weather".into(),
query: Some("weather".into()),
},
});
prompt.input.push(ResponseItem::FunctionCall {

@@ -1,3 +1,4 @@
use codex_core::model_family::find_family_for_model;
use codex_core::protocol::AskForApproval;
use codex_core::protocol::EventMsg;
use codex_core::protocol::Op;
@@ -25,17 +26,17 @@ use pretty_assertions::assert_eq;
async fn codex_delegate_forwards_exec_approval_and_proceeds_on_approval() {
skip_if_no_network!();

// Sub-agent turn 1: emit a shell function_call requiring approval, then complete.
// Sub-agent turn 1: emit a shell_command function_call requiring approval, then complete.
let call_id = "call-exec-1";
let args = serde_json::json!({
"command": ["bash", "-lc", "rm -rf delegated"],
"command": "rm -rf delegated",
"timeout_ms": 1000,
"with_escalated_permissions": true,
})
.to_string();
let sse1 = sse(vec![
ev_response_created("resp-1"),
ev_function_call(call_id, "shell", &args),
ev_function_call(call_id, "shell_command", &args),
ev_completed("resp-1"),
]);

@@ -61,6 +62,8 @@ async fn codex_delegate_forwards_exec_approval_and_proceeds_on_approval() {
let mut builder = test_codex().with_config(|config| {
config.approval_policy = AskForApproval::OnRequest;
config.sandbox_policy = SandboxPolicy::ReadOnly;
config.model = "gpt-5.1".to_string();
config.model_family = find_family_for_model("gpt-5.1").expect("gpt-5.1 is a valid model");
});
let test = builder.build(&server).await.expect("build test codex");

@@ -138,6 +141,8 @@ async fn codex_delegate_forwards_patch_approval_and_proceeds_on_decision() {
// Use a restricted sandbox so patch approval is required
config.sandbox_policy = SandboxPolicy::ReadOnly;
config.include_apply_patch_tool = true;
config.model = "gpt-5.1".to_string();
config.model_family = find_family_for_model("gpt-5.1").expect("gpt-5.1 is a valid model");
});
let test = builder.build(&server).await.expect("build test codex");

@@ -384,7 +384,7 @@ async fn manual_compact_uses_custom_prompt() {
}

#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn manual_compact_emits_estimated_token_usage_event() {
async fn manual_compact_emits_api_and_local_token_usage_events() {
skip_if_no_network!();

let server = start_mock_server().await;

@@ -32,7 +32,7 @@ async fn run_test_cmd(tmp: TempDir, cmd: Vec<&str>) -> Result<ExecToolCallOutput
let params = ExecParams {
command: cmd.iter().map(ToString::to_string).collect(),
cwd: tmp.path().to_path_buf(),
timeout_ms: Some(1000),
expiration: 1000.into(),
env: HashMap::new(),
with_escalated_permissions: None,
justification: None,

@@ -1,6 +1,7 @@
#![allow(clippy::unwrap_used, clippy::expect_used)]

use anyhow::Result;
use codex_core::model_family::find_family_for_model;
use codex_core::protocol::AskForApproval;
use codex_core::protocol::EventMsg;
use codex_core::protocol::Op;
@@ -21,6 +22,11 @@ use std::fs;

#[tokio::test]
async fn execpolicy_blocks_shell_invocation() -> Result<()> {
// TODO execpolicy doesn't parse powershell commands yet
if cfg!(windows) {
return Ok(());
}

let mut builder = test_codex().with_config(|config| {
let policy_path = config.codex_home.join("policy").join("policy.codexpolicy");
fs::create_dir_all(
@@ -34,13 +40,16 @@ async fn execpolicy_blocks_shell_invocation() -> Result<()> {
r#"prefix_rule(pattern=["echo"], decision="forbidden")"#,
)
.expect("write policy file");
config.model = "gpt-5.1".to_string();
config.model_family =
find_family_for_model("gpt-5.1").expect("gpt-5.1 should have a model family");
});
let server = start_mock_server().await;
let test = builder.build(&server).await?;

let call_id = "shell-forbidden";
let args = json!({
"command": ["echo", "blocked"],
"command": "echo blocked",
"timeout_ms": 1_000,
});

@@ -48,7 +57,7 @@ async fn execpolicy_blocks_shell_invocation() -> Result<()> {
&server,
sse(vec![
ev_response_created("resp-1"),
ev_function_call(call_id, "shell", &serde_json::to_string(&args)?),
ev_function_call(call_id, "shell_command", &serde_json::to_string(&args)?),
ev_completed("resp-1"),
]),
)

|
||||
mod shell_serialization;
|
||||
mod stream_error_allows_next_turn;
|
||||
mod stream_no_completed;
|
||||
mod text_encoding_fix;
|
||||
mod tool_harness;
|
||||
mod tool_parallelism;
|
||||
mod tools;
|
||||
|
||||
@@ -1,21 +1,10 @@
|
||||
#![allow(clippy::unwrap_used)]
|
||||
|
||||
use codex_core::CodexAuth;
|
||||
use codex_core::ConversationManager;
|
||||
use codex_core::ModelProviderInfo;
|
||||
use codex_core::built_in_model_providers;
|
||||
use codex_core::features::Feature;
|
||||
use codex_core::model_family::find_family_for_model;
|
||||
use codex_core::protocol::EventMsg;
|
||||
use codex_core::protocol::Op;
|
||||
use codex_protocol::user_input::UserInput;
|
||||
use core_test_support::load_default_config_for_test;
|
||||
use core_test_support::load_sse_fixture_with_id;
|
||||
use core_test_support::responses;
|
||||
use core_test_support::responses::start_mock_server;
|
||||
use core_test_support::skip_if_no_network;
|
||||
use core_test_support::wait_for_event;
|
||||
use tempfile::TempDir;
|
||||
use wiremock::MockServer;
|
||||
use core_test_support::test_codex::test_codex;
|
||||
|
||||
fn sse_completed(id: &str) -> String {
|
||||
load_sse_fixture_with_id("tests/fixtures/completed_template.json", id)
|
||||
@@ -39,46 +28,17 @@ fn tool_identifiers(body: &serde_json::Value) -> Vec<String> {
|
||||
|
||||
#[allow(clippy::expect_used)]
|
||||
async fn collect_tool_identifiers_for_model(model: &str) -> Vec<String> {
|
||||
let server = MockServer::start().await;
|
||||
|
||||
let server = start_mock_server().await;
|
||||
let sse = sse_completed(model);
|
||||
let resp_mock = responses::mount_sse_once(&server, sse).await;
|
||||
|
||||
let model_provider = ModelProviderInfo {
|
||||
base_url: Some(format!("{}/v1", server.uri())),
|
||||
..built_in_model_providers()["openai"].clone()
|
||||
};
|
||||
|
||||
let cwd = TempDir::new().unwrap();
|
||||
let codex_home = TempDir::new().unwrap();
|
||||
let mut config = load_default_config_for_test(&codex_home);
|
||||
config.cwd = cwd.path().to_path_buf();
|
||||
config.model_provider = model_provider;
|
||||
config.model = model.to_string();
|
||||
config.model_family =
|
||||
find_family_for_model(model).unwrap_or_else(|| panic!("unknown model family for {model}"));
|
||||
config.features.disable(Feature::ApplyPatchFreeform);
|
||||
config.features.disable(Feature::ViewImageTool);
|
||||
config.features.disable(Feature::WebSearchRequest);
|
||||
config.features.disable(Feature::UnifiedExec);
|
||||
|
||||
let conversation_manager =
|
||||
ConversationManager::with_auth(CodexAuth::from_api_key("Test API Key"));
|
||||
let codex = conversation_manager
|
||||
.new_conversation(config)
|
||||
let mut builder = test_codex().with_model(model);
|
||||
let test = builder
|
||||
.build(&server)
|
||||
.await
|
||||
.expect("create new conversation")
|
||||
.conversation;
|
||||
.expect("create test Codex conversation");
|
||||
|
||||
codex
|
||||
.submit(Op::UserInput {
|
||||
items: vec![UserInput::Text {
|
||||
text: "hello tools".into(),
|
||||
}],
|
||||
})
|
||||
.await
|
||||
.unwrap();
|
||||
wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await;
|
||||
test.submit_turn("hello tools").await.expect("submit turn");
|
||||
|
||||
let body = resp_mock.single_request().body_json();
|
||||
tool_identifiers(&body)
|
||||
@@ -97,7 +57,8 @@ async fn model_selects_expected_tools() {
|
||||
"list_mcp_resources".to_string(),
|
||||
"list_mcp_resource_templates".to_string(),
|
||||
"read_mcp_resource".to_string(),
|
||||
"update_plan".to_string()
|
||||
"update_plan".to_string(),
|
||||
"view_image".to_string()
|
||||
],
|
||||
"codex-mini-latest should expose the local shell tool",
|
||||
);
|
||||
@@ -111,7 +72,8 @@ async fn model_selects_expected_tools() {
|
||||
"list_mcp_resource_templates".to_string(),
|
||||
"read_mcp_resource".to_string(),
|
||||
"update_plan".to_string(),
|
||||
"apply_patch".to_string()
|
||||
"apply_patch".to_string(),
|
||||
"view_image".to_string()
|
||||
],
|
||||
"gpt-5-codex should expose the apply_patch tool",
|
||||
);
|
||||
@@ -125,7 +87,8 @@ async fn model_selects_expected_tools() {
|
||||
"list_mcp_resource_templates".to_string(),
|
||||
"read_mcp_resource".to_string(),
|
||||
"update_plan".to_string(),
|
||||
"apply_patch".to_string()
|
||||
"apply_patch".to_string(),
|
||||
"view_image".to_string()
|
||||
],
|
||||
"gpt-5.1-codex should expose the apply_patch tool",
|
||||
);
|
||||
@@ -139,6 +102,7 @@ async fn model_selects_expected_tools() {
|
||||
"list_mcp_resource_templates".to_string(),
|
||||
"read_mcp_resource".to_string(),
|
||||
"update_plan".to_string(),
|
||||
"view_image".to_string()
|
||||
],
|
||||
"gpt-5 should expose the apply_patch tool",
|
||||
);
|
||||
@@ -152,7 +116,8 @@ async fn model_selects_expected_tools() {
|
||||
"list_mcp_resource_templates".to_string(),
|
||||
"read_mcp_resource".to_string(),
|
||||
"update_plan".to_string(),
|
||||
"apply_patch".to_string()
|
||||
"apply_patch".to_string(),
|
||||
"view_image".to_string()
|
||||
],
|
||||
"gpt-5.1 should expose the apply_patch tool",
|
||||
);
|
||||
|
||||
@@ -30,18 +30,15 @@ fn text_user_input(text: String) -> serde_json::Value {
|
||||
}
|
||||
|
||||
fn default_env_context_str(cwd: &str, shell: &Shell) -> String {
|
||||
let shell_name = shell.name();
|
||||
format!(
|
||||
r#"<environment_context>
|
||||
<cwd>{}</cwd>
|
||||
<cwd>{cwd}</cwd>
|
||||
<approval_policy>on-request</approval_policy>
|
||||
<sandbox_mode>read-only</sandbox_mode>
|
||||
<network_access>restricted</network_access>
|
||||
{}</environment_context>"#,
|
||||
cwd,
|
||||
match shell.name() {
|
||||
Some(name) => format!(" <shell>{name}</shell>\n"),
|
||||
None => String::new(),
|
||||
}
|
||||
<shell>{shell_name}</shell>
|
||||
</environment_context>"#
|
||||
)
|
||||
}
|
||||
|
||||
@@ -227,7 +224,7 @@ async fn prefixes_context_and_instructions_once_and_consistently_across_requests
|
||||
.await?;
|
||||
wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await;
|
||||
|
||||
let shell = default_user_shell().await;
|
||||
let shell = default_user_shell();
|
||||
let cwd_str = config.cwd.to_string_lossy();
|
||||
let expected_env_text = default_env_context_str(&cwd_str, &shell);
|
||||
let expected_ui_text = format!(
|
||||
@@ -345,6 +342,7 @@ async fn overrides_turn_context_but_keeps_cached_prefix_and_key_constant() -> an
|
||||
// After overriding the turn context, the environment context should be emitted again
|
||||
// reflecting the new approval policy and sandbox settings. Omit cwd because it did
|
||||
// not change.
|
||||
let shell = default_user_shell();
|
||||
let expected_env_text_2 = format!(
|
||||
r#"<environment_context>
|
||||
<approval_policy>never</approval_policy>
|
||||
@@ -353,8 +351,10 @@ async fn overrides_turn_context_but_keeps_cached_prefix_and_key_constant() -> an
|
||||
<writable_roots>
|
||||
<root>{}</root>
|
||||
</writable_roots>
|
||||
<shell>{}</shell>
|
||||
</environment_context>"#,
|
||||
writable.path().to_string_lossy(),
|
||||
writable.path().display(),
|
||||
shell.name()
|
||||
);
|
||||
let expected_env_msg_2 = serde_json::json!({
|
||||
"type": "message",
|
||||
@@ -522,6 +522,8 @@ async fn per_turn_overrides_keep_cached_prefix_and_key_constant() -> anyhow::Res
|
||||
"role": "user",
|
||||
"content": [ { "type": "input_text", "text": "hello 2" } ]
|
||||
});
|
||||
let shell = default_user_shell();
|
||||
|
||||
let expected_env_text_2 = format!(
|
||||
r#"<environment_context>
|
||||
<cwd>{}</cwd>
|
||||
@@ -531,9 +533,11 @@ async fn per_turn_overrides_keep_cached_prefix_and_key_constant() -> anyhow::Res
|
||||
<writable_roots>
|
||||
<root>{}</root>
|
||||
</writable_roots>
|
||||
<shell>{}</shell>
|
||||
</environment_context>"#,
|
||||
new_cwd.path().to_string_lossy(),
|
||||
writable.path().to_string_lossy(),
|
||||
new_cwd.path().display(),
|
||||
writable.path().display(),
|
||||
shell.name(),
|
||||
);
|
||||
let expected_env_msg_2 = serde_json::json!({
|
||||
"type": "message",
|
||||
@@ -610,7 +614,7 @@ async fn send_user_turn_with_no_changes_does_not_send_environment_context() -> a
|
||||
let body1 = req1.single_request().body_json();
|
||||
let body2 = req2.single_request().body_json();
|
||||
|
||||
let shell = default_user_shell().await;
|
||||
let shell = default_user_shell();
|
||||
let default_cwd_lossy = default_cwd.to_string_lossy();
|
||||
let expected_ui_text = format!(
|
||||
"# AGENTS.md instructions for {default_cwd_lossy}\n\n<INSTRUCTIONS>\nbe consistent and helpful\n</INSTRUCTIONS>"
|
||||
@@ -697,7 +701,7 @@ async fn send_user_turn_with_changes_sends_environment_context() -> anyhow::Resu
|
||||
let body1 = req1.single_request().body_json();
|
||||
let body2 = req2.single_request().body_json();
|
||||
|
||||
let shell = default_user_shell().await;
|
||||
let shell = default_user_shell();
|
||||
let expected_ui_text = format!(
|
||||
"# AGENTS.md instructions for {}\n\n<INSTRUCTIONS>\nbe consistent and helpful\n</INSTRUCTIONS>",
|
||||
default_cwd.to_string_lossy()
|
||||
@@ -717,14 +721,15 @@ async fn send_user_turn_with_changes_sends_environment_context() -> anyhow::Resu
|
||||
]);
|
||||
assert_eq!(body1["input"], expected_input_1);
|
||||
|
||||
let expected_env_msg_2 = text_user_input(
|
||||
let shell_name = shell.name();
|
||||
let expected_env_msg_2 = text_user_input(format!(
|
||||
r#"<environment_context>
|
||||
<approval_policy>never</approval_policy>
|
||||
<sandbox_mode>danger-full-access</sandbox_mode>
|
||||
<network_access>enabled</network_access>
|
||||
<shell>{shell_name}</shell>
|
||||
</environment_context>"#
|
||||
.to_string(),
|
||||
);
|
||||
));
|
||||
let expected_user_message_2 = text_user_input("hello 2".to_string());
|
||||
let expected_input_2 = serde_json::Value::Array(vec![
|
||||
expected_ui_msg,
|
||||
|
||||
@@ -2,6 +2,7 @@
|
||||
#![allow(clippy::expect_used)]
|
||||
|
||||
use anyhow::Result;
|
||||
use codex_core::config::Config;
|
||||
use codex_core::features::Feature;
|
||||
use codex_core::model_family::find_family_for_model;
|
||||
use codex_core::protocol::SandboxPolicy;
|
||||
@@ -40,6 +41,20 @@ const FIXTURE_JSON: &str = r#"{
|
||||
}
|
||||
"#;
|
||||
|
||||
fn configure_shell_command_model(output_type: ShellModelOutput, config: &mut Config) {
|
||||
if !matches!(output_type, ShellModelOutput::ShellCommand) {
|
||||
return;
|
||||
}
|
||||
|
||||
if let Some(shell_command_family) = find_family_for_model("test-gpt-5-codex") {
|
||||
if config.model_family.shell_type == shell_command_family.shell_type {
|
||||
return;
|
||||
}
|
||||
config.model = shell_command_family.slug.clone();
|
||||
config.model_family = shell_command_family;
|
||||
}
|
||||
}
|
||||
|
||||
fn shell_responses(
|
||||
call_id: &str,
|
||||
command: Vec<&str>,
|
||||
@@ -112,10 +127,7 @@ async fn shell_output_stays_json_without_freeform_apply_patch(
|
||||
config.features.disable(Feature::ApplyPatchFreeform);
|
||||
config.model = "gpt-5".to_string();
|
||||
config.model_family = find_family_for_model("gpt-5").expect("gpt-5 is a model family");
|
||||
if matches!(output_type, ShellModelOutput::ShellCommand) {
|
||||
config.features.enable(Feature::ShellCommandTool);
|
||||
}
|
||||
let _ = output_type;
|
||||
configure_shell_command_model(output_type, config);
|
||||
});
|
||||
let test = builder.build(&server).await?;
|
||||
|
||||
@@ -170,10 +182,7 @@ async fn shell_output_is_structured_with_freeform_apply_patch(
|
||||
let server = start_mock_server().await;
|
||||
let mut builder = test_codex().with_config(move |config| {
|
||||
config.features.enable(Feature::ApplyPatchFreeform);
|
||||
if matches!(output_type, ShellModelOutput::ShellCommand) {
|
||||
config.features.enable(Feature::ShellCommandTool);
|
||||
}
|
||||
let _ = output_type;
|
||||
configure_shell_command_model(output_type, config);
|
||||
});
|
||||
let test = builder.build(&server).await?;
|
||||
|
||||
@@ -223,10 +232,7 @@ async fn shell_output_preserves_fixture_json_without_serialization(
|
||||
config.features.disable(Feature::ApplyPatchFreeform);
|
||||
config.model = "gpt-5".to_string();
|
||||
config.model_family = find_family_for_model("gpt-5").expect("gpt-5 is a model family");
|
||||
if matches!(output_type, ShellModelOutput::ShellCommand) {
|
||||
config.features.enable(Feature::ShellCommandTool);
|
||||
}
|
||||
let _ = output_type;
|
||||
configure_shell_command_model(output_type, config);
|
||||
});
|
||||
let test = builder.build(&server).await?;
|
||||
|
||||
@@ -293,10 +299,7 @@ async fn shell_output_structures_fixture_with_serialization(
|
||||
let server = start_mock_server().await;
|
||||
let mut builder = test_codex().with_config(move |config| {
|
||||
config.features.enable(Feature::ApplyPatchFreeform);
|
||||
if matches!(output_type, ShellModelOutput::ShellCommand) {
|
||||
config.features.enable(Feature::ShellCommandTool);
|
||||
}
|
||||
let _ = output_type;
|
||||
configure_shell_command_model(output_type, config);
|
||||
});
|
||||
let test = builder.build(&server).await?;
|
||||
|
||||
@@ -358,15 +361,12 @@ async fn shell_output_for_freeform_tool_records_duration(
|
||||
let server = start_mock_server().await;
|
||||
let mut builder = test_codex().with_config(move |config| {
|
||||
config.include_apply_patch_tool = true;
|
||||
if matches!(output_type, ShellModelOutput::ShellCommand) {
|
||||
config.features.enable(Feature::ShellCommandTool);
|
||||
}
|
||||
let _ = output_type;
|
||||
configure_shell_command_model(output_type, config);
|
||||
});
|
||||
let test = builder.build(&server).await?;
|
||||
|
||||
let call_id = "shell-structured";
|
||||
let responses = shell_responses(call_id, vec!["/bin/bash", "-c", "sleep 1"], output_type)?;
|
||||
let responses = shell_responses(call_id, vec!["/bin/sh", "-c", "sleep 1"], output_type)?;
|
||||
let mock = mount_sse_sequence(&server, responses).await;
|
||||
|
||||
test.submit_turn_with_policy(
|
||||
@@ -417,10 +417,7 @@ async fn shell_output_reserializes_truncated_content(output_type: ShellModelOutp
|
||||
config.model_family =
|
||||
find_family_for_model("gpt-5.1-codex").expect("gpt-5.1-codex is a model family");
|
||||
config.tool_output_token_limit = Some(200);
|
||||
if matches!(output_type, ShellModelOutput::ShellCommand) {
|
||||
config.features.enable(Feature::ShellCommandTool);
|
||||
}
|
||||
let _ = output_type;
|
||||
configure_shell_command_model(output_type, config);
|
||||
});
|
||||
let test = builder.build(&server).await?;
|
||||
|
||||
@@ -722,9 +719,7 @@ async fn shell_output_is_structured_for_nonzero_exit(output_type: ShellModelOutp
|
||||
config.model_family =
|
||||
find_family_for_model("gpt-5.1-codex").expect("gpt-5.1-codex is a model family");
|
||||
config.include_apply_patch_tool = true;
|
||||
if matches!(output_type, ShellModelOutput::ShellCommand) {
|
||||
config.features.enable(Feature::ShellCommandTool);
|
||||
}
|
||||
configure_shell_command_model(output_type, config);
|
||||
});
|
||||
let test = builder.build(&server).await?;
|
||||
|
||||
@@ -760,7 +755,7 @@ async fn shell_command_output_is_freeform() -> Result<()> {
|
||||
|
||||
let server = start_mock_server().await;
|
||||
let mut builder = test_codex().with_config(move |config| {
|
||||
config.features.enable(Feature::ShellCommandTool);
|
||||
configure_shell_command_model(ShellModelOutput::ShellCommand, config);
|
||||
});
|
||||
let test = builder.build(&server).await?;
|
||||
|
||||
@@ -812,11 +807,7 @@ async fn shell_command_output_is_not_truncated_under_10k_bytes() -> Result<()> {
|
||||
skip_if_no_network!(Ok(()));
|
||||
|
||||
let server = start_mock_server().await;
|
||||
let mut builder = test_codex()
|
||||
.with_model("gpt-5.1")
|
||||
.with_config(move |config| {
|
||||
config.features.enable(Feature::ShellCommandTool);
|
||||
});
|
||||
let mut builder = test_codex().with_model("gpt-5.1");
|
||||
let test = builder.build(&server).await?;
|
||||
|
||||
let call_id = "shell-command";
|
||||
@@ -866,11 +857,7 @@ async fn shell_command_output_is_not_truncated_over_10k_bytes() -> Result<()> {
|
||||
skip_if_no_network!(Ok(()));
|
||||
|
||||
let server = start_mock_server().await;
|
||||
let mut builder = test_codex()
|
||||
.with_model("gpt-5.1")
|
||||
.with_config(move |config| {
|
||||
config.features.enable(Feature::ShellCommandTool);
|
||||
});
|
||||
let mut builder = test_codex().with_model("gpt-5.1");
|
||||
let test = builder.build(&server).await?;
|
||||
|
||||
let call_id = "shell-command";
|
||||
|
||||
codex-rs/core/tests/suite/text_encoding_fix.rs (new file, +77)
@@ -0,0 +1,77 @@
//! Integration test for the text encoding fix for issue #6178.
//!
//! These tests simulate VSCode's shell preview on Windows/WSL where the output
//! may be encoded with a legacy code page before it reaches Codex.

use codex_core::exec::StreamOutput;
use pretty_assertions::assert_eq;

#[test]
fn test_utf8_shell_output() {
    // Baseline: UTF-8 output should bypass the detector and remain unchanged.
    assert_eq!(decode_shell_output("пример".as_bytes()), "пример");
}

#[test]
fn test_cp1251_shell_output() {
    // VS Code shells on Windows frequently surface CP1251 bytes for Cyrillic text.
    assert_eq!(decode_shell_output(b"\xEF\xF0\xE8\xEC\xE5\xF0"), "пример");
}

#[test]
fn test_cp866_shell_output() {
    // Native cmd.exe still defaults to CP866; make sure we recognize that too.
    assert_eq!(decode_shell_output(b"\xAF\xE0\xA8\xAC\xA5\xE0"), "пример");
}

#[test]
fn test_windows_1252_smart_decoding() {
    // Smart detection should turn fancy quotes/dashes into the proper Unicode glyphs.
    assert_eq!(
        decode_shell_output(b"\x93\x94 test \x96 dash"),
        "\u{201C}\u{201D} test \u{2013} dash"
    );
}

#[test]
fn test_smart_decoding_improves_over_lossy_utf8() {
    // Regression guard: String::from_utf8_lossy() alone used to emit replacement chars here.
    let bytes = b"\x93\x94 test \x96 dash";
    assert!(
        String::from_utf8_lossy(bytes).contains('\u{FFFD}'),
        "lossy UTF-8 should inject replacement chars"
    );
    assert_eq!(
        decode_shell_output(bytes),
        "\u{201C}\u{201D} test \u{2013} dash",
        "smart decoding should keep curly quotes intact"
    );
}

#[test]
fn test_mixed_ascii_and_legacy_encoding() {
    // Commands tend to mix ASCII status text with Latin-1 bytes (e.g. café).
    assert_eq!(decode_shell_output(b"Output: caf\xE9"), "Output: café"); // codespell:ignore caf
}

#[test]
fn test_pure_latin1_shell_output() {
    // Latin-1 by itself should still decode correctly (regression coverage for the older tests).
    assert_eq!(decode_shell_output(b"caf\xE9"), "café"); // codespell:ignore caf
}

#[test]
fn test_invalid_bytes_still_fall_back_to_lossy() {
    // If detection fails, we still want the user to see replacement characters.
    let bytes = b"\xFF\xFE\xFD";
    assert_eq!(decode_shell_output(bytes), String::from_utf8_lossy(bytes));
}

fn decode_shell_output(bytes: &[u8]) -> String {
    StreamOutput {
        text: bytes.to_vec(),
        truncated_after_lines: None,
    }
    .from_utf8_lossy()
    .text
}
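For orientation while reading this new test file: the helper above routes raw bytes through `StreamOutput::from_utf8_lossy()`, and the tests pin down a detect-then-fall-back decoding strategy. A minimal standalone sketch of that strategy follows; it assumes a chardetng/encoding_rs-style detector, which is an assumption about the approach rather than codex-core's actual code.

```rust
// Sketch only: assumes the chardetng + encoding_rs crates; the real logic
// lives in codex-core's StreamOutput and may differ in detail.
fn decode_smart(bytes: &[u8]) -> String {
    // Fast path: valid UTF-8 passes through unchanged.
    if let Ok(s) = std::str::from_utf8(bytes) {
        return s.to_owned();
    }
    // Guess a legacy code page (CP1251, CP866, windows-1252, ...).
    let mut detector = chardetng::EncodingDetector::new();
    detector.feed(bytes, true);
    let encoding = detector.guess(None, true);
    let (decoded, _, had_errors) = encoding.decode(bytes);
    if !had_errors {
        return decoded.into_owned();
    }
    // Last resort: lossy UTF-8 with U+FFFD replacement characters,
    // matching test_invalid_bytes_still_fall_back_to_lossy above.
    String::from_utf8_lossy(bytes).into_owned()
}
```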
@@ -146,10 +146,11 @@ async fn non_parallel_tools_run_serially() -> anyhow::Result<()> {
skip_if_no_network!(Ok(()));

let server = start_mock_server().await;
let test = test_codex().build(&server).await?;
let mut builder = test_codex().with_model("gpt-5.1");
let test = builder.build(&server).await?;

let shell_args = json!({
"command": ["/bin/sh", "-c", "sleep 0.3"],
"command": "sleep 0.3",
"timeout_ms": 1_000,
});
let args_one = serde_json::to_string(&shell_args)?;
@@ -157,8 +158,8 @@ async fn non_parallel_tools_run_serially() -> anyhow::Result<()> {

let first_response = sse(vec![
json!({"type": "response.created", "response": {"id": "resp-1"}}),
ev_function_call("call-1", "shell", &args_one),
ev_function_call("call-2", "shell", &args_two),
ev_function_call("call-1", "shell_command", &args_one),
ev_function_call("call-2", "shell_command", &args_two),
ev_completed("resp-1"),
]);
let second_response = sse(vec![
@@ -167,7 +168,7 @@ async fn non_parallel_tools_run_serially() -> anyhow::Result<()> {
]);
mount_sse_sequence(&server, vec![first_response, second_response]).await;

let duration = run_turn_and_measure(&test, "run shell twice").await?;
let duration = run_turn_and_measure(&test, "run shell_command twice").await?;
assert_serial_duration(duration);

Ok(())
@@ -185,14 +186,14 @@ async fn mixed_tools_fall_back_to_serial() -> anyhow::Result<()> {
})
.to_string();
let shell_args = serde_json::to_string(&json!({
"command": ["/bin/sh", "-c", "sleep 0.3"],
"command": "sleep 0.3",
"timeout_ms": 1_000,
}))?;

let first_response = sse(vec![
json!({"type": "response.created", "response": {"id": "resp-1"}}),
ev_function_call("call-1", "test_sync_tool", &sync_args),
ev_function_call("call-2", "shell", &shell_args),
ev_function_call("call-2", "shell_command", &shell_args),
ev_completed("resp-1"),
]);
let second_response = sse(vec![
@@ -215,7 +216,7 @@ async fn tool_results_grouped() -> anyhow::Result<()> {
let test = build_codex_with_test_tool(&server).await?;

let shell_args = serde_json::to_string(&json!({
"command": ["/bin/sh", "-c", "echo 'shell output'"],
"command": "echo 'shell output'",
"timeout_ms": 1_000,
}))?;

@@ -223,9 +224,9 @@ async fn tool_results_grouped() -> anyhow::Result<()> {
&server,
sse(vec![
json!({"type": "response.created", "response": {"id": "resp-1"}}),
ev_function_call("call-1", "shell", &shell_args),
ev_function_call("call-2", "shell", &shell_args),
ev_function_call("call-3", "shell", &shell_args),
ev_function_call("call-1", "shell_command", &shell_args),
ev_function_call("call-2", "shell_command", &shell_args),
ev_function_call("call-3", "shell_command", &shell_args),
ev_completed("resp-1"),
]),
)
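The recurring edit across these hunks is the argument shape: the old generic `shell` tool took an argv vector, while the new `shell_command` tool takes a single command string (the "run it through a shell" reading of the new form is an inference from these tests, not a quoted spec). A small side-by-side illustration of the two payloads:

```rust
use serde_json::json;

fn shell_tool_args() -> (serde_json::Value, serde_json::Value) {
    // Old generic `shell` tool: an explicit argv vector.
    let old = json!({
        "command": ["/bin/sh", "-c", "sleep 0.3"],
        "timeout_ms": 1_000,
    });
    // New `shell_command` tool: one command string.
    let new = json!({
        "command": "sleep 0.3",
        "timeout_ms": 1_000,
    });
    (old, new)
}
```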
@@ -98,7 +98,7 @@ async fn truncate_function_error_trims_respond_to_model() -> Result<()> {
Ok(())
}

// Verifies that a standard tool call (shell) exceeding the model formatting
// Verifies that a standard tool call (shell_command) exceeding the model formatting
// limits is truncated before being sent back to the model.
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn tool_call_output_configured_limit_chars_type() -> Result<()> {
@@ -106,7 +106,7 @@ async fn tool_call_output_configured_limit_chars_type() -> Result<()> {

let server = start_mock_server().await;

// Use a model that exposes the generic shell tool.
// Use a model that exposes the shell_command tool.
let mut builder = test_codex().with_model("gpt-5.1").with_config(|config| {
config.tool_output_token_limit = Some(100_000);
});
@@ -114,28 +114,22 @@ async fn tool_call_output_configured_limit_chars_type() -> Result<()> {
let fixture = builder.build(&server).await?;

let call_id = "shell-too-large";
let args = if cfg!(windows) {
serde_json::json!({
"command": [
"powershell",
"-Command",
"for ($i=1; $i -le 100000; $i++) { Write-Output $i }"
],
"timeout_ms": 5_000,
})
let command = if cfg!(windows) {
"for ($i=1; $i -le 100000; $i++) { Write-Output $i }"
} else {
serde_json::json!({
"command": ["/bin/sh", "-c", "seq 1 100000"],
"timeout_ms": 5_000,
})
"seq 1 100000"
};
let args = serde_json::json!({
"command": command,
"timeout_ms": 5_000,
});

// First response: model tells us to run the tool; second: complete the turn.
mount_sse_once(
&server,
sse(vec![
responses::ev_response_created("resp-1"),
responses::ev_function_call(call_id, "shell", &serde_json::to_string(&args)?),
responses::ev_function_call(call_id, "shell_command", &serde_json::to_string(&args)?),
responses::ev_completed("resp-1"),
]),
)
@@ -167,7 +161,10 @@ async fn tool_call_output_configured_limit_chars_type() -> Result<()> {
"expected truncated shell output to be plain text"
);

assert_eq!(output.len(), 400097, "we should be almost 100k tokens");
assert!(
(400000..=401000).contains(&output.len()),
"we should be almost 100k tokens"
);

assert!(
!output.contains("tokens truncated"),
@@ -177,7 +174,7 @@ async fn tool_call_output_configured_limit_chars_type() -> Result<()> {
Ok(())
}

// Verifies that a standard tool call (shell) exceeding the model formatting
// Verifies that a standard tool call (shell_command) exceeding the model formatting
// limits is truncated before being sent back to the model.
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn tool_call_output_exceeds_limit_truncated_chars_limit() -> Result<()> {
@@ -185,34 +182,28 @@ async fn tool_call_output_exceeds_limit_truncated_chars_limit() -> Result<()> {

let server = start_mock_server().await;

// Use a model that exposes the generic shell tool.
// Use a model that exposes the shell_command tool.
let mut builder = test_codex().with_model("gpt-5.1");

let fixture = builder.build(&server).await?;

let call_id = "shell-too-large";
let args = if cfg!(windows) {
serde_json::json!({
"command": [
"powershell",
"-Command",
"for ($i=1; $i -le 100000; $i++) { Write-Output $i }"
],
"timeout_ms": 5_000,
})
let command = if cfg!(windows) {
"for ($i=1; $i -le 100000; $i++) { Write-Output $i }"
} else {
serde_json::json!({
"command": ["/bin/sh", "-c", "seq 1 100000"],
"timeout_ms": 5_000,
})
"seq 1 100000"
};
let args = serde_json::json!({
"command": command,
"timeout_ms": 5_000,
});

// First response: model tells us to run the tool; second: complete the turn.
mount_sse_once(
&server,
sse(vec![
responses::ev_response_created("resp-1"),
responses::ev_function_call(call_id, "shell", &serde_json::to_string(&args)?),
responses::ev_function_call(call_id, "shell_command", &serde_json::to_string(&args)?),
responses::ev_completed("resp-1"),
]),
)
@@ -244,15 +235,20 @@ async fn tool_call_output_exceeds_limit_truncated_chars_limit() -> Result<()> {
"expected truncated shell output to be plain text"
);

assert_eq!(output.len(), 9976); // ~10k characters
let truncated_pattern = r#"(?s)^Exit code: 0\nWall time: 0 seconds\nTotal output lines: 100000\nOutput:\n.*?…\d+ chars truncated….*$"#;
let truncated_pattern = r#"(?s)^Exit code: 0\nWall time: [0-9]+(?:\.[0-9]+)? seconds\nTotal output lines: 100000\nOutput:\n.*?…\d+ chars truncated….*$"#;

assert_regex_match(truncated_pattern, &output);

let len = output.len();
assert!(
(9_900..=10_100).contains(&len),
"expected ~10k chars after truncation, got {len}"
);

Ok(())
}
// Verifies that a standard tool call (shell) exceeding the model formatting
// Verifies that a standard tool call (shell_command) exceeding the model formatting
// limits is truncated before being sent back to the model.
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn tool_call_output_exceeds_limit_truncated_for_model() -> Result<()> {
@@ -260,7 +256,7 @@ async fn tool_call_output_exceeds_limit_truncated_for_model() -> Result<()> {

let server = start_mock_server().await;

// Use a model that exposes the generic shell tool.
// Use a model that exposes the shell_command tool.
let mut builder = test_codex().with_config(|config| {
config.model = "gpt-5.1-codex".to_string();
config.model_family =
@@ -269,28 +265,22 @@ async fn tool_call_output_exceeds_limit_truncated_for_model() -> Result<()> {
let fixture = builder.build(&server).await?;

let call_id = "shell-too-large";
let args = if cfg!(windows) {
serde_json::json!({
"command": [
"powershell",
"-Command",
"for ($i=1; $i -le 100000; $i++) { Write-Output $i }"
],
"timeout_ms": 5_000,
})
let command = if cfg!(windows) {
"for ($i=1; $i -le 100000; $i++) { Write-Output $i }"
} else {
serde_json::json!({
"command": ["/bin/sh", "-c", "seq 1 100000"],
"timeout_ms": 5_000,
})
"seq 1 100000"
};
let args = serde_json::json!({
"command": command,
"timeout_ms": 5_000,
});

// First response: model tells us to run the tool; second: complete the turn.
mount_sse_once(
&server,
sse(vec![
responses::ev_response_created("resp-1"),
responses::ev_function_call(call_id, "shell", &serde_json::to_string(&args)?),
responses::ev_function_call(call_id, "shell_command", &serde_json::to_string(&args)?),
responses::ev_completed("resp-1"),
]),
)
@@ -340,7 +330,7 @@ $"#;
Ok(())
}

// Ensures shell tool outputs that exceed the line limit are truncated only once.
// Ensures shell_command outputs that exceed the line limit are truncated only once.
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn tool_call_output_truncated_only_once() -> Result<()> {
skip_if_no_network!(Ok(()));
@@ -354,27 +344,21 @@ async fn tool_call_output_truncated_only_once() -> Result<()> {
});
let fixture = builder.build(&server).await?;
let call_id = "shell-single-truncation";
let args = if cfg!(windows) {
serde_json::json!({
"command": [
"powershell",
"-Command",
"for ($i=1; $i -le 10000; $i++) { Write-Output $i }"
],
"timeout_ms": 5_000,
})
let command = if cfg!(windows) {
"for ($i=1; $i -le 10000; $i++) { Write-Output $i }"
} else {
serde_json::json!({
"command": ["/bin/sh", "-c", "seq 1 10000"],
"timeout_ms": 5_000,
})
"seq 1 10000"
};
let args = serde_json::json!({
"command": command,
"timeout_ms": 5_000,
});

mount_sse_once(
&server,
sse(vec![
responses::ev_response_created("resp-1"),
responses::ev_function_call(call_id, "shell", &serde_json::to_string(&args)?),
responses::ev_function_call(call_id, "shell_command", &serde_json::to_string(&args)?),
responses::ev_completed("resp-1"),
]),
)
@@ -614,7 +598,7 @@ async fn token_policy_marker_reports_tokens() -> Result<()> {

let call_id = "shell-token-marker";
let args = json!({
"command": ["/bin/sh", "-c", "seq 1 150"],
"command": "seq 1 150",
"timeout_ms": 5_000,
});

@@ -622,7 +606,7 @@ async fn token_policy_marker_reports_tokens() -> Result<()> {
&server,
sse(vec![
ev_response_created("resp-1"),
ev_function_call(call_id, "shell", &serde_json::to_string(&args)?),
ev_function_call(call_id, "shell_command", &serde_json::to_string(&args)?),
ev_completed("resp-1"),
]),
)
@@ -645,7 +629,7 @@ async fn token_policy_marker_reports_tokens() -> Result<()> {
.function_call_output_text(call_id)
.context("shell output present")?;

let pattern = r#"(?s)^\{"output":"Total output lines: 150\\n\\n1\\n2\\n3\\n4\\n5\\n.*?…\d+ tokens truncated…7\\n138\\n139\\n140\\n141\\n142\\n143\\n144\\n145\\n146\\n147\\n148\\n149\\n150\\n","metadata":\{"exit_code":0,"duration_seconds":0\.0\}\}$"#;
let pattern = r"(?s)^Exit code: 0\nWall time: [0-9]+(?:\.[0-9]+)? seconds\nTotal output lines: 150\nOutput:\n1\n2\n3\n4\n5\n6\n7\n8\n9\n10\n11\n12\n13\n14\n15\n16\n17\n18\n19.*tokens truncated.*129\n130\n131\n132\n133\n134\n135\n136\n137\n138\n139\n140\n141\n142\n143\n144\n145\n146\n147\n148\n149\n150\n$";

assert_regex_match(pattern, &output);

@@ -667,7 +651,7 @@ async fn byte_policy_marker_reports_bytes() -> Result<()> {

let call_id = "shell-byte-marker";
let args = json!({
"command": ["/bin/sh", "-c", "seq 1 150"],
"command": "seq 1 150",
"timeout_ms": 5_000,
});

@@ -675,7 +659,7 @@ async fn byte_policy_marker_reports_bytes() -> Result<()> {
&server,
sse(vec![
ev_response_created("resp-1"),
ev_function_call(call_id, "shell", &serde_json::to_string(&args)?),
ev_function_call(call_id, "shell_command", &serde_json::to_string(&args)?),
ev_completed("resp-1"),
]),
)
@@ -698,16 +682,16 @@ async fn byte_policy_marker_reports_bytes() -> Result<()> {
.function_call_output_text(call_id)
.context("shell output present")?;

let pattern = r#"(?s)^\{"output":"Total output lines: 150\\n\\n1\\n2\\n3\\n4\\n5.*?…\d+ chars truncated…7\\n138\\n139\\n140\\n141\\n142\\n143\\n144\\n145\\n146\\n147\\n148\\n149\\n150\\n","metadata":\{"exit_code":0,"duration_seconds":0\.0\}\}$"#;
let pattern = r"(?s)^Exit code: 0\nWall time: [0-9]+(?:\.[0-9]+)? seconds\nTotal output lines: 150\nOutput:\n1\n2\n3\n4\n5\n6\n7\n8\n9\n10\n11\n12\n13\n14\n15\n16\n17\n18\n19.*chars truncated.*129\n130\n131\n132\n133\n134\n135\n136\n137\n138\n139\n140\n141\n142\n143\n144\n145\n146\n147\n148\n149\n150\n$";

assert_regex_match(pattern, &output);

Ok(())
}

// Shell tool output should remain intact when the config opts into a large token budget.
// shell_command output should remain intact when the config opts into a large token budget.
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn shell_tool_output_not_truncated_with_custom_limit() -> Result<()> {
async fn shell_command_output_not_truncated_with_custom_limit() -> Result<()> {
skip_if_no_network!(Ok(()));

let server = start_mock_server().await;
@@ -721,7 +705,7 @@ async fn shell_tool_output_not_truncated_with_custom_limit() -> Result<()> {

let call_id = "shell-no-trunc";
let args = json!({
"command": ["/bin/sh", "-c", "seq 1 1000"],
"command": "seq 1 1000",
"timeout_ms": 5_000,
});
let expected_body: String = (1..=1000).map(|i| format!("{i}\n")).collect();
@@ -730,7 +714,7 @@ async fn shell_command_output_not_truncated_with_custom_limit() -> Result<()> {
&server,
sse(vec![
ev_response_created("resp-1"),
ev_function_call(call_id, "shell", &serde_json::to_string(&args)?),
ev_function_call(call_id, "shell_command", &serde_json::to_string(&args)?),
ev_completed("resp-1"),
]),
)
@@ -904,6 +904,98 @@ async fn exec_command_reports_chunk_and_exit_metadata() -> Result<()> {
Ok(())
}

#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn unified_exec_respects_early_exit_notifications() -> Result<()> {
skip_if_no_network!(Ok(()));
skip_if_sandbox!(Ok(()));

let server = start_mock_server().await;

let mut builder = test_codex().with_config(|config| {
config.features.enable(Feature::UnifiedExec);
});
let TestCodex {
codex,
cwd,
session_configured,
..
} = builder.build(&server).await?;

let call_id = "uexec-early-exit";
let args = serde_json::json!({
"cmd": "sleep 0.05",
"yield_time_ms": 31415,
});

let responses = vec![
sse(vec![
ev_response_created("resp-1"),
ev_function_call(call_id, "exec_command", &serde_json::to_string(&args)?),
ev_completed("resp-1"),
]),
sse(vec![
ev_assistant_message("msg-1", "done"),
ev_completed("resp-2"),
]),
];
mount_sse_sequence(&server, responses).await;

let session_model = session_configured.model.clone();

codex
.submit(Op::UserTurn {
items: vec![UserInput::Text {
text: "watch early exit timing".into(),
}],
final_output_json_schema: None,
cwd: cwd.path().to_path_buf(),
approval_policy: AskForApproval::Never,
sandbox_policy: SandboxPolicy::DangerFullAccess,
model: session_model,
effort: None,
summary: ReasoningSummary::Auto,
})
.await?;

wait_for_event(&codex, |event| matches!(event, EventMsg::TaskComplete(_))).await;

let requests = server.received_requests().await.expect("recorded requests");
assert!(!requests.is_empty(), "expected at least one POST request");

let bodies = requests
.iter()
.map(|req| req.body_json::<Value>().expect("request json"))
.collect::<Vec<_>>();

let outputs = collect_tool_outputs(&bodies)?;
let output = outputs
.get(call_id)
.expect("missing early exit unified_exec output");

assert!(
output.session_id.is_none(),
"short-lived process should not keep a session alive"
);
assert_eq!(
output.exit_code,
Some(0),
"short-lived process should exit successfully"
);

let wall_time = output.wall_time_seconds;
assert!(
wall_time < 0.75,
"wall_time should reflect early exit rather than the full yield time; got {wall_time}"
);
assert!(
output.output.is_empty(),
"sleep command should not emit output, got {:?}",
output.output
);

Ok(())
}

#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn write_stdin_returns_exit_metadata_and_clears_session() -> Result<()> {
skip_if_no_network!(Ok(()));
@@ -1530,8 +1622,8 @@ async fn unified_exec_formats_large_output_summary() -> Result<()> {
} = builder.build(&server).await?;

let script = r#"python3 - <<'PY'
for i in range(10000):
    print("token token ")
import sys
sys.stdout.write("token token \n" * 5000)
PY
"#;
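The early-exit test above asserts that a `sleep 0.05` child returns well before the 31.4-second yield window. The behavior under test is essentially a race between child exit and the yield timer; a hypothetical sketch of that shape (the name and signature are illustrative, not codex-core's actual API):

```rust
use std::time::Duration;

// Hypothetical helper: returns Some(exit_code) if the child exits within
// the yield window, or None if it is still running when the window lapses.
async fn wait_with_yield(
    mut child: tokio::process::Child,
    yield_time: Duration,
) -> Option<i32> {
    tokio::select! {
        status = child.wait() => status.ok().and_then(|s| s.code()),
        _ = tokio::time::sleep(yield_time) => None, // keep the session alive
    }
}
```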
@@ -279,23 +279,19 @@ async fn user_shell_command_is_truncated_only_once() -> anyhow::Result<()> {
config.tool_output_token_limit = Some(100);
config.model = "gpt-5.1-codex".to_string();
config.model_family =
find_family_for_model("gpt-5-codex").expect("gpt-5-codex is a model family");
find_family_for_model("gpt-5.1-codex").expect("gpt-5.1-codex is a model family");
});
let fixture = builder.build(&server).await?;

let call_id = "user-shell-double-truncation";
let args = if cfg!(windows) {
serde_json::json!({
"command": [
"powershell",
"-Command",
"for ($i=1; $i -le 2000; $i++) { Write-Output $i }"
],
"command": "for ($i=1; $i -le 2000; $i++) { Write-Output $i }",
"timeout_ms": 5_000,
})
} else {
serde_json::json!({
"command": ["/bin/sh", "-c", "seq 1 2000"],
"command": "seq 1 2000",
"timeout_ms": 5_000,
})
};
@@ -304,7 +300,7 @@ async fn user_shell_command_is_truncated_only_once() -> anyhow::Result<()> {
&server,
sse(vec![
ev_response_created("resp-1"),
ev_function_call(call_id, "shell", &serde_json::to_string(&args)?),
ev_function_call(call_id, "shell_command", &serde_json::to_string(&args)?),
ev_completed("resp-1"),
]),
)
@@ -319,19 +315,22 @@ async fn user_shell_command_is_truncated_only_once() -> anyhow::Result<()> {
.await;

fixture
.submit_turn_with_policy("trigger big shell output", SandboxPolicy::DangerFullAccess)
.submit_turn_with_policy(
"trigger big shell_command output",
SandboxPolicy::DangerFullAccess,
)
.await?;

let output = mock2
.single_request()
.function_call_output_text(call_id)
.context("function_call_output present for shell call")?;
.context("function_call_output present for shell_command call")?;

let truncation_headers = output.matches("Total output lines:").count();

assert_eq!(
truncation_headers, 1,
"shell output should carry only one truncation header: {output}"
"shell_command output should carry only one truncation header: {output}"
);

Ok(())
@@ -49,6 +49,7 @@ tokio = { workspace = true, features = [
"rt-multi-thread",
"signal",
] }
tokio-util = { workspace = true }
tracing = { workspace = true }
tracing-subscriber = { workspace = true, features = ["env-filter", "fmt"] }

@@ -71,6 +71,7 @@ mod escalation_policy;
mod mcp;
mod mcp_escalation_policy;
mod socket;
mod stopwatch;

/// Default value of --execve option relative to the current executable.
/// Note this must match the name of the binary as specified in Cargo.toml.

@@ -13,6 +13,7 @@ use codex_core::exec::process_exec_tool_call;
use codex_core::get_platform_sandbox;
use codex_core::protocol::SandboxPolicy;
use tokio::process::Command;
use tokio_util::sync::CancellationToken;

use crate::posix::escalate_protocol::BASH_EXEC_WRAPPER_ENV_VAR;
use crate::posix::escalate_protocol::ESCALATE_SOCKET_ENV_VAR;
@@ -24,6 +25,7 @@ use crate::posix::escalate_protocol::SuperExecResult;
use crate::posix::escalation_policy::EscalationPolicy;
use crate::posix::socket::AsyncDatagramSocket;
use crate::posix::socket::AsyncSocket;
use codex_core::exec::ExecExpiration;

pub(crate) struct EscalateServer {
bash_path: PathBuf,
@@ -48,7 +50,7 @@ impl EscalateServer {
command: String,
env: HashMap<String, String>,
workdir: PathBuf,
timeout_ms: Option<u64>,
cancel_rx: CancellationToken,
) -> anyhow::Result<ExecResult> {
let (escalate_server, escalate_client) = AsyncDatagramSocket::pair()?;
let client_socket = escalate_client.into_inner();
@@ -79,7 +81,7 @@ impl EscalateServer {
command,
],
cwd: PathBuf::from(&workdir),
timeout_ms,
expiration: ExecExpiration::Cancellation(cancel_rx),
env,
with_escalated_permissions: None,
justification: None,

@@ -22,6 +22,7 @@ use crate::posix::escalate_server::EscalateServer;
use crate::posix::escalate_server::{self};
use crate::posix::mcp_escalation_policy::ExecPolicy;
use crate::posix::mcp_escalation_policy::McpEscalationPolicy;
use crate::posix::stopwatch::Stopwatch;

/// Path to our patched bash.
const CODEX_BASH_PATH_ENV_VAR: &str = "CODEX_BASH_PATH";
@@ -87,10 +88,17 @@ impl ExecTool {
context: RequestContext<RoleServer>,
Parameters(params): Parameters<ExecParams>,
) -> Result<CallToolResult, McpError> {
let effective_timeout = Duration::from_millis(
params
.timeout_ms
.unwrap_or(codex_core::exec::DEFAULT_EXEC_COMMAND_TIMEOUT_MS),
);
let stopwatch = Stopwatch::new(effective_timeout);
let cancel_token = stopwatch.cancellation_token();
let escalate_server = EscalateServer::new(
self.bash_path.clone(),
self.execve_wrapper.clone(),
McpEscalationPolicy::new(self.policy, context),
McpEscalationPolicy::new(self.policy, context, stopwatch.clone()),
);
let result = escalate_server
.exec(
@@ -98,7 +106,7 @@ impl ExecTool {
// TODO: use ShellEnvironmentPolicy
std::env::vars().collect(),
PathBuf::from(&params.workdir),
params.timeout_ms,
cancel_token,
)
.await
.map_err(|e| McpError::internal_error(e.to_string(), None))?;

@@ -10,6 +10,7 @@ use rmcp::service::RequestContext;

use crate::posix::escalate_protocol::EscalateAction;
use crate::posix::escalation_policy::EscalationPolicy;
use crate::posix::stopwatch::Stopwatch;

/// This is the policy which decides how to handle an exec() call.
///
@@ -34,11 +35,20 @@ pub(crate) enum ExecPolicyOutcome {
pub(crate) struct McpEscalationPolicy {
policy: ExecPolicy,
context: RequestContext<RoleServer>,
stopwatch: Stopwatch,
}

impl McpEscalationPolicy {
pub(crate) fn new(policy: ExecPolicy, context: RequestContext<RoleServer>) -> Self {
Self { policy, context }
pub(crate) fn new(
policy: ExecPolicy,
context: RequestContext<RoleServer>,
stopwatch: Stopwatch,
) -> Self {
Self {
policy,
context,
stopwatch,
}
}

async fn prompt(
@@ -54,25 +64,34 @@ impl McpEscalationPolicy {
} else {
format!("{} {}", file.display(), args)
};
context
.peer
.create_elicitation(CreateElicitationRequestParam {
message: format!("Allow agent to run `{command}` in `{}`?", workdir.display()),
requested_schema: ElicitationSchema::builder()
.title("Execution Permission Request")
.optional_string_with("reason", |schema| {
schema.description("Optional reason for allowing or denying execution")
self.stopwatch
.pause_for(async {
context
.peer
.create_elicitation(CreateElicitationRequestParam {
message: format!(
"Allow agent to run `{command}` in `{}`?",
workdir.display()
),
requested_schema: ElicitationSchema::builder()
.title("Execution Permission Request")
.optional_string_with("reason", |schema| {
schema.description(
"Optional reason for allowing or denying execution",
)
})
.build()
.map_err(|e| {
McpError::internal_error(
format!("failed to build elicitation schema: {e}"),
None,
)
})?,
})
.build()
.map_err(|e| {
McpError::internal_error(
format!("failed to build elicitation schema: {e}"),
None,
)
})?,
.await
.map_err(|e| McpError::internal_error(e.to_string(), None))
})
.await
.map_err(|e| McpError::internal_error(e.to_string(), None))
}
}
codex-rs/exec-server/src/posix/stopwatch.rs (new file, +211)
@@ -0,0 +1,211 @@
use std::future::Future;
use std::sync::Arc;
use std::time::Duration;
use std::time::Instant;

use tokio::sync::Mutex;
use tokio::sync::Notify;
use tokio_util::sync::CancellationToken;

#[derive(Clone, Debug)]
pub(crate) struct Stopwatch {
    limit: Duration,
    inner: Arc<Mutex<StopwatchState>>,
    notify: Arc<Notify>,
}

#[derive(Debug)]
struct StopwatchState {
    elapsed: Duration,
    running_since: Option<Instant>,
    active_pauses: u32,
}

impl Stopwatch {
    pub(crate) fn new(limit: Duration) -> Self {
        Self {
            inner: Arc::new(Mutex::new(StopwatchState {
                elapsed: Duration::ZERO,
                running_since: Some(Instant::now()),
                active_pauses: 0,
            })),
            notify: Arc::new(Notify::new()),
            limit,
        }
    }

    pub(crate) fn cancellation_token(&self) -> CancellationToken {
        let limit = self.limit;
        let token = CancellationToken::new();
        let cancel = token.clone();
        let inner = Arc::clone(&self.inner);
        let notify = Arc::clone(&self.notify);
        tokio::spawn(async move {
            loop {
                let (remaining, running) = {
                    let guard = inner.lock().await;
                    let elapsed = guard.elapsed
                        + guard
                            .running_since
                            .map(|since| since.elapsed())
                            .unwrap_or_default();
                    if elapsed >= limit {
                        break;
                    }
                    (limit - elapsed, guard.running_since.is_some())
                };

                if !running {
                    notify.notified().await;
                    continue;
                }

                let sleep = tokio::time::sleep(remaining);
                tokio::pin!(sleep);
                tokio::select! {
                    _ = &mut sleep => {
                        break;
                    }
                    _ = notify.notified() => {
                        continue;
                    }
                }
            }
            cancel.cancel();
        });
        token
    }

    /// Runs `fut`, pausing the stopwatch while the future is pending. The clock
    /// resumes automatically when the future completes. Nested/overlapping
    /// calls are reference-counted so the stopwatch only resumes when every
    /// pause is lifted.
    pub(crate) async fn pause_for<F, T>(&self, fut: F) -> T
    where
        F: Future<Output = T>,
    {
        self.pause().await;
        let result = fut.await;
        self.resume().await;
        result
    }

    async fn pause(&self) {
        let mut guard = self.inner.lock().await;
        guard.active_pauses += 1;
        if guard.active_pauses == 1
            && let Some(since) = guard.running_since.take()
        {
            guard.elapsed += since.elapsed();
            self.notify.notify_waiters();
        }
    }

    async fn resume(&self) {
        let mut guard = self.inner.lock().await;
        if guard.active_pauses == 0 {
            return;
        }
        guard.active_pauses -= 1;
        if guard.active_pauses == 0 && guard.running_since.is_none() {
            guard.running_since = Some(Instant::now());
            self.notify.notify_waiters();
        }
    }
}

#[cfg(test)]
mod tests {
    use super::Stopwatch;
    use tokio::time::Duration;
    use tokio::time::Instant;
    use tokio::time::sleep;
    use tokio::time::timeout;

    #[tokio::test]
    async fn cancellation_receiver_fires_after_limit() {
        let stopwatch = Stopwatch::new(Duration::from_millis(50));
        let token = stopwatch.cancellation_token();
        let start = Instant::now();
        token.cancelled().await;
        assert!(start.elapsed() >= Duration::from_millis(50));
    }

    #[tokio::test]
    async fn pause_prevents_timeout_until_resumed() {
        let stopwatch = Stopwatch::new(Duration::from_millis(50));
        let token = stopwatch.cancellation_token();

        let pause_handle = tokio::spawn({
            let stopwatch = stopwatch.clone();
            async move {
                stopwatch
                    .pause_for(async {
                        sleep(Duration::from_millis(100)).await;
                    })
                    .await;
            }
        });

        assert!(
            timeout(Duration::from_millis(30), token.cancelled())
                .await
                .is_err()
        );

        pause_handle.await.expect("pause task should finish");

        token.cancelled().await;
    }

    #[tokio::test]
    async fn overlapping_pauses_only_resume_once() {
        let stopwatch = Stopwatch::new(Duration::from_millis(50));
        let token = stopwatch.cancellation_token();

        // First pause.
        let pause1 = {
            let stopwatch = stopwatch.clone();
            tokio::spawn(async move {
                stopwatch
                    .pause_for(async {
                        sleep(Duration::from_millis(80)).await;
                    })
                    .await;
            })
        };

        // Overlapping pause that ends sooner.
        let pause2 = {
            let stopwatch = stopwatch.clone();
            tokio::spawn(async move {
                stopwatch
                    .pause_for(async {
                        sleep(Duration::from_millis(30)).await;
                    })
                    .await;
            })
        };

        // While both pauses are active, the cancellation should not fire.
        assert!(
            timeout(Duration::from_millis(40), token.cancelled())
                .await
                .is_err()
        );

        pause2.await.expect("short pause should complete");

        // Still paused because the long pause is active.
        assert!(
            timeout(Duration::from_millis(30), token.cancelled())
                .await
                .is_err()
        );

        pause1.await.expect("long pause should complete");

        // Now the stopwatch should resume and hit the limit shortly after.
        token.cancelled().await;
    }
}
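Putting the stopwatch together with the MCP escalation change above, the intended flow appears to be: derive the stopwatch from the effective timeout, hand its cancellation token to the exec call, and wrap any user-facing elicitation in `pause_for` so approval latency is not billed against the command. A minimal sketch under those assumptions (`prompt_user` and `run_command` are placeholders, not real exec-server functions):

```rust
use std::time::Duration;

// Sketch of the usage pattern implied by this diff.
async fn exec_with_paused_prompts(stopwatch: Stopwatch) {
    let cancel = stopwatch.cancellation_token();

    // Time spent waiting on the user is excluded from the limit.
    let approved = stopwatch.pause_for(prompt_user()).await;

    if approved {
        tokio::select! {
            _ = cancel.cancelled() => eprintln!("command timed out"),
            _ = run_command() => {}
        }
    }
}

async fn prompt_user() -> bool { true } // placeholder for the elicitation
async fn run_command() {} // placeholder for the sandboxed exec
```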
@@ -101,7 +101,7 @@ pub struct ResumeArgs {
pub session_id: Option<String>,

/// Resume the most recent recorded session (newest) without specifying an id.
#[arg(long = "last", default_value_t = false, conflicts_with = "session_id")]
#[arg(long = "last", default_value_t = false)]
pub last: bool,

/// Prompt to send after resuming the session. If `-` is used, read from stdin.

@@ -161,7 +161,7 @@ impl EventProcessor for EventProcessorWithHumanOutput {
fn process_event(&mut self, event: Event) -> CodexStatus {
let Event { id: _, msg } = event;
match msg {
EventMsg::Error(ErrorEvent { message }) => {
EventMsg::Error(ErrorEvent { message, .. }) => {
let prefix = "ERROR:".style(self.red);
ts_msg!(self, "{prefix} {message}");
}
@@ -221,7 +221,7 @@ impl EventProcessor for EventProcessorWithHumanOutput {
EventMsg::BackgroundEvent(BackgroundEventEvent { message }) => {
ts_msg!(self, "{}", message.style(self.dimmed));
}
EventMsg::StreamError(StreamErrorEvent { message }) => {
EventMsg::StreamError(StreamErrorEvent { message, .. }) => {
ts_msg!(self, "{}", message.style(self.dimmed));
}
EventMsg::TaskStarted(_) => {

@@ -144,6 +144,7 @@ pub enum CommandExecutionStatus {
InProgress,
Completed,
Failed,
Declined,
}

/// A command executed by the agent.

@@ -82,7 +82,21 @@ pub async fn run_main(cli: Cli, codex_linux_sandbox_exe: Option<PathBuf>) -> any
let prompt_arg = match &command {
// Allow prompt before the subcommand by falling back to the parent-level prompt
// when the Resume subcommand did not provide its own prompt.
Some(ExecCommand::Resume(args)) => args.prompt.clone().or(prompt),
Some(ExecCommand::Resume(args)) => {
let resume_prompt = args
.prompt
.clone()
// When using `resume --last <PROMPT>`, clap still parses the first positional
// as `session_id`. Reinterpret it as the prompt so the flag works with JSON mode.
.or_else(|| {
if args.last {
args.session_id.clone()
} else {
None
}
});
resume_prompt.or(prompt)
}
None => prompt,
};
@@ -47,6 +47,7 @@ use codex_exec::exec_events::WebSearchItem;
use codex_protocol::plan_tool::PlanItemArg;
use codex_protocol::plan_tool::StepStatus;
use codex_protocol::plan_tool::UpdatePlanArgs;
use codex_protocol::protocol::CodexErrorInfo;
use mcp_types::CallToolResult;
use mcp_types::ContentBlock;
use mcp_types::TextContent;
@@ -539,6 +540,7 @@ fn error_event_produces_error() {
"e1",
EventMsg::Error(codex_core::protocol::ErrorEvent {
message: "boom".to_string(),
codex_error_info: Some(CodexErrorInfo::Other),
}),
));
assert_eq!(
@@ -578,6 +580,7 @@ fn stream_error_event_produces_error() {
"e1",
EventMsg::StreamError(codex_core::protocol::StreamErrorEvent {
message: "retrying".to_string(),
codex_error_info: Some(CodexErrorInfo::Other),
}),
));
assert_eq!(
@@ -596,6 +599,7 @@ fn error_followed_by_task_complete_produces_turn_failed() {
"e1",
EventMsg::Error(ErrorEvent {
message: "boom".to_string(),
codex_error_info: Some(CodexErrorInfo::Other),
}),
);
assert_eq!(
@@ -123,6 +123,60 @@ fn exec_resume_last_appends_to_existing_file() -> anyhow::Result<()> {
Ok(())
}

#[test]
fn exec_resume_last_accepts_prompt_after_flag_in_json_mode() -> anyhow::Result<()> {
let test = test_codex_exec();
let fixture =
Path::new(env!("CARGO_MANIFEST_DIR")).join("tests/fixtures/cli_responses_fixture.sse");

// 1) First run: create a session with a unique marker in the content.
let marker = format!("resume-last-json-{}", Uuid::new_v4());
let prompt = format!("echo {marker}");

test.cmd()
.env("CODEX_RS_SSE_FIXTURE", &fixture)
.env("OPENAI_BASE_URL", "http://unused.local")
.arg("--skip-git-repo-check")
.arg("-C")
.arg(env!("CARGO_MANIFEST_DIR"))
.arg(&prompt)
.assert()
.success();

// Find the created session file containing the marker.
let sessions_dir = test.home_path().join("sessions");
let path = find_session_file_containing_marker(&sessions_dir, &marker)
.expect("no session file found after first run");

// 2) Second run: resume the most recent file and pass the prompt after --last.
let marker2 = format!("resume-last-json-2-{}", Uuid::new_v4());
let prompt2 = format!("echo {marker2}");

test.cmd()
.env("CODEX_RS_SSE_FIXTURE", &fixture)
.env("OPENAI_BASE_URL", "http://unused.local")
.arg("--skip-git-repo-check")
.arg("-C")
.arg(env!("CARGO_MANIFEST_DIR"))
.arg("--json")
.arg("resume")
.arg("--last")
.arg(&prompt2)
.assert()
.success();

let resumed_path = find_session_file_containing_marker(&sessions_dir, &marker2)
.expect("no resumed session file containing marker2");
assert_eq!(
resumed_path, path,
"resume --last should append to existing file"
);
let content = std::fs::read_to_string(&resumed_path)?;
assert!(content.contains(&marker));
assert!(content.contains(&marker2));
Ok(())
}

#[test]
fn exec_resume_by_id_appends_to_existing_file() -> anyhow::Result<()> {
let test = test_codex_exec();
@@ -20,18 +20,18 @@ prefix_rule(
```

## CLI
- Provide one or more policy files (for example `src/default.codexpolicy`) to check a command:
- From the Codex CLI, run the `codex execpolicy check` subcommand with one or more policy files (for example `src/default.codexpolicy`) to check a command:
```bash
codex execpolicy check --policy path/to/policy.codexpolicy git status
```
- Pass multiple `--policy` flags to merge rules, evaluated in the order provided, and use `--pretty` for formatted JSON.
- You can also run the standalone dev binary directly during development:
```bash
cargo run -p codex-execpolicy -- check --policy path/to/policy.codexpolicy git status
```
- Pass multiple `--policy` flags to merge rules, evaluated in the order provided:
```bash
cargo run -p codex-execpolicy -- check --policy base.codexpolicy --policy overrides.codexpolicy git status
```
- Output is JSON by default; pass `--pretty` for pretty-printed JSON
- Example outcomes:
  - Match: `{"match": { ... "decision": "allow" ... }}`
  - No match: `"noMatch"`
  - No match: `{"noMatch": {}}`

## Response shapes
- Match:
@@ -53,8 +53,10 @@ cargo run -p codex-execpolicy -- check --policy base.codexpolicy --policy overri

- No match:
```json
"noMatch"
{"noMatch": {}}
```

- `matchedRules` lists every rule whose prefix matched the command; `matchedPrefix` is the exact prefix that matched.
- The effective `decision` is the strictest severity across all matches (`forbidden` > `prompt` > `allow`).

Note: `execpolicy` commands are still in preview. The API may have breaking changes in the future.
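The "strictest severity wins" rule quoted in that README hunk is easy to state as an ordering. A standalone illustration (this enum is a stand-in for the purpose of the example; the crate's actual `Decision` type may be defined differently):

```rust
// Ordered from least to most strict, so `max` picks the effective decision.
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)]
enum Decision {
    Allow,
    Prompt,
    Forbidden,
}

fn effective_decision(matches: &[Decision]) -> Option<Decision> {
    matches.iter().copied().max()
}

fn main() {
    let decision = effective_decision(&[Decision::Allow, Decision::Prompt]);
    assert_eq!(decision, Some(Decision::Prompt));
}
```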
codex-rs/execpolicy/src/execpolicycheck.rs (new file, +67)
@@ -0,0 +1,67 @@
use std::fs;
use std::path::PathBuf;

use anyhow::Context;
use anyhow::Result;
use clap::Parser;

use crate::Evaluation;
use crate::Policy;
use crate::PolicyParser;

/// Arguments for evaluating a command against one or more execpolicy files.
#[derive(Debug, Parser, Clone)]
pub struct ExecPolicyCheckCommand {
    /// Paths to execpolicy files to evaluate (repeatable).
    #[arg(short = 'p', long = "policy", value_name = "PATH", required = true)]
    pub policies: Vec<PathBuf>,

    /// Pretty-print the JSON output.
    #[arg(long)]
    pub pretty: bool,

    /// Command tokens to check against the policy.
    #[arg(
        value_name = "COMMAND",
        required = true,
        trailing_var_arg = true,
        allow_hyphen_values = true
    )]
    pub command: Vec<String>,
}

impl ExecPolicyCheckCommand {
    /// Load the policies for this command, evaluate the command, and render JSON output.
    pub fn run(&self) -> Result<()> {
        let policy = load_policies(&self.policies)?;
        let evaluation = policy.check(&self.command);

        let json = format_evaluation_json(&evaluation, self.pretty)?;
        println!("{json}");

        Ok(())
    }
}

pub fn format_evaluation_json(evaluation: &Evaluation, pretty: bool) -> Result<String> {
    if pretty {
        serde_json::to_string_pretty(evaluation).map_err(Into::into)
    } else {
        serde_json::to_string(evaluation).map_err(Into::into)
    }
}

pub fn load_policies(policy_paths: &[PathBuf]) -> Result<Policy> {
    let mut parser = PolicyParser::new();

    for policy_path in policy_paths {
        let policy_file_contents = fs::read_to_string(policy_path)
            .with_context(|| format!("failed to read policy at {}", policy_path.display()))?;
        let policy_identifier = policy_path.to_string_lossy().to_string();
        parser
            .parse(&policy_identifier, &policy_file_contents)
            .with_context(|| format!("failed to parse policy at {}", policy_path.display()))?;
    }

    Ok(parser.build())
}
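Because the check logic now lives in the library crate rather than the dev binary, it can also be driven programmatically. A minimal sketch, assuming `Policy::check` accepts a slice of command tokens as `run()` above suggests, and using hypothetical policy paths:

```rust
use std::path::PathBuf;

use codex_execpolicy::execpolicycheck::format_evaluation_json;
use codex_execpolicy::execpolicycheck::load_policies;

fn check_git_status() -> anyhow::Result<()> {
    // Hypothetical path; any .codexpolicy file would do here.
    let policy = load_policies(&[PathBuf::from("base.codexpolicy")])?;
    let command = vec!["git".to_string(), "status".to_string()];
    let evaluation = policy.check(&command);
    println!("{}", format_evaluation_json(&evaluation, /* pretty = */ true)?);
    Ok(())
}
```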
@@ -1,5 +1,6 @@
pub mod decision;
pub mod error;
pub mod execpolicycheck;
pub mod parser;
pub mod policy;
pub mod rule;
@@ -7,6 +8,7 @@ pub mod rule;
pub use decision::Decision;
pub use error::Error;
pub use error::Result;
pub use execpolicycheck::ExecPolicyCheckCommand;
pub use parser::PolicyParser;
pub use policy::Evaluation;
pub use policy::Policy;
@@ -1,66 +1,22 @@
use std::fs;
use std::path::PathBuf;

use anyhow::Context;
use anyhow::Result;
use clap::Parser;
use codex_execpolicy::PolicyParser;
use codex_execpolicy::ExecPolicyCheckCommand;

/// CLI for evaluating exec policies
#[derive(Parser)]
#[command(name = "codex-execpolicy")]
enum Cli {
/// Evaluate a command against a policy.
Check {
#[arg(short, long = "policy", value_name = "PATH", required = true)]
policies: Vec<PathBuf>,

/// Pretty-print the JSON output.
#[arg(long)]
pretty: bool,

/// Command tokens to check.
#[arg(
value_name = "COMMAND",
required = true,
trailing_var_arg = true,
allow_hyphen_values = true
)]
command: Vec<String>,
},
Check(ExecPolicyCheckCommand),
}

fn main() -> Result<()> {
let cli = Cli::parse();
match cli {
Cli::Check {
policies,
command,
pretty,
} => cmd_check(policies, command, pretty),
Cli::Check(cmd) => cmd_check(cmd),
}
}

fn cmd_check(policy_paths: Vec<PathBuf>, args: Vec<String>, pretty: bool) -> Result<()> {
let policy = load_policies(&policy_paths)?;

let eval = policy.check(&args);
let json = if pretty {
serde_json::to_string_pretty(&eval)?
} else {
serde_json::to_string(&eval)?
};
println!("{json}");
Ok(())
}

fn load_policies(policy_paths: &[PathBuf]) -> Result<codex_execpolicy::Policy> {
let mut parser = PolicyParser::new();
for policy_path in policy_paths {
let policy_file_contents = fs::read_to_string(policy_path)
.with_context(|| format!("failed to read policy at {}", policy_path.display()))?;
let policy_identifier = policy_path.to_string_lossy().to_string();
parser.parse(&policy_identifier, &policy_file_contents)?;
}
Ok(parser.build())
fn cmd_check(cmd: ExecPolicyCheckCommand) -> Result<()> {
cmd.run()
}
@@ -27,9 +27,9 @@ impl Policy {
let rules = match cmd.first() {
Some(first) => match self.rules_by_program.get_vec(first) {
Some(rules) => rules,
None => return Evaluation::NoMatch,
None => return Evaluation::NoMatch {},
},
None => return Evaluation::NoMatch,
None => return Evaluation::NoMatch {},
};

let matched_rules: Vec<RuleMatch> =
@@ -39,7 +39,7 @@ impl Policy {
decision,
matched_rules,
},
None => Evaluation::NoMatch,
None => Evaluation::NoMatch {},
}
}

@@ -52,7 +52,7 @@ impl Policy {
.into_iter()
.flat_map(|command| match self.check(command.as_ref()) {
Evaluation::Match { matched_rules, .. } => matched_rules,
Evaluation::NoMatch => Vec::new(),
Evaluation::NoMatch { .. } => Vec::new(),
})
.collect();

@@ -61,7 +61,7 @@ impl Policy {
decision,
matched_rules,
},
None => Evaluation::NoMatch,
None => Evaluation::NoMatch {},
}
}
}
@@ -69,7 +69,7 @@ impl Policy {
#[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub enum Evaluation {
NoMatch,
NoMatch {},
Match {
decision: Decision,
#[serde(rename = "matchedRules")]

@@ -288,7 +288,7 @@ prefix_rule(
"color.status=always",
"status",
]));
assert_eq!(Evaluation::NoMatch, no_match_eval);
assert_eq!(Evaluation::NoMatch {}, no_match_eval);
}

#[test]
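The `NoMatch` → `NoMatch {}` change is what flips the serialized form documented in the README hunk earlier: with serde's default externally tagged enum representation, a unit variant serializes as a bare string, while a struct variant with no fields serializes as an object. A standalone illustration:

```rust
use serde::Serialize;

#[derive(Serialize)]
#[serde(rename_all = "camelCase")]
enum Old {
    NoMatch, // unit variant
}

#[derive(Serialize)]
#[serde(rename_all = "camelCase")]
enum New {
    NoMatch {}, // empty struct variant
}

fn main() {
    assert_eq!(serde_json::to_string(&Old::NoMatch).unwrap(), r#""noMatch""#);
    assert_eq!(
        serde_json::to_string(&New::NoMatch {}).unwrap(),
        r#"{"noMatch":{}}"#
    );
}
```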
Some files were not shown because too many files have changed in this diff.