Mirror of https://github.com/openai/codex.git — synced 2026-02-07 09:23:47 +00:00

**Compare commits:** `shell-tool` ... `subagents-` (22 commits)
| Author | SHA1 | Date |
|---|---|---|
| | 4cfca3c5ce | |
| | 2b0d67f1a7 | |
| | da7d6f1abb | |
| | beb71f4a00 | |
| | 3bdcbc7292 | |
| | a0434bbdb4 | |
| | d5f661c91d | |
| | 8ecaad948b | |
| | aa4e0d823e | |
| | 0e051644a9 | |
| | 40d14c0756 | |
| | af65666561 | |
| | 2ae1f81d84 | |
| | d363a0968e | |
| | bce030ddb5 | |
| | f4af6e389e | |
| | b315b22f7b | |
| | c9e149fd5c | |
| | bacdc004be | |
| | ab5972d447 | |
| | 767b66f407 | |
| | 830ab4ce20 | |
**`.github/workflows/issue-deduplicator.yml`** — vendored, 1 line changed

```diff
@@ -46,7 +46,6 @@ jobs:
         with:
           openai-api-key: ${{ secrets.CODEX_OPENAI_API_KEY }}
           allow-users: "*"
           model: gpt-5.1
           prompt: |
             You are an assistant that triages new GitHub issues by identifying potential duplicates.
```
**`.github/workflows/rust-release.yml`** — vendored, 22 lines changed

```diff
@@ -371,8 +371,20 @@ jobs:
           path: |
             codex-rs/dist/${{ matrix.target }}/*
 
+  shell-tool-mcp:
+    name: shell-tool-mcp
+    needs: tag-check
+    uses: ./.github/workflows/shell-tool-mcp.yml
+    with:
+      release-tag: ${{ github.ref_name }}
+      # We are not ready to publish yet.
+      publish: false
+    secrets: inherit
+
   release:
-    needs: build
+    needs:
+      - build
+      - shell-tool-mcp
     name: release
    runs-on: ubuntu-latest
     permissions:
@@ -395,6 +407,14 @@ jobs:
       - name: List
         run: ls -R dist/
 
+      # This is a temporary fix: we should modify shell-tool-mcp.yml so these
+      # files do not end up in dist/ in the first place.
+      - name: Delete entries from dist/ that should not go in the release
+        run: |
+          rm -rf dist/shell-tool-mcp*
+
+          ls -R dist/
+
       - name: Define release name
         id: release_name
         run: |
```
**`.github/workflows/shell-tool-mcp-ci.yml`** — vendored, new file (48 lines)

```yaml
name: shell-tool-mcp CI

on:
  push:
    paths:
      - "shell-tool-mcp/**"
      - ".github/workflows/shell-tool-mcp-ci.yml"
      - "pnpm-lock.yaml"
      - "pnpm-workspace.yaml"
  pull_request:
    paths:
      - "shell-tool-mcp/**"
      - ".github/workflows/shell-tool-mcp-ci.yml"
      - "pnpm-lock.yaml"
      - "pnpm-workspace.yaml"

env:
  NODE_VERSION: 22

jobs:
  test:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repository
        uses: actions/checkout@v5

      - name: Setup pnpm
        uses: pnpm/action-setup@v4
        with:
          run_install: false

      - name: Setup Node.js
        uses: actions/setup-node@v5
        with:
          node-version: ${{ env.NODE_VERSION }}
          cache: "pnpm"

      - name: Install dependencies
        run: pnpm install --frozen-lockfile

      - name: Format check
        run: pnpm --filter @openai/codex-shell-tool-mcp run format

      - name: Run tests
        run: pnpm --filter @openai/codex-shell-tool-mcp test

      - name: Build
        run: pnpm --filter @openai/codex-shell-tool-mcp run build
```
**`.github/workflows/shell-tool-mcp.yml`** — vendored, new file (412 lines)

```yaml
name: shell-tool-mcp

on:
  workflow_call:
    inputs:
      release-version:
        description: Version to publish (x.y.z or x.y.z-alpha.N). Defaults to GITHUB_REF_NAME when it starts with rust-v.
        required: false
        type: string
      release-tag:
        description: Tag name to use when downloading release artifacts (defaults to rust-v<version>).
        required: false
        type: string
      publish:
        description: Whether to publish to npm when the version is releasable.
        required: false
        default: true
        type: boolean

env:
  NODE_VERSION: 22

jobs:
  metadata:
    runs-on: ubuntu-latest
    outputs:
      version: ${{ steps.compute.outputs.version }}
      release_tag: ${{ steps.compute.outputs.release_tag }}
      should_publish: ${{ steps.compute.outputs.should_publish }}
      npm_tag: ${{ steps.compute.outputs.npm_tag }}
    steps:
      - name: Compute version and tags
        id: compute
        run: |
          set -euo pipefail

          version="${{ inputs.release-version }}"
          release_tag="${{ inputs.release-tag }}"

          if [[ -z "$version" ]]; then
            if [[ -n "$release_tag" && "$release_tag" =~ ^rust-v.+ ]]; then
              version="${release_tag#rust-v}"
            elif [[ "${GITHUB_REF_NAME:-}" =~ ^rust-v.+ ]]; then
              version="${GITHUB_REF_NAME#rust-v}"
              release_tag="${GITHUB_REF_NAME}"
            else
              echo "release-version is required when GITHUB_REF_NAME is not a rust-v tag."
              exit 1
            fi
          fi

          if [[ -z "$release_tag" ]]; then
            release_tag="rust-v${version}"
          fi

          npm_tag=""
          should_publish="false"
          if [[ "$version" =~ ^[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
            should_publish="true"
          elif [[ "$version" =~ ^[0-9]+\.[0-9]+\.[0-9]+-alpha\.[0-9]+$ ]]; then
            should_publish="true"
            npm_tag="alpha"
          fi

          echo "version=${version}" >> "$GITHUB_OUTPUT"
          echo "release_tag=${release_tag}" >> "$GITHUB_OUTPUT"
          echo "npm_tag=${npm_tag}" >> "$GITHUB_OUTPUT"
          echo "should_publish=${should_publish}" >> "$GITHUB_OUTPUT"

  rust-binaries:
    name: Build Rust - ${{ matrix.target }}
    needs: metadata
    runs-on: ${{ matrix.runner }}
    timeout-minutes: 30
    defaults:
      run:
        working-directory: codex-rs
    strategy:
      fail-fast: false
      matrix:
        include:
          - runner: macos-15-xlarge
            target: aarch64-apple-darwin
          - runner: macos-15-xlarge
            target: x86_64-apple-darwin
          - runner: ubuntu-24.04
            target: x86_64-unknown-linux-musl
            install_musl: true
          - runner: ubuntu-24.04-arm
            target: aarch64-unknown-linux-musl
            install_musl: true
    steps:
      - name: Checkout repository
        uses: actions/checkout@v5

      - uses: dtolnay/rust-toolchain@1.90
        with:
          targets: ${{ matrix.target }}

      - if: ${{ matrix.install_musl }}
        name: Install musl build dependencies
        run: |
          sudo apt-get update
          sudo apt-get install -y musl-tools pkg-config

      - name: Build exec server binaries
        run: cargo build --release --target ${{ matrix.target }} --bin codex-exec-mcp-server --bin codex-execve-wrapper

      - name: Stage exec server binaries
        run: |
          dest="${GITHUB_WORKSPACE}/artifacts/vendor/${{ matrix.target }}"
          mkdir -p "$dest"
          cp "target/${{ matrix.target }}/release/codex-exec-mcp-server" "$dest/"
          cp "target/${{ matrix.target }}/release/codex-execve-wrapper" "$dest/"

      - uses: actions/upload-artifact@v4
        with:
          name: shell-tool-mcp-rust-${{ matrix.target }}
          path: artifacts/**
          if-no-files-found: error

  bash-linux:
    name: Build Bash (Linux) - ${{ matrix.variant }} - ${{ matrix.target }}
    needs: metadata
    runs-on: ${{ matrix.runner }}
    timeout-minutes: 30
    container:
      image: ${{ matrix.image }}
    strategy:
      fail-fast: false
      matrix:
        include:
          - runner: ubuntu-24.04
            target: x86_64-unknown-linux-musl
            variant: ubuntu-24.04
            image: ubuntu:24.04
          - runner: ubuntu-24.04
            target: x86_64-unknown-linux-musl
            variant: ubuntu-22.04
            image: ubuntu:22.04
          - runner: ubuntu-24.04
            target: x86_64-unknown-linux-musl
            variant: ubuntu-20.04
            image: ubuntu:20.04
          - runner: ubuntu-24.04
            target: x86_64-unknown-linux-musl
            variant: debian-12
            image: debian:12
          - runner: ubuntu-24.04
            target: x86_64-unknown-linux-musl
            variant: debian-11
            image: debian:11
          - runner: ubuntu-24.04
            target: x86_64-unknown-linux-musl
            variant: centos-9
            image: quay.io/centos/centos:stream9
          - runner: ubuntu-24.04-arm
            target: aarch64-unknown-linux-musl
            variant: ubuntu-24.04
            image: arm64v8/ubuntu:24.04
          - runner: ubuntu-24.04-arm
            target: aarch64-unknown-linux-musl
            variant: ubuntu-22.04
            image: arm64v8/ubuntu:22.04
          - runner: ubuntu-24.04-arm
            target: aarch64-unknown-linux-musl
            variant: ubuntu-20.04
            image: arm64v8/ubuntu:20.04
          - runner: ubuntu-24.04-arm
            target: aarch64-unknown-linux-musl
            variant: debian-12
            image: arm64v8/debian:12
          - runner: ubuntu-24.04-arm
            target: aarch64-unknown-linux-musl
            variant: debian-11
            image: arm64v8/debian:11
          - runner: ubuntu-24.04-arm
            target: aarch64-unknown-linux-musl
            variant: centos-9
            image: quay.io/centos/centos:stream9
    steps:
      - name: Install build prerequisites
        shell: bash
        run: |
          set -euo pipefail
          if command -v apt-get >/dev/null 2>&1; then
            apt-get update
            DEBIAN_FRONTEND=noninteractive apt-get install -y git build-essential bison autoconf gettext
          elif command -v dnf >/dev/null 2>&1; then
            dnf install -y git gcc gcc-c++ make bison autoconf gettext
          elif command -v yum >/dev/null 2>&1; then
            yum install -y git gcc gcc-c++ make bison autoconf gettext
          else
            echo "Unsupported package manager in container"
            exit 1
          fi

      - name: Checkout repository
        uses: actions/checkout@v5

      - name: Build patched Bash
        shell: bash
        run: |
          set -euo pipefail
          git clone --depth 1 https://github.com/bminor/bash /tmp/bash
          cd /tmp/bash
          git fetch --depth 1 origin a8a1c2fac029404d3f42cd39f5a20f24b6e4fe4b
          git checkout a8a1c2fac029404d3f42cd39f5a20f24b6e4fe4b
          git apply "${GITHUB_WORKSPACE}/shell-tool-mcp/patches/bash-exec-wrapper.patch"
          ./configure --without-bash-malloc
          cores="$(command -v nproc >/dev/null 2>&1 && nproc || getconf _NPROCESSORS_ONLN)"
          make -j"${cores}"

          dest="${GITHUB_WORKSPACE}/artifacts/vendor/${{ matrix.target }}/bash/${{ matrix.variant }}"
          mkdir -p "$dest"
          cp bash "$dest/bash"

      - uses: actions/upload-artifact@v4
        with:
          name: shell-tool-mcp-bash-${{ matrix.target }}-${{ matrix.variant }}
          path: artifacts/**
          if-no-files-found: error

  bash-darwin:
    name: Build Bash (macOS) - ${{ matrix.variant }} - ${{ matrix.target }}
    needs: metadata
    runs-on: ${{ matrix.runner }}
    timeout-minutes: 30
    strategy:
      fail-fast: false
      matrix:
        include:
          - runner: macos-15-xlarge
            target: aarch64-apple-darwin
            variant: macos-15
          - runner: macos-14
            target: aarch64-apple-darwin
            variant: macos-14
          - runner: macos-13
            target: x86_64-apple-darwin
            variant: macos-13
    steps:
      - name: Checkout repository
        uses: actions/checkout@v5

      - name: Build patched Bash
        shell: bash
        run: |
          set -euo pipefail
          git clone --depth 1 https://github.com/bminor/bash /tmp/bash
          cd /tmp/bash
          git fetch --depth 1 origin a8a1c2fac029404d3f42cd39f5a20f24b6e4fe4b
          git checkout a8a1c2fac029404d3f42cd39f5a20f24b6e4fe4b
          git apply "${GITHUB_WORKSPACE}/shell-tool-mcp/patches/bash-exec-wrapper.patch"
          ./configure --without-bash-malloc
          cores="$(getconf _NPROCESSORS_ONLN)"
          make -j"${cores}"

          dest="${GITHUB_WORKSPACE}/artifacts/vendor/${{ matrix.target }}/bash/${{ matrix.variant }}"
          mkdir -p "$dest"
          cp bash "$dest/bash"

      - uses: actions/upload-artifact@v4
        with:
          name: shell-tool-mcp-bash-${{ matrix.target }}-${{ matrix.variant }}
          path: artifacts/**
          if-no-files-found: error

  package:
    name: Package npm module
    needs:
      - metadata
      - rust-binaries
      - bash-linux
      - bash-darwin
    runs-on: ubuntu-latest
    env:
      PACKAGE_VERSION: ${{ needs.metadata.outputs.version }}
    steps:
      - name: Checkout repository
        uses: actions/checkout@v5

      - name: Setup pnpm
        uses: pnpm/action-setup@v4
        with:
          version: 10.8.1
          run_install: false

      - name: Setup Node.js
        uses: actions/setup-node@v5
        with:
          node-version: ${{ env.NODE_VERSION }}

      - name: Install JavaScript dependencies
        run: pnpm install --frozen-lockfile

      - name: Build (shell-tool-mcp)
        run: pnpm --filter @openai/codex-shell-tool-mcp run build

      - name: Download build artifacts
        uses: actions/download-artifact@v4
        with:
          path: artifacts

      - name: Assemble staging directory
        id: staging
        shell: bash
        run: |
          set -euo pipefail
          staging="${STAGING_DIR}"
          mkdir -p "$staging" "$staging/vendor"
          cp shell-tool-mcp/README.md "$staging/"
          cp shell-tool-mcp/package.json "$staging/"
          cp -R shell-tool-mcp/bin "$staging/"

          found_vendor="false"
          shopt -s nullglob
          for vendor_dir in artifacts/*/vendor; do
            rsync -av "$vendor_dir/" "$staging/vendor/"
            found_vendor="true"
          done
          if [[ "$found_vendor" == "false" ]]; then
            echo "No vendor payloads were downloaded."
            exit 1
          fi

          node - <<'NODE'
          import fs from "node:fs";
          import path from "node:path";

          const stagingDir = process.env.STAGING_DIR;
          const version = process.env.PACKAGE_VERSION;
          const pkgPath = path.join(stagingDir, "package.json");
          const pkg = JSON.parse(fs.readFileSync(pkgPath, "utf8"));
          pkg.version = version;
          fs.writeFileSync(pkgPath, JSON.stringify(pkg, null, 2) + "\n");
          NODE

          echo "dir=$staging" >> "$GITHUB_OUTPUT"
        env:
          STAGING_DIR: ${{ runner.temp }}/shell-tool-mcp

      - name: Ensure binaries are executable
        run: |
          set -euo pipefail
          staging="${{ steps.staging.outputs.dir }}"
          chmod +x \
            "$staging"/vendor/*/codex-exec-mcp-server \
            "$staging"/vendor/*/codex-execve-wrapper \
            "$staging"/vendor/*/bash/*/bash

      - name: Create npm tarball
        shell: bash
        run: |
          set -euo pipefail
          mkdir -p dist/npm
          staging="${{ steps.staging.outputs.dir }}"
          pack_info=$(cd "$staging" && npm pack --ignore-scripts --json --pack-destination "${GITHUB_WORKSPACE}/dist/npm")
          filename=$(PACK_INFO="$pack_info" node -e 'const data = JSON.parse(process.env.PACK_INFO); console.log(data[0].filename);')
          mv "dist/npm/${filename}" "dist/npm/codex-shell-tool-mcp-npm-${PACKAGE_VERSION}.tgz"

      - uses: actions/upload-artifact@v4
        with:
          name: codex-shell-tool-mcp-npm
          path: dist/npm/codex-shell-tool-mcp-npm-${{ env.PACKAGE_VERSION }}.tgz
          if-no-files-found: error

  publish:
    name: Publish npm package
    needs:
      - metadata
      - package
    if: ${{ inputs.publish && needs.metadata.outputs.should_publish == 'true' }}
    runs-on: ubuntu-latest
    permissions:
      id-token: write
      contents: read
    steps:
      - name: Setup pnpm
        uses: pnpm/action-setup@v4
        with:
          version: 10.8.1
          run_install: false

      - name: Setup Node.js
        uses: actions/setup-node@v5
        with:
          node-version: ${{ env.NODE_VERSION }}
          registry-url: https://registry.npmjs.org
          scope: "@openai"

      - name: Update npm
        run: npm install -g npm@latest

      - name: Download npm tarball
        uses: actions/download-artifact@v4
        with:
          name: codex-shell-tool-mcp-npm
          path: dist/npm

      - name: Publish to npm
        env:
          NPM_TAG: ${{ needs.metadata.outputs.npm_tag }}
          VERSION: ${{ needs.metadata.outputs.version }}
        shell: bash
        run: |
          set -euo pipefail
          tag_args=()
          if [[ -n "${NPM_TAG}" ]]; then
            tag_args+=(--tag "${NPM_TAG}")
          fi
          npm publish "dist/npm/codex-shell-tool-mcp-npm-${VERSION}.tgz" "${tag_args[@]}"
```
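For readers following the `metadata` job above, here is a minimal TypeScript sketch of the same version-classification rules (the function name and return shape are illustrative, not part of the repo):

```ts
// Mirrors the bash logic in the `metadata` job: plain x.y.z versions publish
// to the default npm dist-tag, x.y.z-alpha.N versions publish under the
// "alpha" tag, and anything else is treated as not releasable.
type ReleasePlan = { shouldPublish: boolean; npmTag?: "alpha" };

function classifyVersion(version: string): ReleasePlan {
  if (/^\d+\.\d+\.\d+$/.test(version)) {
    return { shouldPublish: true };
  }
  if (/^\d+\.\d+\.\d+-alpha\.\d+$/.test(version)) {
    return { shouldPublish: true, npmTag: "alpha" };
  }
  return { shouldPublish: false };
}

console.log(classifyVersion("0.50.0"));         // { shouldPublish: true }
console.log(classifyVersion("0.50.0-alpha.3")); // { shouldPublish: true, npmTag: "alpha" }
console.log(classifyVersion("0.50.0-rc.1"));    // { shouldPublish: false }
```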
**`.gitignore`** — vendored, 2 lines changed

```diff
@@ -85,3 +85,5 @@ CHANGELOG.ignore.md
 # nix related
 .direnv
 .envrc
+
+plans/
```
**`codex-rs/Cargo.lock`** — generated, 11 lines changed

```diff
@@ -187,8 +187,10 @@ dependencies = [
  "codex-app-server-protocol",
  "codex-core",
  "codex-protocol",
+ "core_test_support",
  "serde",
  "serde_json",
+ "shlex",
  "tokio",
  "uuid",
  "wiremock",
@@ -868,6 +870,7 @@ dependencies = [
  "serde",
  "serde_json",
  "serial_test",
+ "shlex",
  "tempfile",
  "tokio",
  "toml",
@@ -1123,16 +1126,19 @@ dependencies = [
  "http",
  "image",
  "indexmap 2.12.0",
  "insta",
  "keyring",
  "landlock",
  "libc",
  "maplit",
  "mcp-types",
  "once_cell",
  "openssl-sys",
  "os_info",
  "predicates",
  "pretty_assertions",
  "rand 0.9.2",
  "regex",
  "regex-lite",
  "reqwest",
  "seccompiler",
@@ -1158,6 +1164,7 @@ dependencies = [
  "tracing-test",
  "tree-sitter",
  "tree-sitter-bash",
  "url",
  "uuid",
  "walkdir",
  "which",
@@ -1776,6 +1783,7 @@ dependencies = [
  "notify",
  "regex-lite",
  "serde_json",
  "shlex",
  "tempfile",
  "tokio",
  "walkdir",
@@ -3328,6 +3336,7 @@ checksum = "46fdb647ebde000f43b5b53f773c30cf9b0cb4300453208713fa38b2c70935a0"
 dependencies = [
  "console",
  "once_cell",
  "serde",
  "similar",
 ]
 
@@ -3737,11 +3746,13 @@ dependencies = [
  "assert_cmd",
  "codex-core",
  "codex-mcp-server",
+ "core_test_support",
  "mcp-types",
  "os_info",
  "pretty_assertions",
  "serde",
  "serde_json",
+ "shlex",
  "tokio",
  "wiremock",
 ]
```
**`codex-rs/Cargo.toml`** (workspace manifest)

```diff
@@ -146,7 +146,7 @@ mime_guess = "2.0.5"
 multimap = "0.10.0"
 notify = "8.2.0"
 nucleo-matcher = "0.3.1"
-once_cell = "1"
+once_cell = "1.20.2"
 openssl-sys = "*"
 opentelemetry = "0.30.0"
 opentelemetry-appender-tracing = "0.30.0"
@@ -165,6 +165,7 @@ rand = "0.9"
 ratatui = "0.29.0"
 ratatui-macros = "0.6.0"
 regex-lite = "0.1.7"
+regex = "1.11.1"
 reqwest = "0.12"
 rmcp = { version = "0.8.5", default-features = false }
 schemars = "0.8.22"
@@ -267,7 +268,8 @@ ignored = [
 ]
 
 [profile.release]
-lto = "fat"
+opt-level = "z" # or "s" (z is smaller)
+lto = "thin" # "fat" can be smaller sometimes; test both
 # Because we bundle some of these executables with the TypeScript CLI, we
 # remove everything to make the binary as small as possible.
 strip = "symbols"
```
**app-server protocol v2 types**

```diff
@@ -15,6 +15,7 @@ use codex_protocol::protocol::CodexErrorInfo as CoreCodexErrorInfo;
 use codex_protocol::protocol::CreditsSnapshot as CoreCreditsSnapshot;
 use codex_protocol::protocol::RateLimitSnapshot as CoreRateLimitSnapshot;
 use codex_protocol::protocol::RateLimitWindow as CoreRateLimitWindow;
+use codex_protocol::protocol::SessionSource as CoreSessionSource;
 use codex_protocol::user_input::UserInput as CoreUserInput;
 use mcp_types::ContentBlock as McpContentBlock;
 use schemars::JsonSchema;
@@ -259,6 +260,56 @@ pub enum CommandAction {
     },
 }
 
+#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, JsonSchema, TS)]
+#[serde(rename_all = "camelCase")]
+#[ts(rename_all = "camelCase", export_to = "v2/")]
+#[derive(Default)]
+pub enum SessionSource {
+    Cli,
+    #[serde(rename = "vscode")]
+    #[ts(rename = "vscode")]
+    #[default]
+    VsCode,
+    Exec,
+    AppServer,
+    #[serde(other)]
+    Unknown,
+}
+
+impl From<CoreSessionSource> for SessionSource {
+    fn from(value: CoreSessionSource) -> Self {
+        match value {
+            CoreSessionSource::Cli => SessionSource::Cli,
+            CoreSessionSource::VSCode => SessionSource::VsCode,
+            CoreSessionSource::Exec => SessionSource::Exec,
+            CoreSessionSource::Mcp => SessionSource::AppServer,
+            CoreSessionSource::SubAgent(_) => SessionSource::Unknown,
+            CoreSessionSource::Unknown => SessionSource::Unknown,
+        }
+    }
+}
+
+impl From<SessionSource> for CoreSessionSource {
+    fn from(value: SessionSource) -> Self {
+        match value {
+            SessionSource::Cli => CoreSessionSource::Cli,
+            SessionSource::VsCode => CoreSessionSource::VSCode,
+            SessionSource::Exec => CoreSessionSource::Exec,
+            SessionSource::AppServer => CoreSessionSource::Mcp,
+            SessionSource::Unknown => CoreSessionSource::Unknown,
+        }
+    }
+}
+
+#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, JsonSchema, TS)]
+#[serde(rename_all = "camelCase")]
+#[ts(export_to = "v2/")]
+pub struct GitInfo {
+    pub sha: Option<String>,
+    pub branch: Option<String>,
+    pub origin_url: Option<String>,
+}
+
 impl CommandAction {
     pub fn into_core(self) -> CoreParsedCommand {
         match self {
@@ -581,11 +632,20 @@ pub struct Thread {
     pub id: String,
     /// Usually the first user message in the thread, if available.
     pub preview: String,
+    /// Model provider used for this thread (for example, 'openai').
     pub model_provider: String,
     /// Unix timestamp (in seconds) when the thread was created.
     pub created_at: i64,
     /// [UNSTABLE] Path to the thread on disk.
     pub path: PathBuf,
+    /// Working directory captured for the thread.
+    pub cwd: PathBuf,
+    /// Version of the CLI that created the thread.
+    pub cli_version: String,
+    /// Origin of the thread (CLI, VSCode, codex exec, codex app-server, etc.).
+    pub source: SessionSource,
+    /// Optional Git metadata captured when the thread was created.
+    pub git_info: Option<GitInfo>,
     /// Only populated on a `thread/resume` response.
     /// For all other responses and notifications returning a Thread,
     /// the turns field will be an empty list.
@@ -859,6 +919,7 @@ pub enum CommandExecutionStatus {
     InProgress,
     Completed,
     Failed,
+    Declined,
 }
 
 #[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
```
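Given the `#[ts(...)]` attributes above, the ts-rs output for `SessionSource` under `v2/` should be roughly the following union — a sketch of the generated type, not copied from the repo:

```ts
// camelCase rename on variants, plus the explicit "vscode" override
// on the VsCode variant from the Rust enum above.
export type SessionSource = "cli" | "vscode" | "exec" | "appServer" | "unknown";
```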
**Cargo.toml (dev-dependencies)**

```diff
@@ -53,3 +53,4 @@ serial_test = { workspace = true }
 tempfile = { workspace = true }
 toml = { workspace = true }
 wiremock = { workspace = true }
+shlex = { workspace = true }
```
**app-server protocol docs**

```diff
@@ -9,8 +9,8 @@
 - [Initialization](#initialization)
 - [Core primitives](#core-primitives)
 - [Thread & turn endpoints](#thread--turn-endpoints)
+- [Events (work-in-progress)](#events-work-in-progress)
 - [Auth endpoints](#auth-endpoints)
-- [Events (work-in-progress)](#v2-streaming-events-work-in-progress)
 
 ## Protocol
 
@@ -234,6 +234,90 @@ When the reviewer finishes, the server emits `item/completed` containing the sam
 
 The `review` string is plain text that already bundles the overall explanation plus a bullet list for each structured finding (matching `ThreadItem::CodeReview` in the generated schema). Use this notification to render the reviewer output in your client.
 
+## Events (work-in-progress)
+
+Event notifications are the server-initiated event stream for thread lifecycles, turn lifecycles, and the items within them. After you start or resume a thread, keep reading stdout for `thread/started`, `turn/*`, and `item/*` notifications.
+
+### Turn events
+
+The app-server streams JSON-RPC notifications while a turn is running. Each turn starts with `turn/started` (initial `turn`) and ends with `turn/completed` (final `turn` plus token `usage`), and clients subscribe to the events they care about, rendering each item incrementally as updates arrive. The per-item lifecycle is always: `item/started` → zero or more item-specific deltas → `item/completed`.
+
+- `turn/started` — `{ turn }` with the turn id, empty `items`, and `status: "inProgress"`.
+- `turn/completed` — `{ turn }` where `turn.status` is `completed`, `interrupted`, or `failed`; failures carry `{ error: { message, codexErrorInfo? } }`.
+
+Today both notifications carry an empty `items` array even when item events were streamed; rely on `item/*` notifications for the canonical item list until this is fixed.
+
+#### Thread items
+
+`ThreadItem` is the tagged union carried in turn responses and `item/*` notifications. Currently we support events for the following items:
+- `userMessage` — `{id, content}` where `content` is a list of user inputs (`text`, `image`, or `localImage`).
+- `agentMessage` — `{id, text}` containing the accumulated agent reply.
+- `reasoning` — `{id, summary, content}` where `summary` holds streamed reasoning summaries (applicable for most OpenAI models) and `content` holds raw reasoning blocks (applicable for e.g. open source models).
+- `commandExecution` — `{id, command, cwd, status, commandActions, aggregatedOutput?, exitCode?, durationMs?}` for sandboxed commands; `status` is `inProgress`, `completed`, `failed`, or `declined`.
+- `fileChange` — `{id, changes, status}` describing proposed edits; `changes` list `{path, kind, diff}` and `status` is `inProgress`, `completed`, `failed`, or `declined`.
+- `mcpToolCall` — `{id, server, tool, status, arguments, result?, error?}` describing MCP calls; `status` is `inProgress`, `completed`, or `failed`.
+- `webSearch` — `{id, query}` for a web search request issued by the agent.
+
+All items emit two shared lifecycle events:
+- `item/started` — emits the full `item` when a new unit of work begins so the UI can render it immediately; the `item.id` in this payload matches the `itemId` used by deltas.
+- `item/completed` — sends the final `item` once that work finishes (e.g., after a tool call or message completes); treat this as the authoritative state.
+
+There are additional item-specific events:
+#### agentMessage
+- `item/agentMessage/delta` — appends streamed text for the agent message; concatenate `delta` values for the same `itemId` in order to reconstruct the full reply.
+#### reasoning
+- `item/reasoning/summaryTextDelta` — streams readable reasoning summaries; `summaryIndex` increments when a new summary section opens.
+- `item/reasoning/summaryPartAdded` — marks the boundary between reasoning summary sections for an `itemId`; subsequent `summaryTextDelta` entries share the same `summaryIndex`.
+- `item/reasoning/textDelta` — streams raw reasoning text (only applicable for e.g. open source models); use `contentIndex` to group deltas that belong together before showing them in the UI.
+#### commandExecution
+- `item/commandExecution/outputDelta` — streams stdout/stderr for the command; append deltas in order to render live output alongside `aggregatedOutput` in the final item.
+Final `commandExecution` items include parsed `commandActions`, `status`, `exitCode`, and `durationMs` so the UI can summarize what ran and whether it succeeded.
+#### fileChange
+`fileChange` items contain a `changes` list with `{path, kind, diff}` entries (`kind` is `add`, `delete`, or `update` with an optional `movePath`). The `status` tracks whether apply succeeded (`completed`), failed, or was `declined`.
+
+### Errors
+`error` event is emitted whenever the server hits an error mid-turn (for example, upstream model errors or quota limits). Carries the same `{ error: { message, codexErrorInfo? } }` payload as `turn.status: "failed"` and may precede that terminal notification.
+
+`codexErrorInfo` maps to the `CodexErrorInfo` enum. Common values:
+- `ContextWindowExceeded`
+- `UsageLimitExceeded`
+- `HttpConnectionFailed { httpStatusCode? }`: upstream HTTP failures including 4xx/5xx
+- `ResponseStreamConnectionFailed { httpStatusCode? }`: failure to connect to the response SSE stream
+- `ResponseStreamDisconnected { httpStatusCode? }`: disconnect of the response SSE stream in the middle of a turn before completion
+- `ResponseTooManyFailedAttempts { httpStatusCode? }`
+- `BadRequest`
+- `Unauthorized`
+- `SandboxError`
+- `InternalServerError`
+- `Other`: all unclassified errors
+
+When an upstream HTTP status is available (for example, from the Responses API or a provider), it is forwarded in `httpStatusCode` on the relevant `codexErrorInfo` variant.
```
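A minimal TypeScript sketch of the client-side bookkeeping the new event docs describe — render on `item/started`, append `item/agentMessage/delta` payloads in order, and replace everything with the authoritative item on `item/completed`. The types here are illustrative, not the generated protocol bindings:

```ts
type Notification =
  | { method: "item/started"; params: { item: { id: string; text?: string } } }
  | { method: "item/agentMessage/delta"; params: { itemId: string; delta: string } }
  | { method: "item/completed"; params: { item: { id: string; text?: string } } };

// Accumulated agent-message text, keyed by item id.
const items = new Map<string, string>();

function onNotification(n: Notification): void {
  switch (n.method) {
    case "item/started":
      // Render immediately; deltas for this item reference the same id.
      items.set(n.params.item.id, n.params.item.text ?? "");
      break;
    case "item/agentMessage/delta":
      // Concatenate deltas in arrival order to reconstruct the full reply.
      items.set(n.params.itemId, (items.get(n.params.itemId) ?? "") + n.params.delta);
      break;
    case "item/completed":
      // The final item is authoritative; overwrite any accumulated state.
      items.set(n.params.item.id, n.params.item.text ?? "");
      break;
  }
}
```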
```diff
 ## Approvals
 
 Certain actions (shell commands or modifying files) may require explicit user approval depending on the user's config. When `turn/start` is used, the app-server drives an approval flow by sending a server-initiated JSON-RPC request to the client. The client must respond to tell Codex whether to proceed. UIs should present these requests inline with the active turn so users can review the proposed command or diff before choosing.
 
 - Requests include `threadId` and `turnId`—use them to scope UI state to the active conversation.
 - Respond with a single `{ "decision": "accept" | "decline" }` payload (plus optional `acceptSettings` on command executions). The server resumes or declines the work and ends the item with `item/completed`.
 
 ### Command execution approvals
 
 Order of messages:
 1. `item/started` — shows the pending `commandExecution` item with `command`, `cwd`, and other fields so you can render the proposed action.
 2. `item/commandExecution/requestApproval` (request) — carries the same `itemId`, `threadId`, `turnId`, optionally `reason` or `risk`, plus `parsedCmd` for friendly display.
 3. Client response — `{ "decision": "accept", "acceptSettings": { "forSession": false } }` or `{ "decision": "decline" }`.
 4. `item/completed` — final `commandExecution` item with `status: "completed" | "failed" | "declined"` and execution output. Render this as the authoritative result.
 
 ### File change approvals
 
 Order of messages:
 1. `item/started` — emits a `fileChange` item with `changes` (diff chunk summaries) and `status: "inProgress"`. Show the proposed edits and paths to the user.
 2. `item/fileChange/requestApproval` (request) — includes `itemId`, `threadId`, `turnId`, and an optional `reason`.
 3. Client response — `{ "decision": "accept" }` or `{ "decision": "decline" }`.
 4. `item/completed` — returns the same `fileChange` item with `status` updated to `completed`, `failed`, or `declined` after the patch attempt. Rely on this to show success/failure and finalize the diff state in your UI.
 
 UI guidance for IDEs: surface an approval dialog as soon as the request arrives. The turn will proceed after the server receives a response to the approval request. The terminal `item/completed` notification will be sent with the appropriate status.
 
 ## Auth endpoints
 
 The JSON-RPC auth/account surface exposes request/response methods plus server-initiated notifications (no `id`). Use these to determine auth state, start or cancel logins, logout, and inspect ChatGPT rate limits.
```
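As a rough sketch of the approval round-trip documented above — a client receives the server-initiated request and replies on the same JSON-RPC id (the request type and `handleApproval` helper here are illustrative):

```ts
// Server → client request carrying the pending command; the response must
// echo the JSON-RPC request id and carry a decision.
interface CommandApprovalRequest {
  id: number;
  params: { threadId: string; turnId: string; itemId: string; reason?: string };
}

function handleApproval(req: CommandApprovalRequest, accept: boolean) {
  // `acceptSettings.forSession: true` would approve this command for the
  // remainder of the session instead of just this one execution.
  const result = accept
    ? { decision: "accept", acceptSettings: { forSession: false } }
    : { decision: "decline" };
  return { jsonrpc: "2.0", id: req.id, result };
}
```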
```diff
@@ -329,56 +413,3 @@ Field notes:
 - `codex app-server generate-ts --out <dir>` emits v2 types under `v2/`.
 - `codex app-server generate-json-schema --out <dir>` outputs `codex_app_server_protocol.schemas.json`.
 - See ["Authentication and authorization" in the config docs](../../docs/config.md#authentication-and-authorization) for configuration knobs.
-
-
-## Events (work-in-progress)
-
-Event notifications are the server-initiated event stream for thread lifecycles, turn lifecycles, and the items within them. After you start or resume a thread, keep reading stdout for `thread/started`, `turn/*`, and `item/*` notifications.
-
-### Turn events
-
-The app-server streams JSON-RPC notifications while a turn is running. Each turn starts with `turn/started` (initial `turn`) and ends with `turn/completed` (final `turn` plus token `usage`), and clients subscribe to the events they care about, rendering each item incrementally as updates arrive. The per-item lifecycle is always: `item/started` → zero or more item-specific deltas → `item/completed`.
-
-- `turn/started` — `{ turn }` with the turn id, empty `items`, and `status: "inProgress"`.
-- `turn/completed` — `{ turn }` where `turn.status` is `completed`, `interrupted`, or `failed`; failures carry `{ error: { message, codexErrorInfo? } }`.
-
-Today both notifications carry an empty `items` array even when item events were streamed; rely on `item/*` notifications for the canonical item list until this is fixed.
-
-#### Errors
-`error` event is emitted whenever the server hits an error mid-turn (for example, upstream model errors or quota limits). Carries the same `{ error: { message, codexErrorInfo? } }` payload as `turn.status: "failed"` and may precede that terminal notification.
-
-`codexErrorInfo` maps to the `CodexErrorInfo` enum. Common values:
-- `ContextWindowExceeded`
-- `UsageLimitExceeded`
-- `HttpConnectionFailed { httpStatusCode? }`: upstream HTTP failures including 4xx/5xx
-- `ResponseStreamConnectionFailed { httpStatusCode? }`: failure to connect to the response SSE stream
-- `ResponseStreamDisconnected { httpStatusCode? }`: disconnect of the response SSE stream in the middle of a turn before completion
-- `ResponseTooManyFailedAttempts { httpStatusCode? }`
-- `BadRequest`
-- `Unauthorized`
-- `SandboxError`
-- `InternalServerError`
-- `Other`: all unclassified errors
-
-When an upstream HTTP status is available (for example, from the Responses API or a provider), it is forwarded in `httpStatusCode` on the relevant `codexErrorInfo` variant.
-
-#### Thread items
-
-`ThreadItem` is the tagged union carried in turn responses and `item/*` notifications. Currently we support events for the following items:
-- `userMessage` — `{id, content}` where `content` is a list of user inputs (`text`, `image`, or `localImage`).
-- `agentMessage` — `{id, text}` containing the accumulated agent reply.
-- `reasoning` — `{id, summary, content}` where `summary` holds streamed reasoning summaries (applicable for most OpenAI models) and `content` holds raw reasoning blocks (applicable for e.g. open source models).
-- `mcpToolCall` — `{id, server, tool, status, arguments, result?, error?}` describing MCP calls; `status` is `inProgress`, `completed`, or `failed`.
-- `webSearch` — `{id, query}` for a web search request issued by the agent.
-
-All items emit two shared lifecycle events:
-- `item/started` — emits the full `item` when a new unit of work begins so the UI can render it immediately; the `item.id` in this payload matches the `itemId` used by deltas.
-- `item/completed` — sends the final `item` once that work finishes (e.g., after a tool call or message completes); treat this as the authoritative state.
-
-There are additional item-specific events:
-#### agentMessage
-- `item/agentMessage/delta` — appends streamed text for the agent message; concatenate `delta` values for the same `itemId` in order to reconstruct the full reply.
-#### reasoning
-- `item/reasoning/summaryTextDelta` — streams readable reasoning summaries; `summaryIndex` increments when a new summary section opens.
-- `item/reasoning/summaryPartAdded` — marks the boundary between reasoning summary sections for an `itemId`; subsequent `summaryTextDelta` entries share the same `summaryIndex`.
-- `item/reasoning/textDelta` — streams raw reasoning text (only applicable for e.g. open source models); use `contentIndex` to group deltas that belong together before showing them in the UI.
```
**app-server bespoke event handling**

```diff
@@ -175,12 +175,20 @@ pub(crate) async fn apply_bespoke_event_handling(
                 });
             }
             ApiVersion::V2 => {
+                let item_id = call_id.clone();
+                let command_actions = parsed_cmd
+                    .iter()
+                    .cloned()
+                    .map(V2ParsedCommand::from)
+                    .collect::<Vec<_>>();
+                let command_string = shlex_join(&command);
+
                 let params = CommandExecutionRequestApprovalParams {
                     thread_id: conversation_id.to_string(),
                     turn_id: turn_id.clone(),
                     // Until we migrate the core to be aware of a first class CommandExecutionItem
                     // and emit the corresponding EventMsg, we repurpose the call_id as the item_id.
-                    item_id: call_id.clone(),
+                    item_id: item_id.clone(),
                     reason,
                     risk: risk.map(V2SandboxCommandAssessment::from),
                 };
@@ -190,8 +198,17 @@ pub(crate) async fn apply_bespoke_event_handling(
                     ))
                     .await;
                 tokio::spawn(async move {
-                    on_command_execution_request_approval_response(event_id, rx, conversation)
-                        .await;
+                    on_command_execution_request_approval_response(
+                        event_id,
+                        item_id,
+                        command_string,
+                        cwd,
+                        command_actions,
+                        rx,
+                        conversation,
+                        outgoing,
+                    )
+                    .await;
                 });
             }
         },
@@ -370,16 +387,21 @@ pub(crate) async fn apply_bespoke_event_handling(
             .await;
         }
         EventMsg::ExecCommandBegin(exec_command_begin_event) => {
+            let item_id = exec_command_begin_event.call_id.clone();
+            let command_actions = exec_command_begin_event
+                .parsed_cmd
+                .into_iter()
+                .map(V2ParsedCommand::from)
+                .collect::<Vec<_>>();
+            let command = shlex_join(&exec_command_begin_event.command);
+            let cwd = exec_command_begin_event.cwd;
+
             let item = ThreadItem::CommandExecution {
-                id: exec_command_begin_event.call_id.clone(),
-                command: shlex_join(&exec_command_begin_event.command),
-                cwd: exec_command_begin_event.cwd,
+                id: item_id,
+                command,
+                cwd,
                 status: CommandExecutionStatus::InProgress,
-                command_actions: exec_command_begin_event
-                    .parsed_cmd
-                    .into_iter()
-                    .map(V2ParsedCommand::from)
-                    .collect(),
+                command_actions,
                 aggregated_output: None,
                 exit_code: None,
                 duration_ms: None,
@@ -417,6 +439,10 @@ pub(crate) async fn apply_bespoke_event_handling(
             } else {
                 CommandExecutionStatus::Failed
             };
+            let command_actions = parsed_cmd
+                .into_iter()
+                .map(V2ParsedCommand::from)
+                .collect::<Vec<_>>();
 
             let aggregated_output = if aggregated_output.is_empty() {
                 None
@@ -431,7 +457,7 @@ pub(crate) async fn apply_bespoke_event_handling(
                 command: shlex_join(&command),
                 cwd,
                 status,
-                command_actions: parsed_cmd.into_iter().map(V2ParsedCommand::from).collect(),
+                command_actions,
                 aggregated_output,
                 exit_code: Some(exit_code),
                 duration_ms: Some(duration_ms),
@@ -516,6 +542,30 @@ async fn complete_file_change_item(
         .await;
 }
 
+async fn complete_command_execution_item(
+    item_id: String,
+    command: String,
+    cwd: PathBuf,
+    command_actions: Vec<V2ParsedCommand>,
+    status: CommandExecutionStatus,
+    outgoing: &OutgoingMessageSender,
+) {
+    let item = ThreadItem::CommandExecution {
+        id: item_id,
+        command,
+        cwd,
+        status,
+        command_actions,
+        aggregated_output: None,
+        exit_code: None,
+        duration_ms: None,
+    };
+    let notification = ItemCompletedNotification { item };
+    outgoing
+        .send_server_notification(ServerNotification::ItemCompleted(notification))
+        .await;
+}
+
 async fn find_and_remove_turn_summary(
     conversation_id: ConversationId,
     turn_summary_store: &TurnSummaryStore,
@@ -765,42 +815,68 @@ async fn on_file_change_request_approval_response(
     }
 }
 
+#[allow(clippy::too_many_arguments)]
 async fn on_command_execution_request_approval_response(
     event_id: String,
+    item_id: String,
+    command: String,
+    cwd: PathBuf,
+    command_actions: Vec<V2ParsedCommand>,
     receiver: oneshot::Receiver<JsonValue>,
     conversation: Arc<CodexConversation>,
+    outgoing: Arc<OutgoingMessageSender>,
 ) {
     let response = receiver.await;
-    let value = match response {
-        Ok(value) => value,
+    let (decision, completion_status) = match response {
+        Ok(value) => {
+            let response = serde_json::from_value::<CommandExecutionRequestApprovalResponse>(value)
+                .unwrap_or_else(|err| {
+                    error!("failed to deserialize CommandExecutionRequestApprovalResponse: {err}");
+                    CommandExecutionRequestApprovalResponse {
+                        decision: ApprovalDecision::Decline,
+                        accept_settings: None,
+                    }
+                });
+
+            let CommandExecutionRequestApprovalResponse {
+                decision,
+                accept_settings,
+            } = response;
+
+            let (decision, completion_status) = match (decision, accept_settings) {
+                (ApprovalDecision::Accept, Some(settings)) if settings.for_session => {
+                    (ReviewDecision::ApprovedForSession, None)
+                }
+                (ApprovalDecision::Accept, _) => (ReviewDecision::Approved, None),
+                (ApprovalDecision::Decline, _) => (
+                    ReviewDecision::Denied,
+                    Some(CommandExecutionStatus::Declined),
+                ),
+                (ApprovalDecision::Cancel, _) => (
+                    ReviewDecision::Abort,
+                    Some(CommandExecutionStatus::Declined),
+                ),
+            };
+            (decision, completion_status)
+        }
         Err(err) => {
             error!("request failed: {err:?}");
-            return;
+            (ReviewDecision::Denied, Some(CommandExecutionStatus::Failed))
         }
     };
 
-    let response = serde_json::from_value::<CommandExecutionRequestApprovalResponse>(value)
-        .unwrap_or_else(|err| {
-            error!("failed to deserialize CommandExecutionRequestApprovalResponse: {err}");
-            CommandExecutionRequestApprovalResponse {
-                decision: ApprovalDecision::Decline,
-                accept_settings: None,
-            }
-        });
+    if let Some(status) = completion_status {
+        complete_command_execution_item(
+            item_id.clone(),
+            command.clone(),
+            cwd.clone(),
+            command_actions.clone(),
+            status,
+            outgoing.as_ref(),
+        )
+        .await;
+    }
 
-    let CommandExecutionRequestApprovalResponse {
-        decision,
-        accept_settings,
-    } = response;
-
-    let decision = match (decision, accept_settings) {
-        (ApprovalDecision::Accept, Some(settings)) if settings.for_session => {
-            ReviewDecision::ApprovedForSession
-        }
-        (ApprovalDecision::Accept, _) => ReviewDecision::Approved,
-        (ApprovalDecision::Decline, _) => ReviewDecision::Denied,
-        (ApprovalDecision::Cancel, _) => ReviewDecision::Abort,
-    };
     if let Err(err) = conversation
         .submit(Op::ExecApproval {
             id: event_id,
```
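For client authors, the decision mapping the Rust `match` above implements can be restated as a small TypeScript sketch (names are illustrative; the server side is the Rust code above):

```ts
// accept + forSession approves for the whole session; decline and cancel both
// cause the server to emit a terminal commandExecution item with status
// "declined"; a failed approval round-trip completes the item as "failed".
function mapDecision(decision: "accept" | "decline" | "cancel", forSession = false) {
  switch (decision) {
    case "accept":
      return { review: forSession ? "approvedForSession" : "approved", completedAs: null };
    case "decline":
      return { review: "denied", completedAs: "declined" as const };
    case "cancel":
      return { review: "abort", completedAs: "declined" as const };
  }
}
```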
**app-server message processor**

```diff
@@ -39,6 +39,7 @@ use codex_app_server_protocol::GetConversationSummaryResponse;
 use codex_app_server_protocol::GetUserAgentResponse;
 use codex_app_server_protocol::GetUserSavedConfigResponse;
 use codex_app_server_protocol::GitDiffToRemoteResponse;
+use codex_app_server_protocol::GitInfo as ApiGitInfo;
 use codex_app_server_protocol::InputItem as WireInputItem;
 use codex_app_server_protocol::InterruptConversationParams;
 use codex_app_server_protocol::JSONRPCErrorError;
@@ -131,7 +132,7 @@ use codex_protocol::ConversationId;
 use codex_protocol::config_types::ForcedLoginMethod;
 use codex_protocol::items::TurnItem;
 use codex_protocol::models::ResponseItem;
-use codex_protocol::protocol::GitInfo;
+use codex_protocol::protocol::GitInfo as CoreGitInfo;
 use codex_protocol::protocol::RateLimitSnapshot as CoreRateLimitSnapshot;
 use codex_protocol::protocol::RolloutItem;
 use codex_protocol::protocol::SessionMetaLine;
@@ -1175,8 +1176,6 @@ impl CodexMessageProcessor {
             with_escalated_permissions: None,
             justification: None,
             arg0: None,
-            max_output_tokens: None,
-            max_output_chars: None,
         };
 
         let effective_policy = params
@@ -2933,7 +2932,7 @@ fn extract_conversation_summary(
     path: PathBuf,
     head: &[serde_json::Value],
     session_meta: &SessionMeta,
-    git: Option<&GitInfo>,
+    git: Option<&CoreGitInfo>,
     fallback_provider: &str,
 ) -> Option<ConversationSummary> {
     let preview = head
@@ -2974,7 +2973,7 @@ fn extract_conversation_summary(
     })
 }
 
-fn map_git_info(git_info: &GitInfo) -> ConversationGitInfo {
+fn map_git_info(git_info: &CoreGitInfo) -> ConversationGitInfo {
     ConversationGitInfo {
         sha: git_info.commit_hash.clone(),
         branch: git_info.branch.clone(),
@@ -2997,10 +2996,18 @@ fn summary_to_thread(summary: ConversationSummary) -> Thread {
         preview,
         timestamp,
         model_provider,
-        ..
+        cwd,
+        cli_version,
+        source,
+        git_info,
     } = summary;
 
     let created_at = parse_datetime(timestamp.as_deref());
+    let git_info = git_info.map(|info| ApiGitInfo {
+        sha: info.sha,
+        branch: info.branch,
+        origin_url: info.origin_url,
+    });
 
     Thread {
         id: conversation_id.to_string(),
@@ -3008,6 +3015,10 @@ fn summary_to_thread(summary: ConversationSummary) -> Thread {
         model_provider,
         created_at: created_at.map(|dt| dt.timestamp()).unwrap_or(0),
         path,
+        cwd,
+        cli_version,
+        source: source.into(),
+        git_info,
         turns: Vec::new(),
     }
 }
```
**app-server tests Cargo.toml (dev-dependencies)**

```diff
@@ -24,3 +24,5 @@ tokio = { workspace = true, features = [
 ] }
 uuid = { workspace = true }
 wiremock = { workspace = true }
+core_test_support = { path = "../../../core/tests/common" }
+shlex = { workspace = true }
```
**test support: `lib.rs` re-exports**

```diff
@@ -9,12 +9,14 @@ pub use auth_fixtures::ChatGptIdTokenClaims;
 pub use auth_fixtures::encode_id_token;
 pub use auth_fixtures::write_chatgpt_auth;
 use codex_app_server_protocol::JSONRPCResponse;
+pub use core_test_support::format_with_current_shell;
+pub use core_test_support::format_with_current_shell_display;
 pub use mcp_process::McpProcess;
 pub use mock_model_server::create_mock_chat_completions_server;
 pub use mock_model_server::create_mock_chat_completions_server_unchecked;
 pub use responses::create_apply_patch_sse_response;
 pub use responses::create_final_assistant_message_sse_response;
-pub use responses::create_shell_sse_response;
+pub use responses::create_shell_command_sse_response;
 pub use rollout::create_fake_rollout;
 use serde::de::DeserializeOwned;
```
**test support: `responses` module**

```diff
@@ -1,17 +1,18 @@
 use serde_json::json;
 use std::path::Path;
 
-pub fn create_shell_sse_response(
+pub fn create_shell_command_sse_response(
     command: Vec<String>,
     workdir: Option<&Path>,
     timeout_ms: Option<u64>,
     call_id: &str,
 ) -> anyhow::Result<String> {
-    // The `arguments`` for the `shell` tool is a serialized JSON object.
+    // The `arguments` for the `shell_command` tool is a serialized JSON object.
+    let command_str = shlex::try_join(command.iter().map(String::as_str))?;
     let tool_call_arguments = serde_json::to_string(&json!({
-        "command": command,
+        "command": command_str,
         "workdir": workdir.map(|w| w.to_string_lossy()),
-        "timeout": timeout_ms
+        "timeout_ms": timeout_ms
     }))?;
     let tool_call = json!({
         "choices": [
@@ -21,7 +22,7 @@ pub fn create_shell_sse_response(
             {
                 "id": call_id,
                 "function": {
-                    "name": "shell",
+                    "name": "shell_command",
                     "arguments": tool_call_arguments
                 }
             }
@@ -62,10 +63,10 @@ pub fn create_apply_patch_sse_response(
     patch_content: &str,
     call_id: &str,
 ) -> anyhow::Result<String> {
-    // Use shell command to call apply_patch with heredoc format
-    let shell_command = format!("apply_patch <<'EOF'\n{patch_content}\nEOF");
+    // Use shell_command to call apply_patch with heredoc format
+    let command = format!("apply_patch <<'EOF'\n{patch_content}\nEOF");
     let tool_call_arguments = serde_json::to_string(&json!({
-        "command": ["bash", "-lc", shell_command]
+        "command": command
     }))?;
 
     let tool_call = json!({
@@ -76,7 +77,7 @@ pub fn create_apply_patch_sse_response(
             {
                 "id": call_id,
                 "function": {
-                    "name": "shell",
+                    "name": "shell_command",
                     "arguments": tool_call_arguments
                 }
             }
```
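The rename also changes the wire shape of the mocked tool call: `shell` took a `command` argv array, while `shell_command` takes a single shlex-joined shell string. A hedged TypeScript sketch of the payload the helper now emits — the nesting around the function call is assumed from the chat-completions streaming format, not copied from the repo:

```ts
// Mocked chat-completions chunk carrying one shell_command tool call.
const toolCallChunk = {
  choices: [
    {
      delta: {
        tool_calls: [
          {
            id: "call1",
            function: {
              name: "shell_command",
              // One shell string (shlex-joined), not an argv array,
              // and the timeout field is now named timeout_ms.
              arguments: JSON.stringify({
                command: "python3 -c 'print(42)'",
                workdir: "/tmp/project",
                timeout_ms: 5000,
              }),
            },
          },
        ],
      },
    },
  ],
};
```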
**test support: `rollout` module**

```diff
@@ -1,6 +1,8 @@
 use anyhow::Result;
 use codex_protocol::ConversationId;
+use codex_protocol::protocol::GitInfo;
 use codex_protocol::protocol::SessionMeta;
+use codex_protocol::protocol::SessionMetaLine;
 use codex_protocol::protocol::SessionSource;
 use serde_json::json;
 use std::fs;
@@ -22,6 +24,7 @@ pub fn create_fake_rollout(
     meta_rfc3339: &str,
     preview: &str,
     model_provider: Option<&str>,
+    git_info: Option<GitInfo>,
 ) -> Result<String> {
     let uuid = Uuid::new_v4();
     let uuid_str = uuid.to_string();
@@ -37,7 +40,7 @@ pub fn create_fake_rollout(
     let file_path = dir.join(format!("rollout-{filename_ts}-{uuid}.jsonl"));
 
     // Build JSONL lines
-    let payload = serde_json::to_value(SessionMeta {
+    let meta = SessionMeta {
         id: conversation_id,
         timestamp: meta_rfc3339.to_string(),
         cwd: PathBuf::from("/"),
@@ -46,6 +49,10 @@ pub fn create_fake_rollout(
         instructions: None,
         source: SessionSource::Cli,
         model_provider: model_provider.map(str::to_string),
-    })?;
+    };
+    let payload = serde_json::to_value(SessionMetaLine {
+        meta,
+        git: git_info,
+    })?;
 
     let lines = [
```
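For orientation, the first JSONL line the updated helper writes now nests the session metadata under `meta` with an optional `git` sibling. A hedged TypeScript sketch of the shape — `commit_hash` and `branch` come from the `map_git_info` code above, while the other field spellings and all values are illustrative assumptions:

```ts
// First line of a rollout-*.jsonl file after this change (illustrative).
const sessionMetaLine = {
  meta: {
    id: "11111111-2222-3333-4444-555555555555", // conversation id (made up)
    timestamp: "2025-01-02T12:00:00Z",
    cwd: "/",
    instructions: null,
    source: "cli",                               // serialization assumed
    model_provider: "openai",
  },
  git: {
    commit_hash: "830ab4ce20",                   // illustrative values
    branch: "main",
    origin_url: "https://github.com/openai/codex.git",
  },
};
```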
**app-server integration tests: conversation flow**

```diff
@@ -2,7 +2,8 @@ use anyhow::Result;
 use app_test_support::McpProcess;
 use app_test_support::create_final_assistant_message_sse_response;
 use app_test_support::create_mock_chat_completions_server;
-use app_test_support::create_shell_sse_response;
+use app_test_support::create_shell_command_sse_response;
+use app_test_support::format_with_current_shell;
 use app_test_support::to_response;
 use codex_app_server_protocol::AddConversationListenerParams;
 use codex_app_server_protocol::AddConversationSubscriptionResponse;
@@ -56,7 +57,7 @@ async fn test_codex_jsonrpc_conversation_flow() -> Result<()> {
     // Create a mock model server that immediately ends each turn.
     // Two turns are expected: initial session configure + one user message.
     let responses = vec![
-        create_shell_sse_response(
+        create_shell_command_sse_response(
             vec!["ls".to_string()],
             Some(&working_directory),
             Some(5000),
@@ -175,7 +176,7 @@ async fn test_send_user_turn_changes_approval_policy_behavior() -> Result<()> {
 
     // Mock server will request a python shell call for the first and second turn, then finish.
     let responses = vec![
-        create_shell_sse_response(
+        create_shell_command_sse_response(
             vec![
                 "python3".to_string(),
                 "-c".to_string(),
@@ -186,7 +187,7 @@ async fn test_send_user_turn_changes_approval_policy_behavior() -> Result<()> {
             "call1",
         )?,
         create_final_assistant_message_sse_response("done 1")?,
-        create_shell_sse_response(
+        create_shell_command_sse_response(
             vec![
                 "python3".to_string(),
                 "-c".to_string(),
@@ -267,11 +268,7 @@ async fn test_send_user_turn_changes_approval_policy_behavior() -> Result<()> {
         ExecCommandApprovalParams {
             conversation_id,
             call_id: "call1".to_string(),
-            command: vec![
-                "python3".to_string(),
-                "-c".to_string(),
-                "print(42)".to_string(),
-            ],
+            command: format_with_current_shell("python3 -c 'print(42)'"),
             cwd: working_directory.clone(),
             reason: None,
             risk: None,
@@ -353,23 +350,15 @@ async fn test_send_user_turn_updates_sandbox_and_cwd_between_turns() -> Result<(
     std::fs::create_dir(&second_cwd)?;
 
     let responses = vec![
-        create_shell_sse_response(
-            vec![
-                "bash".to_string(),
-                "-lc".to_string(),
-                "echo first turn".to_string(),
-            ],
+        create_shell_command_sse_response(
+            vec!["echo".to_string(), "first".to_string(), "turn".to_string()],
             None,
             Some(5000),
             "call-first",
         )?,
         create_final_assistant_message_sse_response("done first")?,
-        create_shell_sse_response(
-            vec![
-                "bash".to_string(),
-                "-lc".to_string(),
-                "echo second turn".to_string(),
-            ],
+        create_shell_command_sse_response(
+            vec!["echo".to_string(), "second".to_string(), "turn".to_string()],
             None,
             Some(5000),
             "call-second",
@@ -481,13 +470,9 @@ async fn test_send_user_turn_updates_sandbox_and_cwd_between_turns() -> Result<(
         exec_begin.cwd, second_cwd,
         "exec turn should run from updated cwd"
     );
+    let expected_command = format_with_current_shell("echo second turn");
     assert_eq!(
-        exec_begin.command,
-        vec![
-            "bash".to_string(),
-            "-lc".to_string(),
-            "echo second turn".to_string()
-        ],
+        exec_begin.command, expected_command,
         "exec turn should run expected command"
     );
```
**app-server integration tests: shell command interruption**

```diff
@@ -19,7 +19,7 @@ use tokio::time::timeout;
 
 use app_test_support::McpProcess;
 use app_test_support::create_mock_chat_completions_server;
-use app_test_support::create_shell_sse_response;
+use app_test_support::create_shell_command_sse_response;
 use app_test_support::to_response;
 
 const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10);
@@ -56,7 +56,7 @@ async fn shell_command_interruption() -> anyhow::Result<()> {
     std::fs::create_dir(&working_directory)?;
 
     // Create mock server with a single SSE response: the long sleep command
-    let server = create_mock_chat_completions_server(vec![create_shell_sse_response(
+    let server = create_mock_chat_completions_server(vec![create_shell_command_sse_response(
         shell_command.clone(),
         Some(&working_directory),
         Some(10_000), // 10 seconds timeout in ms
```
**app-server integration tests: list & resume conversations**

```diff
@@ -31,6 +31,7 @@ async fn test_list_and_resume_conversations() -> Result<()> {
         "2025-01-02T12:00:00Z",
         "Hello A",
         Some("openai"),
+        None,
     )?;
     create_fake_rollout(
         codex_home.path(),
@@ -38,6 +39,7 @@ async fn test_list_and_resume_conversations() -> Result<()> {
         "2025-01-01T13:00:00Z",
         "Hello B",
         Some("openai"),
+        None,
     )?;
     create_fake_rollout(
         codex_home.path(),
@@ -45,6 +47,7 @@ async fn test_list_and_resume_conversations() -> Result<()> {
         "2025-01-01T12:00:00Z",
         "Hello C",
         None,
+        None,
     )?;
 
     let mut mcp = McpProcess::new(codex_home.path()).await?;
@@ -105,6 +108,7 @@ async fn test_list_and_resume_conversations() -> Result<()> {
         "2025-01-01T11:30:00Z",
         "Hello TP",
         Some("test-provider"),
+        None,
     )?;
 
     // Filtering by model provider should return only matching sessions.
```
@@ -2,10 +2,14 @@ use anyhow::Result;
|
||||
use app_test_support::McpProcess;
|
||||
use app_test_support::create_fake_rollout;
|
||||
use app_test_support::to_response;
|
||||
use codex_app_server_protocol::GitInfo as ApiGitInfo;
|
||||
use codex_app_server_protocol::JSONRPCResponse;
|
||||
use codex_app_server_protocol::RequestId;
|
||||
use codex_app_server_protocol::SessionSource;
|
||||
use codex_app_server_protocol::ThreadListParams;
|
||||
use codex_app_server_protocol::ThreadListResponse;
|
||||
use codex_protocol::protocol::GitInfo as CoreGitInfo;
|
||||
use std::path::PathBuf;
|
||||
use tempfile::TempDir;
|
||||
use tokio::time::timeout;
|
||||
|
||||
@@ -24,7 +28,7 @@ async fn thread_list_basic_empty() -> Result<()> {
|
||||
.send_thread_list_request(ThreadListParams {
|
||||
cursor: None,
|
||||
limit: Some(10),
|
||||
model_providers: None,
|
||||
model_providers: Some(vec!["mock_provider".to_string()]),
|
||||
})
|
||||
.await?;
|
||||
let list_resp: JSONRPCResponse = timeout(
|
||||
@@ -63,6 +67,7 @@ async fn thread_list_pagination_next_cursor_none_on_last_page() -> Result<()> {
|
||||
"2025-01-02T12:00:00Z",
|
||||
"Hello",
|
||||
Some("mock_provider"),
|
||||
None,
|
||||
)?;
|
||||
let _b = create_fake_rollout(
|
||||
codex_home.path(),
|
||||
@@ -70,6 +75,7 @@ async fn thread_list_pagination_next_cursor_none_on_last_page() -> Result<()> {
|
||||
"2025-01-01T13:00:00Z",
|
||||
"Hello",
|
||||
Some("mock_provider"),
|
||||
None,
|
||||
)?;
|
||||
let _c = create_fake_rollout(
|
||||
codex_home.path(),
|
||||
@@ -77,6 +83,7 @@ async fn thread_list_pagination_next_cursor_none_on_last_page() -> Result<()> {
|
||||
"2025-01-01T12:00:00Z",
|
||||
"Hello",
|
||||
Some("mock_provider"),
|
||||
None,
|
||||
)?;
|
||||
|
||||
let mut mcp = McpProcess::new(codex_home.path()).await?;
|
||||
@@ -104,6 +111,10 @@ async fn thread_list_pagination_next_cursor_none_on_last_page() -> Result<()> {
|
||||
assert_eq!(thread.preview, "Hello");
|
||||
assert_eq!(thread.model_provider, "mock_provider");
|
||||
assert!(thread.created_at > 0);
|
||||
assert_eq!(thread.cwd, PathBuf::from("/"));
|
||||
assert_eq!(thread.cli_version, "0.0.0");
|
||||
assert_eq!(thread.source, SessionSource::Cli);
|
||||
assert_eq!(thread.git_info, None);
|
||||
}
|
||||
let cursor1 = cursor1.expect("expected nextCursor on first page");
|
||||
|
||||
@@ -129,6 +140,10 @@ async fn thread_list_pagination_next_cursor_none_on_last_page() -> Result<()> {
|
||||
assert_eq!(thread.preview, "Hello");
|
||||
assert_eq!(thread.model_provider, "mock_provider");
|
||||
assert!(thread.created_at > 0);
|
||||
assert_eq!(thread.cwd, PathBuf::from("/"));
|
||||
assert_eq!(thread.cli_version, "0.0.0");
|
||||
assert_eq!(thread.source, SessionSource::Cli);
|
||||
assert_eq!(thread.git_info, None);
|
||||
}
|
||||
assert_eq!(cursor2, None, "expected nextCursor to be null on last page");
|
||||
|
||||
@@ -147,6 +162,7 @@ async fn thread_list_respects_provider_filter() -> Result<()> {
|
||||
"2025-01-02T10:00:00Z",
|
||||
"X",
|
||||
Some("mock_provider"),
|
||||
None,
|
||||
)?; // mock_provider
|
||||
let _b = create_fake_rollout(
|
||||
codex_home.path(),
|
||||
@@ -154,6 +170,7 @@ async fn thread_list_respects_provider_filter() -> Result<()> {
|
||||
"2025-01-02T11:00:00Z",
|
||||
"X",
|
||||
Some("other_provider"),
|
||||
None,
|
||||
)?;
|
||||
|
||||
let mut mcp = McpProcess::new(codex_home.path()).await?;
|
||||
@@ -180,6 +197,63 @@ async fn thread_list_respects_provider_filter() -> Result<()> {
|
||||
assert_eq!(thread.model_provider, "other_provider");
|
||||
let expected_ts = chrono::DateTime::parse_from_rfc3339("2025-01-02T11:00:00Z")?.timestamp();
|
||||
assert_eq!(thread.created_at, expected_ts);
|
||||
assert_eq!(thread.cwd, PathBuf::from("/"));
|
||||
assert_eq!(thread.cli_version, "0.0.0");
|
||||
assert_eq!(thread.source, SessionSource::Cli);
|
||||
assert_eq!(thread.git_info, None);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn thread_list_includes_git_info() -> Result<()> {
|
||||
let codex_home = TempDir::new()?;
|
||||
create_minimal_config(codex_home.path())?;
|
||||
|
||||
let git_info = CoreGitInfo {
|
||||
commit_hash: Some("abc123".to_string()),
|
||||
branch: Some("main".to_string()),
|
||||
repository_url: Some("https://example.com/repo.git".to_string()),
|
||||
};
|
||||
let conversation_id = create_fake_rollout(
|
||||
codex_home.path(),
|
||||
"2025-02-01T09-00-00",
|
||||
"2025-02-01T09:00:00Z",
|
||||
"Git info preview",
|
||||
Some("mock_provider"),
|
||||
Some(git_info),
|
||||
)?;
|
||||
|
||||
let mut mcp = McpProcess::new(codex_home.path()).await?;
|
||||
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
|
||||
|
||||
let list_id = mcp
|
||||
.send_thread_list_request(ThreadListParams {
|
||||
cursor: None,
|
||||
limit: Some(10),
|
||||
model_providers: Some(vec!["mock_provider".to_string()]),
|
||||
})
|
||||
.await?;
|
||||
let resp: JSONRPCResponse = timeout(
|
||||
DEFAULT_READ_TIMEOUT,
|
||||
mcp.read_stream_until_response_message(RequestId::Integer(list_id)),
|
||||
)
|
||||
.await??;
|
||||
let ThreadListResponse { data, .. } = to_response::<ThreadListResponse>(resp)?;
|
||||
let thread = data
|
||||
.iter()
|
||||
.find(|t| t.id == conversation_id)
|
||||
.expect("expected thread for created rollout");
|
||||
|
||||
let expected_git = ApiGitInfo {
|
||||
sha: Some("abc123".to_string()),
|
||||
branch: Some("main".to_string()),
|
||||
origin_url: Some("https://example.com/repo.git".to_string()),
|
||||
};
|
||||
assert_eq!(thread.git_info, Some(expected_git));
|
||||
assert_eq!(thread.source, SessionSource::Cli);
|
||||
assert_eq!(thread.cwd, PathBuf::from("/"));
|
||||
assert_eq!(thread.cli_version, "0.0.0");
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -5,6 +5,7 @@ use app_test_support::create_mock_chat_completions_server;
|
||||
use app_test_support::to_response;
|
||||
use codex_app_server_protocol::JSONRPCResponse;
|
||||
use codex_app_server_protocol::RequestId;
|
||||
use codex_app_server_protocol::SessionSource;
|
||||
use codex_app_server_protocol::ThreadItem;
|
||||
use codex_app_server_protocol::ThreadResumeParams;
|
||||
use codex_app_server_protocol::ThreadResumeResponse;
|
||||
@@ -14,6 +15,7 @@ use codex_app_server_protocol::TurnStatus;
|
||||
use codex_app_server_protocol::UserInput;
|
||||
use codex_protocol::models::ContentItem;
|
||||
use codex_protocol::models::ResponseItem;
|
||||
use std::path::PathBuf;
|
||||
use tempfile::TempDir;
|
||||
use tokio::time::timeout;
|
||||
|
||||
@@ -75,6 +77,7 @@ async fn thread_resume_returns_rollout_history() -> Result<()> {
|
||||
"2025-01-05T12:00:00Z",
|
||||
preview,
|
||||
Some("mock_provider"),
|
||||
None,
|
||||
)?;
|
||||
|
||||
let mut mcp = McpProcess::new(codex_home.path()).await?;
|
||||
@@ -97,6 +100,10 @@ async fn thread_resume_returns_rollout_history() -> Result<()> {
|
||||
assert_eq!(thread.preview, preview);
|
||||
assert_eq!(thread.model_provider, "mock_provider");
|
||||
assert!(thread.path.is_absolute());
|
||||
assert_eq!(thread.cwd, PathBuf::from("/"));
|
||||
assert_eq!(thread.cli_version, "0.0.0");
|
||||
assert_eq!(thread.source, SessionSource::Cli);
|
||||
assert_eq!(thread.git_info, None);
|
||||
|
||||
assert_eq!(
|
||||
thread.turns.len(),
|
||||
|
||||
@@ -3,7 +3,7 @@
|
||||
use anyhow::Result;
|
||||
use app_test_support::McpProcess;
|
||||
use app_test_support::create_mock_chat_completions_server;
|
||||
use app_test_support::create_shell_sse_response;
|
||||
use app_test_support::create_shell_command_sse_response;
|
||||
use app_test_support::to_response;
|
||||
use codex_app_server_protocol::JSONRPCNotification;
|
||||
use codex_app_server_protocol::JSONRPCResponse;
|
||||
@@ -41,7 +41,7 @@ async fn turn_interrupt_aborts_running_turn() -> Result<()> {
|
||||
std::fs::create_dir(&working_directory)?;
|
||||
|
||||
// Mock server: long-running shell command then (after abort) nothing else needed.
|
||||
let server = create_mock_chat_completions_server(vec![create_shell_sse_response(
|
||||
let server = create_mock_chat_completions_server(vec![create_shell_command_sse_response(
|
||||
shell_command.clone(),
|
||||
Some(&working_directory),
|
||||
Some(10_000),
|
||||
|
||||
@@ -4,9 +4,11 @@ use app_test_support::create_apply_patch_sse_response;
|
||||
use app_test_support::create_final_assistant_message_sse_response;
|
||||
use app_test_support::create_mock_chat_completions_server;
|
||||
use app_test_support::create_mock_chat_completions_server_unchecked;
|
||||
use app_test_support::create_shell_sse_response;
|
||||
use app_test_support::create_shell_command_sse_response;
|
||||
use app_test_support::format_with_current_shell_display;
|
||||
use app_test_support::to_response;
|
||||
use codex_app_server_protocol::ApprovalDecision;
|
||||
use codex_app_server_protocol::CommandExecutionRequestApprovalResponse;
|
||||
use codex_app_server_protocol::CommandExecutionStatus;
|
||||
use codex_app_server_protocol::FileChangeRequestApprovalResponse;
|
||||
use codex_app_server_protocol::ItemCompletedNotification;
|
||||
@@ -203,7 +205,7 @@ async fn turn_start_exec_approval_toggle_v2() -> Result<()> {
|
||||
// Mock server: first turn requests a shell call (elicitation), then completes.
|
||||
// Second turn same, but we'll set approval_policy=never to avoid elicitation.
|
||||
let responses = vec![
|
||||
create_shell_sse_response(
|
||||
create_shell_command_sse_response(
|
||||
vec![
|
||||
"python3".to_string(),
|
||||
"-c".to_string(),
|
||||
@@ -214,7 +216,7 @@ async fn turn_start_exec_approval_toggle_v2() -> Result<()> {
|
||||
"call1",
|
||||
)?,
|
||||
create_final_assistant_message_sse_response("done 1")?,
|
||||
create_shell_sse_response(
|
||||
create_shell_command_sse_response(
|
||||
vec![
|
||||
"python3".to_string(),
|
||||
"-c".to_string(),
|
||||
@@ -328,6 +330,145 @@ async fn turn_start_exec_approval_toggle_v2() -> Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn turn_start_exec_approval_decline_v2() -> Result<()> {
|
||||
skip_if_no_network!(Ok(()));
|
||||
|
||||
let tmp = TempDir::new()?;
|
||||
let codex_home = tmp.path().to_path_buf();
|
||||
let workspace = tmp.path().join("workspace");
|
||||
std::fs::create_dir(&workspace)?;
|
||||
|
||||
let responses = vec![
|
||||
create_shell_command_sse_response(
|
||||
vec![
|
||||
"python3".to_string(),
|
||||
"-c".to_string(),
|
||||
"print(42)".to_string(),
|
||||
],
|
||||
None,
|
||||
Some(5000),
|
||||
"call-decline",
|
||||
)?,
|
||||
create_final_assistant_message_sse_response("done")?,
|
||||
];
|
||||
let server = create_mock_chat_completions_server(responses).await;
|
||||
create_config_toml(codex_home.as_path(), &server.uri(), "untrusted")?;
|
||||
|
||||
let mut mcp = McpProcess::new(codex_home.as_path()).await?;
|
||||
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
|
||||
|
||||
let start_id = mcp
|
||||
.send_thread_start_request(ThreadStartParams {
|
||||
model: Some("mock-model".to_string()),
|
||||
..Default::default()
|
||||
})
|
||||
.await?;
|
||||
let start_resp: JSONRPCResponse = timeout(
|
||||
DEFAULT_READ_TIMEOUT,
|
||||
mcp.read_stream_until_response_message(RequestId::Integer(start_id)),
|
||||
)
|
||||
.await??;
|
||||
let ThreadStartResponse { thread, .. } = to_response::<ThreadStartResponse>(start_resp)?;
|
||||
|
||||
let turn_id = mcp
|
||||
.send_turn_start_request(TurnStartParams {
|
||||
thread_id: thread.id.clone(),
|
||||
input: vec![V2UserInput::Text {
|
||||
text: "run python".to_string(),
|
||||
}],
|
||||
cwd: Some(workspace.clone()),
|
||||
..Default::default()
|
||||
})
|
||||
.await?;
|
||||
let turn_resp: JSONRPCResponse = timeout(
|
||||
DEFAULT_READ_TIMEOUT,
|
||||
mcp.read_stream_until_response_message(RequestId::Integer(turn_id)),
|
||||
)
|
||||
.await??;
|
||||
let TurnStartResponse { turn } = to_response::<TurnStartResponse>(turn_resp)?;
|
||||
|
||||
let started_command_execution = timeout(DEFAULT_READ_TIMEOUT, async {
|
||||
loop {
|
||||
let started_notif = mcp
|
||||
.read_stream_until_notification_message("item/started")
|
||||
.await?;
|
||||
let started: ItemStartedNotification =
|
||||
serde_json::from_value(started_notif.params.clone().expect("item/started params"))?;
|
||||
if let ThreadItem::CommandExecution { .. } = started.item {
|
||||
return Ok::<ThreadItem, anyhow::Error>(started.item);
|
||||
}
|
||||
}
|
||||
})
|
||||
.await??;
|
||||
let ThreadItem::CommandExecution { id, status, .. } = started_command_execution else {
|
||||
unreachable!("loop ensures we break on command execution items");
|
||||
};
|
||||
assert_eq!(id, "call-decline");
|
||||
assert_eq!(status, CommandExecutionStatus::InProgress);
|
||||
|
||||
let server_req = timeout(
|
||||
DEFAULT_READ_TIMEOUT,
|
||||
mcp.read_stream_until_request_message(),
|
||||
)
|
||||
.await??;
|
||||
let ServerRequest::CommandExecutionRequestApproval { request_id, params } = server_req else {
|
||||
panic!("expected CommandExecutionRequestApproval request")
|
||||
};
|
||||
assert_eq!(params.item_id, "call-decline");
|
||||
assert_eq!(params.thread_id, thread.id);
|
||||
assert_eq!(params.turn_id, turn.id);
|
||||
|
||||
mcp.send_response(
|
||||
request_id,
|
||||
serde_json::to_value(CommandExecutionRequestApprovalResponse {
|
||||
decision: ApprovalDecision::Decline,
|
||||
accept_settings: None,
|
||||
})?,
|
||||
)
|
||||
.await?;
|
||||
|
||||
let completed_command_execution = timeout(DEFAULT_READ_TIMEOUT, async {
|
||||
loop {
|
||||
let completed_notif = mcp
|
||||
.read_stream_until_notification_message("item/completed")
|
||||
.await?;
|
||||
let completed: ItemCompletedNotification = serde_json::from_value(
|
||||
completed_notif
|
||||
.params
|
||||
.clone()
|
||||
.expect("item/completed params"),
|
||||
)?;
|
||||
if let ThreadItem::CommandExecution { .. } = completed.item {
|
||||
return Ok::<ThreadItem, anyhow::Error>(completed.item);
|
||||
}
|
||||
}
|
||||
})
|
||||
.await??;
|
||||
let ThreadItem::CommandExecution {
|
||||
id,
|
||||
status,
|
||||
exit_code,
|
||||
aggregated_output,
|
||||
..
|
||||
} = completed_command_execution
|
||||
else {
|
||||
unreachable!("loop ensures we break on command execution items");
|
||||
};
|
||||
assert_eq!(id, "call-decline");
|
||||
assert_eq!(status, CommandExecutionStatus::Declined);
|
||||
assert!(exit_code.is_none());
|
||||
assert!(aggregated_output.is_none());
|
||||
|
||||
timeout(
|
||||
DEFAULT_READ_TIMEOUT,
|
||||
mcp.read_stream_until_notification_message("codex/event/task_complete"),
|
||||
)
|
||||
.await??;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn turn_start_updates_sandbox_and_cwd_between_turns_v2() -> Result<()> {
|
||||
skip_if_no_network!(Ok(()));
|
||||
@@ -343,23 +484,15 @@ async fn turn_start_updates_sandbox_and_cwd_between_turns_v2() -> Result<()> {
|
||||
std::fs::create_dir(&second_cwd)?;
|
||||
|
||||
let responses = vec![
|
||||
create_shell_sse_response(
|
||||
vec![
|
||||
"bash".to_string(),
|
||||
"-lc".to_string(),
|
||||
"echo first turn".to_string(),
|
||||
],
|
||||
create_shell_command_sse_response(
|
||||
vec!["echo".to_string(), "first".to_string(), "turn".to_string()],
|
||||
None,
|
||||
Some(5000),
|
||||
"call-first",
|
||||
)?,
|
||||
create_final_assistant_message_sse_response("done first")?,
|
||||
create_shell_sse_response(
|
||||
vec![
|
||||
"bash".to_string(),
|
||||
"-lc".to_string(),
|
||||
"echo second turn".to_string(),
|
||||
],
|
||||
create_shell_command_sse_response(
|
||||
vec!["echo".to_string(), "second".to_string(), "turn".to_string()],
|
||||
None,
|
||||
Some(5000),
|
||||
"call-second",
|
||||
@@ -465,7 +598,8 @@ async fn turn_start_updates_sandbox_and_cwd_between_turns_v2() -> Result<()> {
|
||||
unreachable!("loop ensures we break on command execution items");
|
||||
};
|
||||
assert_eq!(cwd, second_cwd);
|
||||
assert_eq!(command, "bash -lc 'echo second turn'");
|
||||
let expected_command = format_with_current_shell_display("echo second turn");
|
||||
assert_eq!(command, expected_command);
|
||||
assert_eq!(status, CommandExecutionStatus::InProgress);
|
||||
|
||||
timeout(
|
||||
@@ -480,6 +614,10 @@ async fn turn_start_updates_sandbox_and_cwd_between_turns_v2() -> Result<()> {
|
||||
#[tokio::test]
|
||||
async fn turn_start_file_change_approval_v2() -> Result<()> {
|
||||
skip_if_no_network!(Ok(()));
|
||||
if cfg!(windows) {
|
||||
// TODO apply_patch approvals are not parsed from powershell commands yet
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
let tmp = TempDir::new()?;
|
||||
let codex_home = tmp.path().join("codex_home");
|
||||
@@ -626,6 +764,10 @@ async fn turn_start_file_change_approval_v2() -> Result<()> {
|
||||
#[tokio::test]
|
||||
async fn turn_start_file_change_approval_decline_v2() -> Result<()> {
|
||||
skip_if_no_network!(Ok(()));
|
||||
if cfg!(windows) {
|
||||
// TODO apply_patch approvals are not parsed from powershell commands yet
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
let tmp = TempDir::new()?;
|
||||
let codex_home = tmp.path().join("codex_home");
|
||||
|
||||
@@ -30,6 +30,7 @@ pub use standalone_executable::main;
|
||||
pub const APPLY_PATCH_TOOL_INSTRUCTIONS: &str = include_str!("../apply_patch_tool_instructions.md");
|
||||
|
||||
const APPLY_PATCH_COMMANDS: [&str; 2] = ["apply_patch", "applypatch"];
|
||||
const APPLY_PATCH_SHELLS: [&str; 3] = ["bash", "zsh", "sh"];
|
||||
|
||||
#[derive(Debug, Error, PartialEq)]
|
||||
pub enum ApplyPatchError {
|
||||
@@ -96,6 +97,13 @@ pub struct ApplyPatchArgs {
|
||||
pub workdir: Option<String>,
|
||||
}
|
||||
|
||||
fn shell_supports_apply_patch(shell: &str) -> bool {
|
||||
std::path::Path::new(shell)
|
||||
.file_name()
|
||||
.and_then(|name| name.to_str())
|
||||
.is_some_and(|name| APPLY_PATCH_SHELLS.contains(&name))
|
||||
}
|
||||
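The guard that previously accepted only a literal `bash` argv now accepts any shell whose basename appears in `APPLY_PATCH_SHELLS`; matching on `Path::file_name` means absolute interpreter paths qualify as well. A few illustrative calls, assuming the function exactly as shown above:

```rust
assert!(shell_supports_apply_patch("bash"));
assert!(shell_supports_apply_patch("/usr/bin/zsh")); // basename match
assert!(shell_supports_apply_patch("sh"));
assert!(!shell_supports_apply_patch("fish"));
assert!(!shell_supports_apply_patch("pwsh"));
```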

pub fn maybe_parse_apply_patch(argv: &[String]) -> MaybeApplyPatch {
    match argv {
        // Direct invocation: apply_patch <patch>
@@ -104,7 +112,7 @@ pub fn maybe_parse_apply_patch(argv: &[String]) -> MaybeApplyPatch {
            Err(e) => MaybeApplyPatch::PatchParseError(e),
        },
        // Bash heredoc form: (optional `cd <path> &&`) apply_patch <<'EOF' ...
        [bash, flag, script] if bash == "bash" && flag == "-lc" => {
        [shell, flag, script] if shell_supports_apply_patch(shell) && flag == "-lc" => {
            match extract_apply_patch_from_bash(script) {
                Ok((body, workdir)) => match parse_patch(&body) {
                    Ok(mut source) => {
@@ -224,12 +232,12 @@ pub fn maybe_parse_apply_patch_verified(argv: &[String], cwd: &Path) -> MaybeApp
                );
            }
        }
        [bash, flag, script] if bash == "bash" && flag == "-lc" => {
            if parse_patch(script).is_ok() {
                return MaybeApplyPatchVerified::CorrectnessError(
                    ApplyPatchError::ImplicitInvocation,
                );
            }
        [shell, flag, script]
            if shell_supports_apply_patch(shell)
                && flag == "-lc"
                && parse_patch(script).is_ok() =>
        {
            return MaybeApplyPatchVerified::CorrectnessError(ApplyPatchError::ImplicitInvocation);
        }
        _ => {}
    }

@@ -56,6 +56,9 @@ sha2 = { workspace = true }
shlex = { workspace = true }
similar = { workspace = true }
strum_macros = { workspace = true }
url = { workspace = true }
once_cell = { workspace = true }
regex = { workspace = true }
tempfile = { workspace = true }
test-case = "3.3.1"
test-log = { workspace = true }
@@ -118,6 +121,7 @@ image = { workspace = true, features = ["jpeg", "png"] }
maplit = { workspace = true }
predicates = { workspace = true }
pretty_assertions = { workspace = true }
insta = { version = "1.39", features = ["yaml"] }
serial_test = { workspace = true }
tempfile = { workspace = true }
tokio-test = { workspace = true }

29
codex-rs/core/root_agent_prompt.md
Normal file
@@ -0,0 +1,29 @@
You are the **root agent** in a multi‑agent Codex session.

Your job is to solve the user’s task end‑to‑end. Use subagents as semi‑autonomous workers when that makes the work simpler, safer, or more parallel, and otherwise act directly in the conversation as a normal assistant.

Subagent behavior and limits are configured via `config.toml` knobs documented under the [feature flags section](../../docs/config.md#feature-flags). Enable the `subagent_tools` feature flag there before relying on the helpers, then tune the following settings:

- `max_active_subagents` (`../../docs/config.md#max_active_subagents`) caps how many subagent sessions may run concurrently so you keep CPU/memory demand bounded.
- `root_agent_uses_user_messages` (`../../docs/config.md#root_agent_uses_user_messages`) controls whether the child sees your `subagent_send_message` text as a normal user turn or must read it from the tool output.
- `subagent_root_inbox_autosubmit` (`../../docs/config.md#subagent_root_inbox_autosubmit`) determines whether the root automatically drains its inbox and optionally starts follow-up turns when messages arrive.
- `subagent_inbox_inject_before_tools` (`../../docs/config.md#subagent_inbox_inject_before_tools`) chooses whether synthetic `subagent_await` calls are recorded before or after the real tool outputs for a turn.

Use subagents as follows:

- Spawn or fork a subagent when a piece of work can be isolated behind a clear prompt, or when you want an independent view on a problem.
- Let subagents run independently. You do not need to keep generating output while they work; focus your own turns on planning, orchestration, and integrating results.
- Use `subagent_send_message` to give a subagent follow-up instructions, send it short status updates or summaries, or interrupt and redirect it.
- Use `subagent_await` when you need to wait for a particular subagent before continuing; you do not have to await every subagent you spawn, because they can also report progress and results to you via `subagent_send_message` and completions will be surfaced to you automatically.
- When you see a `subagent_await` call/output injected into the transcript without you calling the tool, that came from the autosubmit path: the system drained the inbox (e.g., a subagent completion) while the root was idle and recorded a synthetic `subagent_await` so you can read and react without issuing the tool yourself (controlled by `subagent_root_inbox_autosubmit` in `config.toml`).
- Use `subagent_logs` when you only need to inspect what a subagent has been doing recently, not to change its state.
- Use `subagent_list`, `subagent_prune`, and `subagent_cancel` to keep the set of active subagents small and relevant.
- When you spawn a subagent or start a watchdog and there’s nothing else useful to do, issue the tool call right away and say you’re waiting for results (or for the watchdog to start). If you can do other useful work in parallel, do that instead of stalling, and only await when necessary.

Be concise and direct. Delegate multi‑step or long‑running work to subagents, summarize what they have done for the user, and always keep the conversation focused on the user’s goal.

**Example: long‑running supervision with a watchdog**
- Spawn a supervisor to own `PLAN.md`: e.g., `subagent_spawn` label `supervisor`, prompt it to keep the plan fresh, launch workers, and heartbeat every few minutes.
- Attach a watchdog to the supervisor (or to yourself) that pings on a cadence and asks for progress: call `subagent_watchdog` with `{agent_id: <supervisor_id>, interval_s: 300, message: "Watchdog ping — report current status and PLAN progress", cancel: false}`.
- The supervisor should reply to each ping with a brief status and, if needed, spawn/interrupt workers; the root can cancel or retarget by invoking `subagent_watchdog` again with `cancel: true`.
- You can also set a self‑watchdog on the root agent to ensure you keep emitting status updates during multi‑hour tasks.
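The watchdog example in the prompt above passes its arguments as an inline JSON object. A hypothetical Rust shape for that payload, with field names taken from the example and types assumed, not confirmed anywhere in this diff:

```rust
// Hypothetical argument struct for the subagent_watchdog call shown above;
// the field names mirror the JSON example, the types are assumptions.
#[derive(serde::Serialize)]
struct SubagentWatchdogArgs {
    agent_id: u64,   // subagent to attach the watchdog to (or the root itself)
    interval_s: u64, // ping cadence in seconds, e.g. 300
    message: String, // text delivered with every ping
    cancel: bool,    // true cancels or retargets an existing watchdog
}
```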
@@ -46,6 +46,7 @@ use mcp_types::ReadResourceRequestParams;
use mcp_types::ReadResourceResult;
use serde_json;
use serde_json::Value;
use tokio::fs;
use tokio::sync::Mutex;
use tokio::sync::RwLock;
use tokio::sync::oneshot;
@@ -103,6 +104,8 @@ use crate::shell;
use crate::state::ActiveTurn;
use crate::state::SessionServices;
use crate::state::SessionState;
use crate::subagents::SubagentManager;
use crate::subagents::SubagentRegistry;
use crate::tasks::GhostSnapshotTask;
use crate::tasks::ReviewTask;
use crate::tasks::SessionTask;
@@ -134,12 +137,19 @@ use codex_protocol::user_input::UserInput;
use codex_utils_readiness::Readiness;
use codex_utils_readiness::ReadinessFlag;

// Built-in prompts for orchestrating and running subagents. These can be
// overridden via files in `$CODEX_HOME` (see `load_root_agent_prompt` and
// `load_subagent_prompt`).
const ROOT_AGENT_PROMPT_FALLBACK: &str = include_str!("../root_agent_prompt.md");
const SUBAGENT_PROMPT_FALLBACK: &str = include_str!("../subagent_prompt.md");

/// The high-level interface to the Codex system.
/// It operates as a queue pair where you send submissions and receive events.
pub struct Codex {
    pub(crate) next_id: AtomicU64,
    pub(crate) tx_sub: Sender<Submission>,
    pub(crate) rx_event: Receiver<Event>,
    pub(crate) conversation_id: ConversationId,
}

/// Wrapper returned by [`Codex::spawn`] containing the spawned [`Codex`],
@@ -153,6 +163,31 @@ pub struct CodexSpawnOk {
pub(crate) const INITIAL_SUBMIT_ID: &str = "";
pub(crate) const SUBMISSION_CHANNEL_CAPACITY: usize = 64;

async fn load_agent_prompt_fallback(fallback: &str, override_filename: &str) -> Option<String> {
    if let Ok(home) = crate::config::find_codex_home() {
        let path = home.join(override_filename);
        if let Ok(contents) = fs::read_to_string(&path).await {
            let trimmed = contents.trim();
            if !trimmed.is_empty() {
                return Some(contents);
            }
        }
    }
    if fallback.trim().is_empty() {
        None
    } else {
        Some(fallback.to_string())
    }
}

async fn load_root_agent_prompt() -> Option<String> {
    load_agent_prompt_fallback(ROOT_AGENT_PROMPT_FALLBACK, "AGENTS.root.md").await
}

async fn load_subagent_prompt() -> Option<String> {
    load_agent_prompt_fallback(SUBAGENT_PROMPT_FALLBACK, "AGENTS.subagent.md").await
}
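`load_agent_prompt_fallback` gives each built-in prompt a per-user override: a non-empty `AGENTS.root.md` or `AGENTS.subagent.md` in `$CODEX_HOME` wins, while a missing or whitespace-only file falls back to the compiled-in markdown. A sketch of the lookup order, assuming hypothetical test scaffolding where `find_codex_home` resolves to the directory being written (this demo is not code from the diff):

```rust
// Hypothetical illustration of the override order implemented above.
async fn prompt_override_demo(codex_home: &std::path::Path) -> anyhow::Result<()> {
    // 1. No AGENTS.root.md on disk: the include_str! fallback is returned.
    assert_eq!(
        load_root_agent_prompt().await.as_deref(),
        Some(ROOT_AGENT_PROMPT_FALLBACK)
    );

    // 2. Whitespace-only override files are treated as absent.
    tokio::fs::write(codex_home.join("AGENTS.root.md"), "   \n").await?;
    assert_eq!(
        load_root_agent_prompt().await.as_deref(),
        Some(ROOT_AGENT_PROMPT_FALLBACK)
    );

    // 3. A non-empty override wins and is returned verbatim (untrimmed).
    tokio::fs::write(codex_home.join("AGENTS.root.md"), "custom root prompt").await?;
    assert_eq!(
        load_root_agent_prompt().await.as_deref(),
        Some("custom root prompt")
    );
    Ok(())
}
```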

impl Codex {
    /// Spawn a new [`Codex`] and initialize the session.
    pub async fn spawn(
@@ -160,6 +195,7 @@ impl Codex {
        auth_manager: Arc<AuthManager>,
        conversation_history: InitialHistory,
        session_source: SessionSource,
        desired_conversation_id: Option<ConversationId>,
    ) -> CodexResult<CodexSpawnOk> {
        let (tx_sub, rx_sub) = async_channel::bounded(SUBMISSION_CHANNEL_CAPACITY);
        let (tx_event, rx_event) = async_channel::unbounded();
@@ -170,6 +206,27 @@
            .await
            .map_err(|err| CodexErr::Fatal(format!("failed to load execpolicy: {err}")))?;

        // When subagent tooling is enabled, attach additional developer
        // instructions that clarify the root vs subagent responsibilities.
        // Exactly one of these prompts applies to a session:
        // - Root sessions get `root_agent_prompt`.
        // - Subagent sessions (spawned or forked) get `subagent_prompt`.
        let role_prompt = if config.features.enabled(Feature::SubagentTools) {
            if let SessionSource::SubAgent(_) = session_source {
                load_subagent_prompt().await
            } else {
                load_root_agent_prompt().await
            }
        } else {
            None
        };

        let developer_instructions = match (role_prompt, config.developer_instructions.clone()) {
            (None, existing) => existing,
            (Some(prompt), None) => Some(prompt),
            (Some(prompt), Some(existing)) => Some(format!("{prompt}\n\n{existing}")),
        };

        let config = Arc::new(config);

        let session_configuration = SessionConfiguration {
@@ -177,7 +234,7 @@
            model: config.model.clone(),
            model_reasoning_effort: config.model_reasoning_effort,
            model_reasoning_summary: config.model_reasoning_summary,
            developer_instructions: config.developer_instructions.clone(),
            developer_instructions,
            user_instructions,
            base_instructions: config.base_instructions.clone(),
            compact_prompt: config.compact_prompt.clone(),
@@ -199,6 +256,7 @@
            tx_event.clone(),
            conversation_history,
            session_source_clone,
            desired_conversation_id,
        )
        .await
        .map_err(|e| {
@@ -213,6 +271,7 @@
            next_id: AtomicU64::new(0),
            tx_sub,
            rx_event,
            conversation_id,
        };

        Ok(CodexSpawnOk {
@@ -221,6 +280,10 @@
        })
    }

    pub fn conversation_id(&self) -> ConversationId {
        self.conversation_id
    }

    /// Submit the `op` wrapped in a `Submission` with a unique ID.
    pub async fn submit(&self, op: Op) -> CodexResult<String> {
        let id = self
@@ -387,6 +450,22 @@ pub(crate) struct SessionSettingsUpdate {
}

impl Session {
    pub(crate) fn conversation_id(&self) -> ConversationId {
        self.conversation_id
    }

    pub(crate) async fn history_len(&self) -> usize {
        let mut history = {
            let state = self.state.lock().await;
            state.clone_history()
        };
        history.get_history().len()
    }

    pub(crate) async fn has_active_turn(&self) -> bool {
        self.active_turn.lock().await.is_some()
    }

    fn make_turn_context(
        auth_manager: Option<Arc<AuthManager>>,
        otel_event_manager: &OtelEventManager,
@@ -455,6 +534,7 @@ impl Session {
        tx_event: Sender<Event>,
        initial_history: InitialHistory,
        session_source: SessionSource,
        desired_conversation_id: Option<ConversationId>,
    ) -> anyhow::Result<Arc<Self>> {
        debug!(
            "Configuring session: model={}; provider={:?}",
@@ -469,7 +549,7 @@

        let (conversation_id, rollout_params) = match &initial_history {
            InitialHistory::New | InitialHistory::Forked(_) => {
                let conversation_id = ConversationId::default();
                let conversation_id = desired_conversation_id.unwrap_or_default();
                (
                    conversation_id,
                    RolloutRecorderParams::new(
@@ -543,7 +623,6 @@
            config.model_reasoning_effort,
            config.model_reasoning_summary,
            config.model_context_window,
            config.model_max_output_tokens,
            config.model_auto_compact_token_limit,
            config.approval_policy,
            config.sandbox_policy.clone(),
@@ -554,6 +633,15 @@
        // Create the mutable state for the Session.
        let state = SessionState::new(session_configuration.clone());

        let subagent_registry = SubagentRegistry::new();
        let subagent_manager = SubagentManager::new(
            Arc::new(subagent_registry.clone()),
            config.max_active_subagents,
            config.root_agent_uses_user_messages,
            config.subagent_root_inbox_autosubmit,
            config.subagent_inbox_inject_before_tools,
        );

        let services = SessionServices {
            mcp_connection_manager: Arc::new(RwLock::new(McpConnectionManager::default())),
            mcp_startup_cancellation_token: CancellationToken::new(),
@@ -565,6 +653,8 @@
            auth_manager: Arc::clone(&auth_manager),
            otel_event_manager,
            tool_approvals: Mutex::new(ApprovalStore::default()),
            subagents: subagent_registry,
            subagent_manager,
        };

        let sess = Arc::new(Session {
@@ -576,6 +666,9 @@
            next_internal_sub_id: AtomicU64::new(0),
        });

        // Register this session so it can be discovered for fork-time subagent reparenting.
        crate::session_index::register(conversation_id, &sess);

        // Dispatch the SessionConfiguredEvent first and then report any errors.
        // If resuming, include converted initial messages in the payload so UIs can render them immediately.
        let initial_messages = initial_history.get_event_msgs();
@@ -653,10 +746,13 @@
                // Ensure initial items are visible to immediate readers (e.g., tests, forks).
                self.flush_rollout().await;
            }
            InitialHistory::Resumed(_) | InitialHistory::Forked(_) => {
            InitialHistory::Resumed(_) => {
                let rollout_items = conversation_history.get_rollout_items();
                let persist = matches!(conversation_history, InitialHistory::Forked(_));

                // Rehydrate subagent registry from persisted lifecycle events so UI/list tools work after resume.
                self.services
                    .subagent_manager
                    .import_from_rollout(&rollout_items, self.conversation_id)
                    .await;
                // If resuming, warn when the last recorded model differs from the current one.
                if let InitialHistory::Resumed(_) = conversation_history
                    && let Some(prev) = rollout_items.iter().rev().find_map(|it| {
@@ -693,11 +789,34 @@
                        .await;
                }

                // If persisting, persist all rollout items as-is (recorder filters)
                if persist && !rollout_items.is_empty() {
                    self.flush_rollout().await;
                }
            InitialHistory::Forked(_) => {
                let rollout_items = conversation_history.get_rollout_items();

                // Start from the parent rollout and then, for subagent
                // sessions only, append a developer message carrying the
                // subagent-specific prompt so the child can see it at the
                // fork boundary.
                let mut reconstructed_history =
                    self.reconstruct_history_from_rollout(&turn_context, &rollout_items);

                if let Some(dev) = turn_context.developer_instructions.as_deref()
                    && !dev.trim().is_empty()
                {
                    let dev_item: ResponseItem = DeveloperInstructions::new(dev.to_string()).into();
                    reconstructed_history.push(dev_item);
                }

                if !reconstructed_history.is_empty() {
                    self.record_into_history(&reconstructed_history, &turn_context)
                        .await;
                }

                if !rollout_items.is_empty() {
                    self.persist_rollout_items(&rollout_items).await;
                }
                // Flush after seeding history and any persisted rollout copy.
                self.flush_rollout().await;
            }
        }
@@ -1340,6 +1459,12 @@ impl Session {
    async fn cancel_mcp_startup(&self) {
        self.services.mcp_startup_cancellation_token.cancel();
    }

    pub(crate) fn root_inbox_autosubmit_enabled(&self) -> bool {
        self.services
            .subagent_manager
            .root_inbox_autosubmit_enabled()
    }
}

async fn submission_loop(sess: Arc<Session>, config: Arc<Config>, rx_sub: Receiver<Submission>) {
@@ -1830,23 +1955,43 @@ async fn spawn_review_thread(
/// - If the model sends only an assistant message, we record it in the
/// conversation history and consider the task complete.
///
fn merge_turn_items_for_recording(
    outputs_to_record: &[ResponseItem],
    new_inputs_to_record: &[ResponseItem],
    inbox_items: &[ResponseItem],
    inject_before_tools: bool,
) -> Vec<ResponseItem> {
    let mut items = Vec::with_capacity(
        outputs_to_record.len() + new_inputs_to_record.len() + inbox_items.len(),
    );
    items.extend(outputs_to_record.iter().cloned());
    if inject_before_tools {
        items.extend(inbox_items.iter().cloned());
        items.extend(new_inputs_to_record.iter().cloned());
    } else {
        items.extend(new_inputs_to_record.iter().cloned());
        items.extend(inbox_items.iter().cloned());
    }
    items
}
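The two `merge_turn_items_*` unit tests near the end of this diff pin the ordering down; in shorthand:

```rust
// Ordering produced by merge_turn_items_for_recording, as asserted by the
// merge_turn_items_* tests later in this diff:
//
//   inject_before_tools == false  ->  [outputs, tool results, inbox awaits]
//   inject_before_tools == true   ->  [outputs, inbox awaits, tool results]
```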
|
||||
pub(crate) async fn run_task(
|
||||
sess: Arc<Session>,
|
||||
turn_context: Arc<TurnContext>,
|
||||
input: Vec<UserInput>,
|
||||
record_user_input: bool,
|
||||
cancellation_token: CancellationToken,
|
||||
) -> Option<String> {
|
||||
if input.is_empty() {
|
||||
return None;
|
||||
}
|
||||
let event = EventMsg::TaskStarted(TaskStartedEvent {
|
||||
model_context_window: turn_context.client.get_model_context_window(),
|
||||
});
|
||||
sess.send_event(&turn_context, event).await;
|
||||
|
||||
let initial_input_for_turn: ResponseInputItem = ResponseInputItem::from(input);
|
||||
sess.record_input_and_rollout_usermsg(turn_context.as_ref(), &initial_input_for_turn)
|
||||
.await;
|
||||
if record_user_input && !input.is_empty() {
|
||||
let initial_input_for_turn: ResponseInputItem = ResponseInputItem::from(input);
|
||||
sess.record_input_and_rollout_usermsg(turn_context.as_ref(), &initial_input_for_turn)
|
||||
.await;
|
||||
}
|
||||
|
||||
sess.maybe_start_ghost_snapshot(Arc::clone(&turn_context), cancellation_token.child_token())
|
||||
.await;
|
||||
@@ -1905,8 +2050,36 @@ pub(crate) async fn run_task(
|
||||
let token_limit_reached = total_usage_tokens
|
||||
.map(|tokens| tokens >= limit)
|
||||
.unwrap_or(false);
|
||||
let (responses, items_to_record_in_conversation_history) =
|
||||
process_items(processed_items, &sess, &turn_context).await;
|
||||
let (responses, outputs_to_record, new_inputs_to_record) =
|
||||
process_items(processed_items).await;
|
||||
|
||||
// Drain the root agent inbox and synthesize subagent_await
|
||||
// items at this safe stopping point. Only the root session has
|
||||
// a root inbox; for subagent sessions this will be a no-op.
|
||||
let inbox_items = sess
|
||||
.services
|
||||
.subagent_manager
|
||||
.drain_root_inbox_to_items(&sess.conversation_id())
|
||||
.await;
|
||||
let had_inbox = !inbox_items.is_empty();
|
||||
|
||||
// Assemble the final list of items to record for this turn,
|
||||
// respecting the configured injection order for inbox-derived
|
||||
// synthetic subagent_await calls.
|
||||
let items_to_record_in_conversation_history = merge_turn_items_for_recording(
|
||||
&outputs_to_record,
|
||||
&new_inputs_to_record,
|
||||
&inbox_items,
|
||||
sess.services.subagent_manager.inbox_inject_before_tools(),
|
||||
);
|
||||
|
||||
if !items_to_record_in_conversation_history.is_empty() {
|
||||
sess.record_conversation_items(
|
||||
&turn_context,
|
||||
&items_to_record_in_conversation_history,
|
||||
)
|
||||
.await;
|
||||
}
|
||||
|
||||
// as long as compaction works well in getting us way below the token limit, we shouldn't worry about being in an infinite loop.
|
||||
if token_limit_reached {
|
||||
@@ -1920,6 +2093,15 @@ pub(crate) async fn run_task(
|
||||
}
|
||||
|
||||
if responses.is_empty() {
|
||||
// Hard case: no tool calls to execute. If draining the
|
||||
// inbox produced new synthetic subagent_await items and
|
||||
// autosubmit is enabled, allow the model to react in a
|
||||
// follow-up turn by continuing the loop instead of
|
||||
// treating this as a terminal completion.
|
||||
if sess.root_inbox_autosubmit_enabled() && had_inbox {
|
||||
continue;
|
||||
}
|
||||
|
||||
last_agent_message = get_last_assistant_message_from_turn(
|
||||
&items_to_record_in_conversation_history,
|
||||
);
|
||||
@@ -1938,7 +2120,18 @@ pub(crate) async fn run_task(
|
||||
Err(CodexErr::TurnAborted {
|
||||
dangling_artifacts: processed_items,
|
||||
}) => {
|
||||
let _ = process_items(processed_items, &sess, &turn_context).await;
|
||||
let (_, outputs_to_record, new_inputs_to_record) =
|
||||
process_items(processed_items).await;
|
||||
let mut items_to_record_in_conversation_history = Vec::new();
|
||||
items_to_record_in_conversation_history.extend(outputs_to_record);
|
||||
items_to_record_in_conversation_history.extend(new_inputs_to_record);
|
||||
if !items_to_record_in_conversation_history.is_empty() {
|
||||
sess.record_conversation_items(
|
||||
&turn_context,
|
||||
&items_to_record_in_conversation_history,
|
||||
)
|
||||
.await;
|
||||
}
|
||||
// Aborted turn is reported via a different event.
|
||||
break;
|
||||
}
|
||||
@@ -2411,6 +2604,11 @@ mod tests {
|
||||
use std::time::Duration;
|
||||
use tokio::time::sleep;
|
||||
|
||||
use crate::subagents::AwaitInboxResult;
|
||||
use crate::subagents::InboxMessage;
|
||||
use crate::subagents::SubagentCompletion;
|
||||
use crate::subagents::SubagentManager;
|
||||
use crate::subagents::SubagentRegistry;
|
||||
use mcp_types::ContentBlock;
|
||||
use mcp_types::TextContent;
|
||||
use pretty_assertions::assert_eq;
|
||||
@@ -2462,6 +2660,66 @@ mod tests {
|
||||
assert_eq!(expected, actual);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn forked_subagent_injects_subagent_developer_instructions() {
|
||||
use codex_protocol::models::ContentItem;
|
||||
use codex_protocol::models::ResponseItem as ProtocolResponseItem;
|
||||
use codex_protocol::protocol::SubAgentSource;
|
||||
|
||||
// Start from a basic session and then mark it as a subagent session
|
||||
// with explicit developer instructions that stand in for the
|
||||
// subagent prompt.
|
||||
let (session, _tc) = make_session_and_context();
|
||||
tokio_test::block_on(async {
|
||||
let mut state = session.state.lock().await;
|
||||
state.session_configuration.session_source =
|
||||
SessionSource::SubAgent(SubAgentSource::Other("child".to_string()));
|
||||
state.session_configuration.developer_instructions =
|
||||
Some("SUBAGENT_PROMPT".to_string());
|
||||
});
|
||||
|
||||
// Build a minimal forked rollout containing a single user message
|
||||
// from the parent.
|
||||
let parent_msg = ProtocolResponseItem::Message {
|
||||
id: None,
|
||||
role: "user".to_string(),
|
||||
content: vec![ContentItem::InputText {
|
||||
text: "parent-msg".to_string(),
|
||||
}],
|
||||
};
|
||||
let rollout_items = vec![RolloutItem::ResponseItem(parent_msg)];
|
||||
|
||||
// Seed the forked history; for subagent sessions this should append a
|
||||
// developer message carrying the subagent prompt after the parent
|
||||
// transcript.
|
||||
tokio_test::block_on(session.record_initial_history(InitialHistory::Forked(rollout_items)));
|
||||
|
||||
let history = tokio_test::block_on(async {
|
||||
session.state.lock().await.clone_history().get_history()
|
||||
});
|
||||
|
||||
// Parent message should still be present.
|
||||
assert!(history.iter().any(|item| matches!(
|
||||
item,
|
||||
ResponseItem::Message { role, content, .. }
|
||||
if role == "user" && content.iter().any(|c| matches!(
|
||||
c,
|
||||
ContentItem::InputText { text } if text == "parent-msg"
|
||||
))
|
||||
)));
|
||||
|
||||
// Subagent developer instructions should be appended as a `developer`
|
||||
// role message containing the configured prompt text.
|
||||
assert!(history.iter().any(|item| matches!(
|
||||
item,
|
||||
ResponseItem::Message { role, content, .. }
|
||||
if role == "developer" && content.iter().any(|c| matches!(
|
||||
c,
|
||||
ContentItem::InputText { text } if text == "SUBAGENT_PROMPT"
|
||||
))
|
||||
)));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn prefers_structured_content_when_present() {
|
||||
let ctr = CallToolResult {
|
||||
@@ -2622,6 +2880,15 @@ mod tests {
|
||||
|
||||
let state = SessionState::new(session_configuration.clone());
|
||||
|
||||
let subagent_registry = SubagentRegistry::new();
|
||||
let subagent_manager = SubagentManager::new(
|
||||
Arc::new(subagent_registry.clone()),
|
||||
config.max_active_subagents,
|
||||
config.root_agent_uses_user_messages,
|
||||
config.subagent_root_inbox_autosubmit,
|
||||
config.subagent_inbox_inject_before_tools,
|
||||
);
|
||||
|
||||
let services = SessionServices {
|
||||
mcp_connection_manager: Arc::new(RwLock::new(McpConnectionManager::default())),
|
||||
mcp_startup_cancellation_token: CancellationToken::new(),
|
||||
@@ -2633,6 +2900,8 @@ mod tests {
|
||||
auth_manager: Arc::clone(&auth_manager),
|
||||
otel_event_manager: otel_event_manager.clone(),
|
||||
tool_approvals: Mutex::new(ApprovalStore::default()),
|
||||
subagents: subagent_registry,
|
||||
subagent_manager,
|
||||
};
|
||||
|
||||
let turn_context = Session::make_turn_context(
|
||||
@@ -2700,6 +2969,15 @@ mod tests {
|
||||
|
||||
let state = SessionState::new(session_configuration.clone());
|
||||
|
||||
let subagent_registry = SubagentRegistry::new();
|
||||
let subagent_manager = SubagentManager::new(
|
||||
Arc::new(subagent_registry.clone()),
|
||||
config.max_active_subagents,
|
||||
config.root_agent_uses_user_messages,
|
||||
config.subagent_root_inbox_autosubmit,
|
||||
config.subagent_inbox_inject_before_tools,
|
||||
);
|
||||
|
||||
let services = SessionServices {
|
||||
mcp_connection_manager: Arc::new(RwLock::new(McpConnectionManager::default())),
|
||||
mcp_startup_cancellation_token: CancellationToken::new(),
|
||||
@@ -2711,6 +2989,8 @@ mod tests {
|
||||
auth_manager: Arc::clone(&auth_manager),
|
||||
otel_event_manager: otel_event_manager.clone(),
|
||||
tool_approvals: Mutex::new(ApprovalStore::default()),
|
||||
subagents: subagent_registry,
|
||||
subagent_manager,
|
||||
};
|
||||
|
||||
let turn_context = Arc::new(Session::make_turn_context(
|
||||
@@ -2734,6 +3014,195 @@ mod tests {
|
||||
(session, turn_context, rx_event)
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn subagent_inbox_tool_only_mode_injects_await_into_parent_and_child() {
|
||||
// Build parent and child sessions and register them so the
|
||||
// SubagentManager can look them up via session_index.
|
||||
let (parent_session_raw, _tc_parent) = make_session_and_context();
|
||||
let parent_session = Arc::new(parent_session_raw);
|
||||
let (child_session_raw, _tc_child) = make_session_and_context();
|
||||
let child_session = Arc::new(child_session_raw);
|
||||
|
||||
crate::session_index::register(parent_session.conversation_id(), &parent_session);
|
||||
crate::session_index::register(child_session.conversation_id(), &child_session);
|
||||
|
||||
// Independent registry/manager used only for this test so we can
|
||||
// construct metadata and an AwaitInboxResult by hand.
|
||||
let registry = Arc::new(SubagentRegistry::new());
|
||||
let manager = SubagentManager::new(Arc::clone(®istry), 4, false, false, false);
|
||||
|
||||
let agent_id = 1;
|
||||
let initial_message_count = 0;
|
||||
let metadata = registry
|
||||
.register_spawn(
|
||||
child_session.conversation_id(),
|
||||
Some(parent_session.conversation_id()),
|
||||
Some(agent_id),
|
||||
agent_id,
|
||||
initial_message_count,
|
||||
Some("child".to_string()),
|
||||
None,
|
||||
)
|
||||
.await;
|
||||
|
||||
let messages = vec![InboxMessage {
|
||||
sender_agent_id: 0,
|
||||
recipient_agent_id: agent_id,
|
||||
interrupt: false,
|
||||
prompt: Some("hello child".to_string()),
|
||||
timestamp_ms: 1_000,
|
||||
}];
|
||||
|
||||
let await_result = AwaitInboxResult {
|
||||
metadata,
|
||||
completion: Some(SubagentCompletion::Completed {
|
||||
last_message: Some("done".to_string()),
|
||||
}),
|
||||
messages,
|
||||
};
|
||||
|
||||
manager
|
||||
.deliver_inbox_to_threads_at_yield(&await_result)
|
||||
.await;
|
||||
|
||||
// Child history should contain a synthetic subagent_await call in
|
||||
// tool-only mode.
|
||||
let child_history = child_session.clone_history().await.get_history();
|
||||
assert!(child_history.iter().any(|item| matches!(
|
||||
item,
|
||||
ResponseItem::FunctionCall { name, .. } if name == "subagent_await"
|
||||
)));
|
||||
|
||||
// And the user-visible payload should include the original prompt.
|
||||
assert!(child_history.iter().any(|item| match item {
|
||||
ResponseItem::FunctionCallOutput { output, .. } => {
|
||||
let Ok(value) = serde_json::from_str::<serde_json::Value>(&output.content) else {
|
||||
return false;
|
||||
};
|
||||
value["messages"]
|
||||
.as_array()
|
||||
.is_some_and(|msgs| msgs.iter().any(|m| m["prompt"] == "hello child"))
|
||||
}
|
||||
_ => false,
|
||||
}));
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn subagent_inbox_root_messages_become_user_turns_when_toggle_enabled() {
|
||||
use codex_protocol::models::ContentItem;
|
||||
|
||||
let (parent_session_raw, _tc_parent) = make_session_and_context();
|
||||
let parent_session = Arc::new(parent_session_raw);
|
||||
let (child_session_raw, _tc_child) = make_session_and_context();
|
||||
let child_session = Arc::new(child_session_raw);
|
||||
|
||||
crate::session_index::register(parent_session.conversation_id(), &parent_session);
|
||||
crate::session_index::register(child_session.conversation_id(), &child_session);
|
||||
|
||||
let registry = Arc::new(SubagentRegistry::new());
|
||||
let manager = SubagentManager::new(Arc::clone(®istry), 4, true, false, false);
|
||||
|
||||
let agent_id = 1;
|
||||
let initial_message_count = 0;
|
||||
let metadata = registry
|
||||
.register_spawn(
|
||||
child_session.conversation_id(),
|
||||
Some(parent_session.conversation_id()),
|
||||
Some(agent_id),
|
||||
agent_id,
|
||||
initial_message_count,
|
||||
Some("child".to_string()),
|
||||
None,
|
||||
)
|
||||
.await;
|
||||
|
||||
let messages = vec![InboxMessage {
|
||||
sender_agent_id: 0,
|
||||
recipient_agent_id: agent_id,
|
||||
interrupt: false,
|
||||
prompt: Some("hello child".to_string()),
|
||||
timestamp_ms: 1_000,
|
||||
}];
|
||||
|
||||
let await_result = AwaitInboxResult {
|
||||
metadata,
|
||||
completion: Some(SubagentCompletion::Completed {
|
||||
last_message: Some("done".to_string()),
|
||||
}),
|
||||
messages,
|
||||
};
|
||||
|
||||
manager
|
||||
.deliver_inbox_to_threads_at_yield(&await_result)
|
||||
.await;
|
||||
|
||||
// In toggle-on mode, the child should still see a synthetic
|
||||
// subagent_await reflecting completion, even when the only inbox
|
||||
// messages came from the root.
|
||||
let child_history = child_session.clone_history().await.get_history();
|
||||
assert!(child_history.iter().any(|item| matches!(
|
||||
item,
|
||||
ResponseItem::FunctionCall { name, .. } if name == "subagent_await"
|
||||
)));
|
||||
|
||||
// Instead, the root-origin prompt should appear as a user message.
|
||||
assert!(child_history.iter().any(|item| match item {
|
||||
ResponseItem::Message { role, content, .. } if role == "user" => {
|
||||
content.iter().any(|c| match c {
|
||||
ContentItem::InputText { text } => text == "hello child",
|
||||
_ => false,
|
||||
})
|
||||
}
|
||||
_ => false,
|
||||
}));
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn subagent_completion_without_messages_surfaces_await_to_parent_and_child() {
|
||||
let (parent_session_raw, _tc_parent) = make_session_and_context();
|
||||
let parent_session = Arc::new(parent_session_raw);
|
||||
let (child_session_raw, _tc_child) = make_session_and_context();
|
||||
let child_session = Arc::new(child_session_raw);
|
||||
|
||||
crate::session_index::register(parent_session.conversation_id(), &parent_session);
|
||||
crate::session_index::register(child_session.conversation_id(), &child_session);
|
||||
|
||||
let registry = Arc::new(SubagentRegistry::new());
|
||||
let manager = SubagentManager::new(Arc::clone(®istry), 4, false, false, false);
|
||||
|
||||
let agent_id = 1;
|
||||
let initial_message_count = 0;
|
||||
let metadata = registry
|
||||
.register_spawn(
|
||||
child_session.conversation_id(),
|
||||
Some(parent_session.conversation_id()),
|
||||
Some(agent_id),
|
||||
agent_id,
|
||||
initial_message_count,
|
||||
Some("child".to_string()),
|
||||
None,
|
||||
)
|
||||
.await;
|
||||
|
||||
let await_result = AwaitInboxResult {
|
||||
metadata,
|
||||
completion: Some(SubagentCompletion::Completed {
|
||||
last_message: Some("done".to_string()),
|
||||
}),
|
||||
messages: Vec::new(),
|
||||
};
|
||||
|
||||
manager
|
||||
.deliver_inbox_to_threads_at_yield(&await_result)
|
||||
.await;
|
||||
|
||||
let child_history = child_session.clone_history().await.get_history();
|
||||
assert!(child_history.iter().any(|item| matches!(
|
||||
item,
|
||||
ResponseItem::FunctionCall { name, .. } if name == "subagent_await"
|
||||
)));
|
||||
}
|
#[derive(Clone, Copy)]
struct NeverEndingTask {
kind: TaskKind,
@@ -3072,8 +3541,6 @@ mod tests {
with_escalated_permissions: Some(true),
justification: Some("test".to_string()),
arg0: None,
max_output_tokens: None,
max_output_chars: None,
};

let params2 = ExecParams {
@@ -3084,8 +3551,6 @@ mod tests {
env: HashMap::new(),
justification: params.justification.clone(),
arg0: None,
max_output_tokens: None,
max_output_chars: None,
};

let turn_diff_tracker = Arc::new(tokio::sync::Mutex::new(TurnDiffTracker::new()));
@@ -3214,4 +3679,90 @@ mod tests {

pretty_assertions::assert_eq!(output, expected);
}

#[test]
fn merge_turn_items_orders_inbox_after_tools_by_default() {
let outputs = vec![ResponseItem::Message {
id: None,
role: "assistant".to_string(),
content: vec![ContentItem::OutputText {
text: "tool-output".to_string(),
}],
}];
let new_inputs = vec![ResponseItem::FunctionCallOutput {
call_id: "call-1".to_string(),
output: FunctionCallOutputPayload {
content: "tool-response".to_string(),
..Default::default()
},
}];
let inbox = vec![ResponseItem::FunctionCall {
id: None,
name: "subagent_await".to_string(),
arguments: "{}".to_string(),
call_id: "await-1".to_string(),
}];

let merged = merge_turn_items_for_recording(&outputs, &new_inputs, &inbox, false);

assert_eq!(merged.len(), 3);
// outputs_to_record first
assert!(matches!(
&merged[0],
ResponseItem::Message { role, .. } if role == "assistant"
));
// new_inputs_to_record next
assert!(matches!(
&merged[1],
ResponseItem::FunctionCallOutput { call_id, .. } if call_id == "call-1"
));
// inbox-derived synthetic await last
assert!(matches!(
&merged[2],
ResponseItem::FunctionCall { name, .. } if name == "subagent_await"
));
}

#[test]
fn merge_turn_items_orders_inbox_before_tools_when_configured() {
let outputs = vec![ResponseItem::Message {
id: None,
role: "assistant".to_string(),
content: vec![ContentItem::OutputText {
text: "tool-output".to_string(),
}],
}];
let new_inputs = vec![ResponseItem::FunctionCallOutput {
call_id: "call-1".to_string(),
output: FunctionCallOutputPayload {
content: "tool-response".to_string(),
..Default::default()
},
}];
let inbox = vec![ResponseItem::FunctionCall {
id: None,
name: "subagent_await".to_string(),
arguments: "{}".to_string(),
call_id: "await-1".to_string(),
}];

let merged = merge_turn_items_for_recording(&outputs, &new_inputs, &inbox, true);

assert_eq!(merged.len(), 3);
// outputs_to_record first
assert!(matches!(
&merged[0],
ResponseItem::Message { role, .. } if role == "assistant"
));
// inbox-derived synthetic await next
assert!(matches!(
&merged[1],
ResponseItem::FunctionCall { name, .. } if name == "subagent_await"
));
// new_inputs_to_record last
assert!(matches!(
&merged[2],
ResponseItem::FunctionCallOutput { call_id, .. } if call_id == "call-1"
));
}
}
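The two tests above pin down the ordering contract of `merge_turn_items_for_recording` from both sides. A minimal self-contained sketch of that contract, with item kinds reduced to plain labels (the three strings below are placeholders, not real types):

```rust
// Sketch: recording order implied by the tests above.
// false (Option A): assistant outputs, then tool outputs, then inbox awaits.
// true  (Option B): assistant outputs, then inbox awaits, then tool outputs.
fn expected_order(inject_before_tools: bool) -> [&'static str; 3] {
    if inject_before_tools {
        ["assistant-message", "subagent_await", "tool-output"]
    } else {
        ["assistant-message", "tool-output", "subagent_await"]
    }
}

fn main() {
    assert_eq!(expected_order(false)[2], "subagent_await");
    assert_eq!(expected_order(true)[1], "subagent_await");
}
```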

@@ -4,6 +4,7 @@ use std::sync::atomic::AtomicU64;
use async_channel::Receiver;
use async_channel::Sender;
use codex_async_utils::OrCancelExt;
use codex_protocol::ConversationId;
use codex_protocol::protocol::ApplyPatchApprovalRequestEvent;
use codex_protocol::protocol::Event;
use codex_protocol::protocol::EventMsg;
@@ -30,13 +31,16 @@ use codex_protocol::protocol::InitialHistory;
/// The returned `events_rx` yields non-approval events emitted by the sub-agent.
/// Approval requests are handled via `parent_session` and are not surfaced.
/// The returned `ops_tx` allows the caller to submit additional `Op`s to the sub-agent.
#[allow(clippy::too_many_arguments)]
pub(crate) async fn run_codex_conversation_interactive(
config: Config,
auth_manager: Arc<AuthManager>,
parent_session: Arc<Session>,
parent_ctx: Arc<TurnContext>,
cancel_token: CancellationToken,
desired_conversation_id: Option<ConversationId>,
initial_history: Option<InitialHistory>,
source: SubAgentSource,
) -> Result<Codex, CodexErr> {
let (tx_sub, rx_sub) = async_channel::bounded(SUBMISSION_CHANNEL_CAPACITY);
let (tx_ops, rx_ops) = async_channel::bounded(SUBMISSION_CHANNEL_CAPACITY);
@@ -45,7 +49,8 @@ pub(crate) async fn run_codex_conversation_interactive(
config,
auth_manager,
initial_history.unwrap_or(InitialHistory::New),
SessionSource::SubAgent(SubAgentSource::Review),
SessionSource::SubAgent(source),
desired_conversation_id,
)
.await?;
let codex = Arc::new(codex);
@@ -81,6 +86,7 @@ pub(crate) async fn run_codex_conversation_interactive(
next_id: AtomicU64::new(0),
tx_sub: tx_ops,
rx_event: rx_sub,
conversation_id: codex.conversation_id(),
})
}

@@ -105,13 +111,16 @@ pub(crate) async fn run_codex_conversation_one_shot(
parent_session,
parent_ctx,
child_cancel.clone(),
None,
initial_history,
SubAgentSource::Review,
)
.await?;

// Send the initial input to kick off the one-shot turn.
io.submit(Op::UserInput { items: input }).await?;

let conversation_id = io.conversation_id();
// Bridge events so we can observe completion and shut down automatically.
let (tx_bridge, rx_bridge) = async_channel::bounded(SUBMISSION_CHANNEL_CAPACITY);
let ops_tx = io.tx_sub.clone();
@@ -146,6 +155,7 @@ pub(crate) async fn run_codex_conversation_one_shot(
next_id: AtomicU64::new(0),
rx_event: rx_bridge,
tx_sub: tx_closed,
conversation_id,
})
}
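The bridge introduced here is a small forward-and-stop loop. A self-contained sketch of the same pattern under simplified types (plain `&str` events instead of the real `Event` enum), using the `async-channel` crate this module already imports:

```rust
use async_channel::bounded;

#[tokio::main]
async fn main() {
    let (tx_in, rx_in) = bounded::<&'static str>(8);
    let (tx_out, rx_out) = bounded::<&'static str>(8);

    tokio::spawn(async move {
        while let Ok(ev) = rx_in.recv().await {
            let done = ev == "task_complete";
            let _ = tx_out.send(ev).await;
            if done {
                break; // auto-shutdown once the one-shot turn finishes
            }
        }
    });

    tx_in.send("agent_message").await.unwrap();
    tx_in.send("task_complete").await.unwrap();
    assert_eq!(rx_out.recv().await.unwrap(), "agent_message");
    assert_eq!(rx_out.recv().await.unwrap(), "task_complete");
}
```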

@@ -5,6 +5,9 @@ use crate::sandboxing::SandboxPermissions;

use crate::bash::parse_shell_lc_plain_commands;
use crate::is_safe_command::is_known_safe_command;
#[cfg(windows)]
#[path = "windows_dangerous_commands.rs"]
mod windows_dangerous_commands;

pub fn requires_initial_appoval(
policy: AskForApproval,
@@ -36,6 +39,13 @@ pub fn requires_initial_appoval(
}

pub fn command_might_be_dangerous(command: &[String]) -> bool {
#[cfg(windows)]
{
if windows_dangerous_commands::is_dangerous_command_windows(command) {
return true;
}
}

if is_dangerous_to_call_with_exec(command) {
return true;
}

@@ -267,6 +267,20 @@ mod tests {
}
}

#[test]
fn windows_powershell_full_path_is_safe() {
if !cfg!(windows) {
// Windows only because on Linux path splitting doesn't handle `/` separators properly
return;
}

assert!(is_known_safe_command(&vec_str(&[
r"C:\Program Files\PowerShell\7\pwsh.exe",
"-Command",
"Get-Location",
])));
}

#[test]
fn bash_lc_safe_examples() {
assert!(is_known_safe_command(&vec_str(&["bash", "-lc", "ls"])));

316
codex-rs/core/src/command_safety/windows_dangerous_commands.rs
Normal file
@@ -0,0 +1,316 @@
use std::path::Path;

use once_cell::sync::Lazy;
use regex::Regex;
use shlex::split as shlex_split;
use url::Url;

pub fn is_dangerous_command_windows(command: &[String]) -> bool {
// Prefer structured parsing for PowerShell/CMD so we can spot URL-bearing
// invocations of ShellExecute-style entry points before falling back to
// simple argv heuristics.
if is_dangerous_powershell(command) {
return true;
}

if is_dangerous_cmd(command) {
return true;
}

is_direct_gui_launch(command)
}

fn is_dangerous_powershell(command: &[String]) -> bool {
let Some((exe, rest)) = command.split_first() else {
return false;
};
if !is_powershell_executable(exe) {
return false;
}
// Parse the PowerShell invocation to get a flat token list we can scan for
// dangerous cmdlets/COM calls plus any URL-looking arguments. This is a
// best-effort shlex split of the script text, not a full PS parser.
let Some(parsed) = parse_powershell_invocation(rest) else {
return false;
};

let tokens_lc: Vec<String> = parsed
.tokens
.iter()
.map(|t| t.trim_matches('\'').trim_matches('"').to_ascii_lowercase())
.collect();
let has_url = args_have_url(&parsed.tokens);

if has_url
&& tokens_lc.iter().any(|t| {
matches!(
t.as_str(),
"start-process" | "start" | "saps" | "invoke-item" | "ii"
) || t.contains("start-process")
|| t.contains("invoke-item")
})
{
return true;
}

if has_url
&& tokens_lc
.iter()
.any(|t| t.contains("shellexecute") || t.contains("shell.application"))
{
return true;
}

if let Some(first) = tokens_lc.first() {
// Legacy ShellExecute path via url.dll
if first == "rundll32"
&& tokens_lc
.iter()
.any(|t| t.contains("url.dll,fileprotocolhandler"))
&& has_url
{
return true;
}
if first == "mshta" && has_url {
return true;
}
if is_browser_executable(first) && has_url {
return true;
}
if matches!(first.as_str(), "explorer" | "explorer.exe") && has_url {
return true;
}
}

false
}

fn is_dangerous_cmd(command: &[String]) -> bool {
let Some((exe, rest)) = command.split_first() else {
return false;
};
let Some(base) = executable_basename(exe) else {
return false;
};
if base != "cmd" && base != "cmd.exe" {
return false;
}

let mut iter = rest.iter();
for arg in iter.by_ref() {
let lower = arg.to_ascii_lowercase();
match lower.as_str() {
"/c" | "/r" | "-c" => break,
_ if lower.starts_with('/') => continue,
// Unknown tokens before the command body => bail.
_ => return false,
}
}

let Some(first_cmd) = iter.next() else {
return false;
};
// Classic `cmd /c start https://...` ShellExecute path.
if !first_cmd.eq_ignore_ascii_case("start") {
return false;
}
let remaining: Vec<String> = iter.cloned().collect();
args_have_url(&remaining)
}

fn is_direct_gui_launch(command: &[String]) -> bool {
let Some((exe, rest)) = command.split_first() else {
return false;
};
let Some(base) = executable_basename(exe) else {
return false;
};

// Explorer/rundll32/mshta or direct browser exe with a URL anywhere in args.
if matches!(base.as_str(), "explorer" | "explorer.exe") && args_have_url(rest) {
return true;
}
if matches!(base.as_str(), "mshta" | "mshta.exe") && args_have_url(rest) {
return true;
}
if (base == "rundll32" || base == "rundll32.exe")
&& rest.iter().any(|t| {
t.to_ascii_lowercase()
.contains("url.dll,fileprotocolhandler")
})
&& args_have_url(rest)
{
return true;
}
if is_browser_executable(&base) && args_have_url(rest) {
return true;
}

false
}

fn args_have_url(args: &[String]) -> bool {
args.iter().any(|arg| looks_like_url(arg))
}

fn looks_like_url(token: &str) -> bool {
// Strip common PowerShell punctuation around inline URLs (quotes, parens, trailing semicolons).
// Capture the middle token after trimming leading quotes/parens/whitespace and trailing semicolons/closing parens.
static RE: Lazy<Option<Regex>> =
Lazy::new(|| Regex::new(r#"^[ "'\(\s]*([^\s"'\);]+)[\s;\)]*$"#).ok());
// If the token embeds a URL alongside other text (e.g., Start-Process('https://...'))
// as a single shlex token, grab the substring starting at the first URL prefix.
let urlish = token
.find("https://")
.or_else(|| token.find("http://"))
.map(|idx| &token[idx..])
.unwrap_or(token);

let candidate = RE
.as_ref()
.and_then(|re| re.captures(urlish))
.and_then(|caps| caps.get(1))
.map(|m| m.as_str())
.unwrap_or(urlish);
let Ok(url) = Url::parse(candidate) else {
return false;
};
matches!(url.scheme(), "http" | "https")
}
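At its core, `looks_like_url` reduces to one decision: after trimming punctuation, does `Url::parse` yield an `http`/`https` scheme? A runnable sketch of just that scheme check (the trimming regex above is omitted here):

```rust
use url::Url;

// Only http/https URLs count as "dangerous to open" in the checks above.
fn is_http_like(candidate: &str) -> bool {
    Url::parse(candidate)
        .map(|u| matches!(u.scheme(), "http" | "https"))
        .unwrap_or(false)
}

fn main() {
    assert!(is_http_like("https://example.com"));
    assert!(!is_http_like("file:///C:/tmp")); // parses, but wrong scheme
    assert!(!is_http_like("notepad.exe")); // no scheme at all -> parse error
}
```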

fn executable_basename(exe: &str) -> Option<String> {
Path::new(exe)
.file_name()
.and_then(|osstr| osstr.to_str())
.map(str::to_ascii_lowercase)
}

fn is_powershell_executable(exe: &str) -> bool {
matches!(
executable_basename(exe).as_deref(),
Some("powershell") | Some("powershell.exe") | Some("pwsh") | Some("pwsh.exe")
)
}

fn is_browser_executable(name: &str) -> bool {
matches!(
name,
"chrome"
| "chrome.exe"
| "msedge"
| "msedge.exe"
| "firefox"
| "firefox.exe"
| "iexplore"
| "iexplore.exe"
)
}

struct ParsedPowershell {
tokens: Vec<String>,
}

fn parse_powershell_invocation(args: &[String]) -> Option<ParsedPowershell> {
if args.is_empty() {
return None;
}

let mut idx = 0;
while idx < args.len() {
let arg = &args[idx];
let lower = arg.to_ascii_lowercase();
match lower.as_str() {
"-command" | "/command" | "-c" => {
let script = args.get(idx + 1)?;
if idx + 2 != args.len() {
return None;
}
let tokens = shlex_split(script)?;
return Some(ParsedPowershell { tokens });
}
_ if lower.starts_with("-command:") || lower.starts_with("/command:") => {
if idx + 1 != args.len() {
return None;
}
let (_, script) = arg.split_once(':')?;
let tokens = shlex_split(script)?;
return Some(ParsedPowershell { tokens });
}
"-nologo" | "-noprofile" | "-noninteractive" | "-mta" | "-sta" => {
idx += 1;
}
_ if lower.starts_with('-') => {
idx += 1;
}
_ => {
let rest = args[idx..].to_vec();
return Some(ParsedPowershell { tokens: rest });
}
}
}

None
}
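The tokenization step is easiest to see on a concrete invocation. A small runnable example using the same `shlex` crate imported above; note this is the best-effort split the comment warns about, not real PowerShell parsing:

```rust
use shlex::split as shlex_split;

fn main() {
    // The script body after `-Command` becomes a flat token list.
    let tokens = shlex_split("Start-Process 'https://example.com'").unwrap();
    assert_eq!(tokens, vec!["Start-Process", "https://example.com"]);
}
```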

#[cfg(test)]
mod tests {
use super::is_dangerous_command_windows;

fn vec_str(items: &[&str]) -> Vec<String> {
items.iter().map(std::string::ToString::to_string).collect()
}

#[test]
fn powershell_start_process_url_is_dangerous() {
assert!(is_dangerous_command_windows(&vec_str(&[
"powershell",
"-NoLogo",
"-Command",
"Start-Process 'https://example.com'"
])));
}

#[test]
fn powershell_start_process_url_with_trailing_semicolon_is_dangerous() {
assert!(is_dangerous_command_windows(&vec_str(&[
"powershell",
"-Command",
"Start-Process('https://example.com');"
])));
}

#[test]
fn powershell_start_process_local_is_not_flagged() {
assert!(!is_dangerous_command_windows(&vec_str(&[
"powershell",
"-Command",
"Start-Process notepad.exe"
])));
}

#[test]
fn cmd_start_with_url_is_dangerous() {
assert!(is_dangerous_command_windows(&vec_str(&[
"cmd",
"/c",
"start",
"https://example.com"
])));
}

#[test]
fn msedge_with_url_is_dangerous() {
assert!(is_dangerous_command_windows(&vec_str(&[
"msedge.exe",
"https://example.com"
])));
}

#[test]
fn explorer_with_directory_is_not_flagged() {
assert!(!is_dangerous_command_windows(&vec_str(&[
"explorer.exe",
"."
])));
}
}
@@ -1,4 +1,5 @@
use shlex::split as shlex_split;
use std::path::Path;

/// On Windows, we conservatively allow only clearly read-only PowerShell invocations
/// that match a small safelist. Anything else (including direct CMD commands) is unsafe.
@@ -131,8 +132,14 @@ fn split_into_commands(tokens: Vec<String>) -> Option<Vec<Vec<String>>> {

/// Returns true when the executable name is one of the supported PowerShell binaries.
fn is_powershell_executable(exe: &str) -> bool {
let executable_name = Path::new(exe)
.file_name()
.and_then(|osstr| osstr.to_str())
.unwrap_or(exe)
.to_ascii_lowercase();

matches!(
exe.to_ascii_lowercase().as_str(),
executable_name.as_str(),
"powershell" | "powershell.exe" | "pwsh" | "pwsh.exe"
)
}
@@ -313,6 +320,27 @@ mod tests {
])));
}

#[test]
fn accepts_full_path_powershell_invocations() {
if !cfg!(windows) {
// Windows only because on Linux path splitting doesn't handle `/` separators properly
return;
}

assert!(is_safe_command_windows(&vec_str(&[
r"C:\Program Files\PowerShell\7\pwsh.exe",
"-NoProfile",
"-Command",
"Get-ChildItem -Path .",
])));

assert!(is_safe_command_windows(&vec_str(&[
r"C:\Windows\System32\WindowsPowerShell\v1.0\powershell.exe",
"-Command",
"Get-Content Cargo.toml",
])));
}

#[test]
fn allows_read_only_pipelines_and_git_usage() {
assert!(is_safe_command_windows(&vec_str(&[

@@ -52,6 +52,7 @@ use std::collections::HashMap;
use std::io::ErrorKind;
use std::path::Path;
use std::path::PathBuf;
use tracing::warn;

use crate::config::profile::ConfigProfile;
use toml::Value as TomlValue;
@@ -70,6 +71,10 @@ pub const GPT_5_CODEX_MEDIUM_MODEL: &str = "gpt-5.1-codex";
/// the context window.
pub(crate) const PROJECT_DOC_MAX_BYTES: usize = 32 * 1024; // 32 KiB

pub(crate) const DEFAULT_MAX_ACTIVE_SUBAGENTS: usize = 8;
pub(crate) const MIN_MAX_ACTIVE_SUBAGENTS: usize = 1;
pub(crate) const MAX_MAX_ACTIVE_SUBAGENTS: usize = 64;

pub(crate) const CONFIG_TOML_FILE: &str = "config.toml";

/// Application configuration loaded from disk and merged with overrides.
@@ -86,9 +91,6 @@ pub struct Config {
/// Size of the context window for the model, in tokens.
pub model_context_window: Option<i64>,

/// Maximum number of output tokens.
pub model_max_output_tokens: Option<i64>,

/// Token usage threshold triggering auto-compaction of conversation history.
pub model_auto_compact_token_limit: Option<i64>,

@@ -107,6 +109,9 @@ pub struct Config {
/// for either of approval_policy or sandbox_mode.
pub did_user_set_custom_approval_policy_or_sandbox_mode: bool,

/// Maximum number of concurrently active subagents allowed in a session.
pub max_active_subagents: usize,

/// On Windows, indicates that a previously configured workspace-write sandbox
/// was coerced to read-only because native auto mode is unsupported.
pub forced_auto_mode_downgraded_on_windows: bool,
@@ -131,6 +136,30 @@ pub struct Config {
/// Developer instructions override injected as a separate message.
pub developer_instructions: Option<String>,

/// When true, messages from the root agent to a subagent should be
/// surfaced as `user` role messages in the child’s history instead of
/// relying solely on tool calls and inbox semantics. This is useful for
/// evaluations that compare direct user-style turns versus tool-mediated
/// messaging. When false, root-to-child communication is modeled purely
/// via tools and the subagent inbox.
pub root_agent_uses_user_messages: bool,

/// When true, the root agent will, at turn boundaries, drain subagent
/// inboxes and inject synthetic `subagent_await` calls + outputs into the
/// message stream, and may auto-start a new turn when idle. When false,
/// subagent inboxes are only surfaced when explicitly awaited or at
/// subagent-specific yield points.
pub subagent_root_inbox_autosubmit: bool,

/// Controls where synthetic `subagent_await` tool calls and outputs for
/// inbox delivery are injected relative to real tool call outputs inside a
/// turn. When true, inbox-derived `subagent_await` items are recorded
/// *before* tool outputs (Option B: closer to chronological ordering). When
/// false (default), they are recorded *after* tool outputs (Option A:
/// closer to training-time behavior where the model generally sees its own
/// tool call and result before additional context).
pub subagent_inbox_inject_before_tools: bool,

/// Compact prompt override.
pub compact_prompt: Option<String>,

@@ -248,6 +277,7 @@ pub struct Config {
pub experimental_sandbox_command_assessment: bool,

/// If set to `true`, use only the experimental unified exec tool.
#[allow(dead_code)]
pub use_experimental_unified_exec_tool: bool,

/// If set to `true`, use the experimental official Rust MCP client.
@@ -570,9 +600,6 @@ pub struct ConfigToml {
/// Size of the context window for the model, in tokens.
pub model_context_window: Option<i64>,

/// Maximum number of output tokens.
pub model_max_output_tokens: Option<i64>,

/// Token usage threshold triggering auto-compaction of conversation history.
pub model_auto_compact_token_limit: Option<i64>,

@@ -602,6 +629,30 @@ pub struct ConfigToml {
/// Compact prompt used for history compaction.
pub compact_prompt: Option<String>,

/// When true, messages from the root agent to subagents should be
/// represented as `user` role messages in the child’s history. When
/// false or unset, root-to-child communication is modeled purely via
/// `subagent_send_message` and inbox delivery.
#[serde(default)]
pub root_agent_uses_user_messages: Option<bool>,

/// When true, the root agent drains subagent inboxes at turn boundaries
/// and may auto-start new turns when idle. When false or unset, the root
/// only observes subagent inboxes via explicit `subagent_await` calls or
/// subagent-driven yield points.
#[serde(default)]
pub subagent_root_inbox_autosubmit: Option<bool>,

/// When true, inbox-derived `subagent_await` calls and outputs are
/// injected *before* tool outputs inside a turn (Option B, closer to
/// strict chronological ordering). When false or unset, synthetic
/// `subagent_await` entries are injected *after* tool outputs (Option A,
/// closer to training-time patterns where the model generally sees its own
/// tool call and result before extra context).
#[serde(default)]
pub subagent_inbox_inject_before_tools: Option<bool>,

/// Maximum number of concurrently active subagents allowed in a session.
pub max_active_subagents: Option<usize>,

/// When set, restricts ChatGPT login to a specific workspace identifier.
#[serde(default)]
pub forced_chatgpt_workspace_id: Option<String>,
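For reference, these optional fields deserialize from plain `config.toml` keys. A hedged sketch of the round trip with a cut-down stand-in struct (`SubagentKnobs` is illustrative; the real type is `ConfigToml`):

```rust
use serde::Deserialize;

#[derive(Deserialize, Default)]
struct SubagentKnobs {
    #[serde(default)]
    root_agent_uses_user_messages: Option<bool>,
    #[serde(default)]
    subagent_root_inbox_autosubmit: Option<bool>,
    #[serde(default)]
    subagent_inbox_inject_before_tools: Option<bool>,
    max_active_subagents: Option<usize>,
}

fn main() {
    let toml_src = r#"
        max_active_subagents = 8
        subagent_inbox_inject_before_tools = false
    "#;
    let knobs: SubagentKnobs = toml::from_str(toml_src).unwrap();
    assert_eq!(knobs.max_active_subagents, Some(8));
    assert_eq!(knobs.subagent_inbox_inject_before_tools, Some(false));
    assert_eq!(knobs.root_agent_uses_user_messages, None); // unset -> defaulted later
}
```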
@@ -895,6 +946,10 @@ pub struct ConfigOverrides {
pub base_instructions: Option<String>,
pub developer_instructions: Option<String>,
pub compact_prompt: Option<String>,
pub max_active_subagents: Option<usize>,
pub root_agent_uses_user_messages: Option<bool>,
pub subagent_root_inbox_autosubmit: Option<bool>,
pub subagent_inbox_inject_before_tools: Option<bool>,
pub include_apply_patch_tool: Option<bool>,
pub show_raw_agent_reasoning: Option<bool>,
pub tools_web_search_request: Option<bool>,
@@ -932,6 +987,33 @@ pub fn resolve_oss_provider(
}

impl Config {
/// Clone the existing config with a model override, re-deriving any model-specific fields.
pub fn clone_with_model_override(&self, model: &str) -> std::io::Result<Self> {
if model.trim().is_empty() {
return Err(std::io::Error::new(
std::io::ErrorKind::InvalidInput,
"model cannot be empty",
));
}

let mut cfg = self.clone();
cfg.model = model.trim().to_string();

let model_family = find_family_for_model(&cfg.model)
.unwrap_or_else(|| derive_default_model_family(&cfg.model));
cfg.model_family = model_family;

if let Some(info) = get_model_info(&cfg.model_family) {
cfg.model_context_window = Some(info.context_window);
cfg.model_auto_compact_token_limit = info.auto_compact_token_limit;
} else {
cfg.model_context_window = None;
cfg.model_auto_compact_token_limit = None;
}

Ok(cfg)
}
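The important design choice in `clone_with_model_override` is that derived fields are either refreshed from the model table or cleared, never carried over stale. A self-contained sketch of that pattern with a miniature config (names here are illustrative):

```rust
#[derive(Clone, Debug, PartialEq)]
struct MiniConfig {
    model: String,
    context_window: Option<i64>,
}

// Stand-in for get_model_info: known slugs map to a window, others to None.
fn lookup_window(model: &str) -> Option<i64> {
    match model {
        m if m.starts_with("gpt-5") => Some(272_000),
        "o3" => Some(200_000),
        _ => None,
    }
}

fn clone_with_model(base: &MiniConfig, model: &str) -> MiniConfig {
    let mut cfg = base.clone();
    cfg.model = model.trim().to_string();
    cfg.context_window = lookup_window(&cfg.model); // refresh or clear, never keep stale
    cfg
}

fn main() {
    let base = MiniConfig { model: "o3".into(), context_window: Some(200_000) };
    assert_eq!(clone_with_model(&base, "gpt-5.1-codex").context_window, Some(272_000));
    assert_eq!(clone_with_model(&base, "unknown-model").context_window, None);
}
```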

/// Meant to be used exclusively for tests: `load_with_overrides()` should
/// be used in all other cases.
pub fn load_from_base_config_with_overrides(
@@ -954,6 +1036,10 @@ impl Config {
base_instructions,
developer_instructions,
compact_prompt,
max_active_subagents,
root_agent_uses_user_messages,
subagent_root_inbox_autosubmit: _,
subagent_inbox_inject_before_tools: _,
include_apply_patch_tool: include_apply_patch_tool_override,
show_raw_agent_reasoning,
tools_web_search_request: override_tools_web_search_request,
@@ -1086,6 +1172,7 @@ impl Config {

let include_apply_patch_tool_flag = features.enabled(Feature::ApplyPatchFreeform);
let tools_web_search_request = features.enabled(Feature::WebSearchRequest);
#[allow(dead_code)]
let use_experimental_unified_exec_tool = features.enabled(Feature::UnifiedExec);
let use_experimental_use_rmcp_client = features.enabled(Feature::RmcpClient);
let experimental_sandbox_command_assessment =
@@ -1122,11 +1209,6 @@ impl Config {
let model_context_window = cfg
.model_context_window
.or_else(|| openai_model_info.as_ref().map(|info| info.context_window));
let model_max_output_tokens = cfg.model_max_output_tokens.or_else(|| {
openai_model_info
.as_ref()
.map(|info| info.max_output_tokens)
});
let model_auto_compact_token_limit = cfg.model_auto_compact_token_limit.or_else(|| {
openai_model_info
.as_ref()
@@ -1173,12 +1255,42 @@ impl Config {
.or(cfg.review_model)
.unwrap_or_else(default_review_model);

let raw_max_active_subagents = max_active_subagents
.or(config_profile.max_active_subagents)
.or(cfg.max_active_subagents)
.unwrap_or(DEFAULT_MAX_ACTIVE_SUBAGENTS);

if raw_max_active_subagents < MIN_MAX_ACTIVE_SUBAGENTS {
return Err(std::io::Error::new(
std::io::ErrorKind::InvalidInput,
format!(
"max_active_subagents must be at least {MIN_MAX_ACTIVE_SUBAGENTS}, got {raw_max_active_subagents}"
),
));
}

let max_active_subagents = if raw_max_active_subagents > MAX_MAX_ACTIVE_SUBAGENTS {
warn!(
"max_active_subagents clamped from {} to {}",
raw_max_active_subagents, MAX_MAX_ACTIVE_SUBAGENTS
);
MAX_MAX_ACTIVE_SUBAGENTS
} else {
raw_max_active_subagents
};

let root_agent_uses_user_messages = root_agent_uses_user_messages
.or(cfg.root_agent_uses_user_messages)
.unwrap_or(true);
let subagent_root_inbox_autosubmit = cfg.subagent_root_inbox_autosubmit.unwrap_or(true);
let subagent_inbox_inject_before_tools =
cfg.subagent_inbox_inject_before_tools.unwrap_or(false);

let config = Self {
model,
review_model,
model_family,
model_context_window,
model_max_output_tokens,
model_auto_compact_token_limit,
model_provider_id,
model_provider,
@@ -1192,6 +1304,9 @@ impl Config {
user_instructions,
base_instructions,
developer_instructions,
root_agent_uses_user_messages,
subagent_root_inbox_autosubmit,
subagent_inbox_inject_before_tools,
compact_prompt,
// The config.toml omits "_mode" because it's a config file. However, "_mode"
// is important in code to differentiate the mode from the store implementation.
@@ -1226,6 +1341,7 @@ impl Config {
.show_raw_agent_reasoning
.or(show_raw_agent_reasoning)
.unwrap_or(false),
max_active_subagents,
model_reasoning_effort: config_profile
.model_reasoning_effort
.or(cfg.model_reasoning_effort),
@@ -1623,6 +1739,73 @@ trust_level = "trusted"
Ok(())
}

#[test]
fn max_active_subagents_defaults_and_overrides() -> std::io::Result<()> {
let temp_dir = TempDir::new()?;

let config = Config::load_from_base_config_with_overrides(
ConfigToml::default(),
ConfigOverrides::default(),
temp_dir.path().to_path_buf(),
)?;
assert_eq!(config.max_active_subagents, DEFAULT_MAX_ACTIVE_SUBAGENTS);

let custom = ConfigToml {
max_active_subagents: Some(3),
..ConfigToml::default()
};
let config = Config::load_from_base_config_with_overrides(
custom,
ConfigOverrides::default(),
temp_dir.path().to_path_buf(),
)?;
assert_eq!(config.max_active_subagents, 3);

let overrides = ConfigOverrides {
max_active_subagents: Some(2),
..Default::default()
};
let config = Config::load_from_base_config_with_overrides(
ConfigToml::default(),
overrides,
temp_dir.path().to_path_buf(),
)?;
assert_eq!(config.max_active_subagents, 2);

Ok(())
}

#[test]
fn max_active_subagents_validates_bounds() {
let temp_dir = TempDir::new().expect("tempdir");

// Below minimum should error.
let cfg_zero = ConfigToml {
max_active_subagents: Some(0),
..ConfigToml::default()
};
let err = Config::load_from_base_config_with_overrides(
cfg_zero,
ConfigOverrides::default(),
temp_dir.path().to_path_buf(),
)
.expect_err("expected invalid input error");
assert_eq!(err.kind(), std::io::ErrorKind::InvalidInput);

// Above ceiling should clamp.
let cfg_high = ConfigToml {
max_active_subagents: Some(MAX_MAX_ACTIVE_SUBAGENTS + 10),
..ConfigToml::default()
};
let config = Config::load_from_base_config_with_overrides(
cfg_high,
ConfigOverrides::default(),
temp_dir.path().to_path_buf(),
)
.expect("clamped config");
assert_eq!(config.max_active_subagents, MAX_MAX_ACTIVE_SUBAGENTS);
}
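Both tests encode an asymmetric policy: too small is an error, too large is silently clamped. A compact sketch of that resolve step (constants mirror the bounds declared earlier in this diff):

```rust
const MIN: usize = 1;
const MAX: usize = 64;

// Reject values below the floor; clamp values above the ceiling.
fn resolve_max_active_subagents(requested: usize) -> Result<usize, String> {
    if requested < MIN {
        return Err(format!(
            "max_active_subagents must be at least {MIN}, got {requested}"
        ));
    }
    Ok(requested.min(MAX))
}

fn main() {
    assert!(resolve_max_active_subagents(0).is_err());
    assert_eq!(resolve_max_active_subagents(8), Ok(8));
    assert_eq!(resolve_max_active_subagents(74), Ok(64)); // clamped (with a warn! in the real code)
}
```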

#[test]
fn config_defaults_to_file_cli_auth_store_mode() -> std::io::Result<()> {
let codex_home = TempDir::new()?;
@@ -2961,13 +3144,13 @@ model_verbosity = "high"
review_model: OPENAI_DEFAULT_REVIEW_MODEL.to_string(),
model_family: find_family_for_model("o3").expect("known model slug"),
model_context_window: Some(200_000),
model_max_output_tokens: Some(100_000),
model_auto_compact_token_limit: Some(180_000),
model_provider_id: "openai".to_string(),
model_provider: fixture.openai_provider.clone(),
approval_policy: AskForApproval::Never,
sandbox_policy: SandboxPolicy::new_read_only_policy(),
did_user_set_custom_approval_policy_or_sandbox_mode: true,
max_active_subagents: DEFAULT_MAX_ACTIVE_SUBAGENTS,
forced_auto_mode_downgraded_on_windows: false,
shell_environment_policy: ShellEnvironmentPolicy::default(),
user_instructions: None,
@@ -3009,6 +3192,9 @@ model_verbosity = "high"
tui_notifications: Default::default(),
animations: true,
otel: OtelConfig::default(),
root_agent_uses_user_messages: true,
subagent_root_inbox_autosubmit: true,
subagent_inbox_inject_before_tools: false,
},
o3_profile_config
);
@@ -3034,13 +3220,13 @@ model_verbosity = "high"
review_model: OPENAI_DEFAULT_REVIEW_MODEL.to_string(),
model_family: find_family_for_model("gpt-3.5-turbo").expect("known model slug"),
model_context_window: Some(16_385),
model_max_output_tokens: Some(4_096),
model_auto_compact_token_limit: Some(14_746),
model_provider_id: "openai-chat-completions".to_string(),
model_provider: fixture.openai_chat_completions_provider.clone(),
approval_policy: AskForApproval::UnlessTrusted,
sandbox_policy: SandboxPolicy::new_read_only_policy(),
did_user_set_custom_approval_policy_or_sandbox_mode: true,
max_active_subagents: DEFAULT_MAX_ACTIVE_SUBAGENTS,
forced_auto_mode_downgraded_on_windows: false,
shell_environment_policy: ShellEnvironmentPolicy::default(),
user_instructions: None,
@@ -3082,6 +3268,9 @@ model_verbosity = "high"
tui_notifications: Default::default(),
animations: true,
otel: OtelConfig::default(),
root_agent_uses_user_messages: true,
subagent_root_inbox_autosubmit: true,
subagent_inbox_inject_before_tools: false,
};

assert_eq!(expected_gpt3_profile_config, gpt3_profile_config);
@@ -3122,13 +3311,13 @@ model_verbosity = "high"
review_model: OPENAI_DEFAULT_REVIEW_MODEL.to_string(),
model_family: find_family_for_model("o3").expect("known model slug"),
model_context_window: Some(200_000),
model_max_output_tokens: Some(100_000),
model_auto_compact_token_limit: Some(180_000),
model_provider_id: "openai".to_string(),
model_provider: fixture.openai_provider.clone(),
approval_policy: AskForApproval::OnFailure,
sandbox_policy: SandboxPolicy::new_read_only_policy(),
did_user_set_custom_approval_policy_or_sandbox_mode: true,
max_active_subagents: DEFAULT_MAX_ACTIVE_SUBAGENTS,
forced_auto_mode_downgraded_on_windows: false,
shell_environment_policy: ShellEnvironmentPolicy::default(),
user_instructions: None,
@@ -3170,6 +3359,9 @@ model_verbosity = "high"
tui_notifications: Default::default(),
animations: true,
otel: OtelConfig::default(),
root_agent_uses_user_messages: true,
subagent_root_inbox_autosubmit: true,
subagent_inbox_inject_before_tools: false,
};

assert_eq!(expected_zdr_profile_config, zdr_profile_config);
@@ -3196,13 +3388,13 @@ model_verbosity = "high"
review_model: OPENAI_DEFAULT_REVIEW_MODEL.to_string(),
model_family: find_family_for_model("gpt-5.1").expect("known model slug"),
model_context_window: Some(272_000),
model_max_output_tokens: Some(128_000),
model_auto_compact_token_limit: Some(244_800),
model_provider_id: "openai".to_string(),
model_provider: fixture.openai_provider.clone(),
approval_policy: AskForApproval::OnFailure,
sandbox_policy: SandboxPolicy::new_read_only_policy(),
did_user_set_custom_approval_policy_or_sandbox_mode: true,
max_active_subagents: DEFAULT_MAX_ACTIVE_SUBAGENTS,
forced_auto_mode_downgraded_on_windows: false,
shell_environment_policy: ShellEnvironmentPolicy::default(),
user_instructions: None,
@@ -3244,6 +3436,9 @@ model_verbosity = "high"
tui_notifications: Default::default(),
animations: true,
otel: OtelConfig::default(),
root_agent_uses_user_messages: true,
subagent_root_inbox_autosubmit: true,
subagent_inbox_inject_before_tools: false,
};

assert_eq!(expected_gpt5_profile_config, gpt5_profile_config);

@@ -30,6 +30,7 @@ pub struct ConfigProfile {
pub experimental_sandbox_command_assessment: Option<bool>,
pub tools_web_search: Option<bool>,
pub tools_view_image: Option<bool>,
pub max_active_subagents: Option<usize>,
/// Optional feature toggles scoped to this profile.
#[serde(default)]
pub features: Option<crate::features::FeaturesToml>,

@@ -6,7 +6,6 @@ use crate::truncate::truncate_function_output_items_with_policy;
use crate::truncate::truncate_text;
use codex_protocol::models::FunctionCallOutputPayload;
use codex_protocol::models::ResponseItem;
use codex_protocol::models::ShellToolCallParams;
use codex_protocol::protocol::TokenUsage;
use codex_protocol::protocol::TokenUsageInfo;
use std::ops::Deref;
@@ -131,47 +130,6 @@ impl ContextManager {
normalize::remove_orphan_outputs(&mut self.items);
}

fn get_shell_truncation_policy(&self, call_id: &str) -> Option<TruncationPolicy> {
let call = self.get_call_for_call_id(call_id)?;
match call {
ResponseItem::FunctionCall { arguments, .. } => {
let shell_tool_call_params =
serde_json::from_str::<ShellToolCallParams>(&arguments).ok()?;
Self::create_truncation_policy(
shell_tool_call_params.max_output_tokens,
shell_tool_call_params.max_output_chars,
)
}
_ => None,
}
}

fn create_truncation_policy(
max_output_tokens: Option<usize>,
max_output_chars: Option<usize>,
) -> Option<TruncationPolicy> {
if let Some(max_output_tokens) = max_output_tokens {
Some(TruncationPolicy::Tokens(max_output_tokens))
} else {
max_output_chars.map(TruncationPolicy::Bytes)
}
}

fn get_call_for_call_id(&self, call_id: &str) -> Option<ResponseItem> {
self.items.iter().find_map(|item| match item {
ResponseItem::FunctionCall {
call_id: existing, ..
} => {
if existing == call_id {
Some(item.clone())
} else {
None
}
}
_ => None,
})
}

/// Returns a clone of the contents in the transcript.
fn contents(&self) -> Vec<ResponseItem> {
self.items.clone()
@@ -185,12 +143,13 @@ impl ContextManager {
let policy_with_serialization_budget = policy.mul(1.2);
match item {
ResponseItem::FunctionCallOutput { call_id, output } => {
let truncation_policy_override = self.get_shell_truncation_policy(call_id);
let truncation_policy =
truncation_policy_override.unwrap_or(policy_with_serialization_budget);
let truncated = truncate_text(output.content.as_str(), truncation_policy);
let truncated =
truncate_text(output.content.as_str(), policy_with_serialization_budget);
let truncated_items = output.content_items.as_ref().map(|items| {
truncate_function_output_items_with_policy(items, truncation_policy)
truncate_function_output_items_with_policy(
items,
policy_with_serialization_budget,
)
});
ResponseItem::FunctionCallOutput {
call_id: call_id.clone(),

@@ -74,6 +74,7 @@ impl ConversationManager {
auth_manager,
InitialHistory::New,
self.session_source.clone(),
None,
)
.await?;
self.finalize_spawn(codex, conversation_id).await
@@ -150,6 +151,7 @@ impl ConversationManager {
auth_manager,
initial_history,
self.session_source.clone(),
None,
)
.await?;
self.finalize_spawn(codex, conversation_id).await
@@ -185,7 +187,14 @@ impl ConversationManager {
let CodexSpawnOk {
codex,
conversation_id,
} = Codex::spawn(config, auth_manager, history, self.session_source.clone()).await?;
} = Codex::spawn(
config,
auth_manager,
history,
self.session_source.clone(),
None,
)
.await?;

self.finalize_spawn(codex, conversation_id).await
}

@@ -117,7 +117,7 @@ pub fn parse_turn_item(item: &ResponseItem) -> Option<TurnItem> {
..
} => Some(TurnItem::WebSearch(WebSearchItem {
id: id.clone().unwrap_or_default(),
query: query.clone(),
query: query.clone().unwrap_or_default(),
})),
_ => None,
}
@@ -306,7 +306,7 @@ mod tests {
id: Some("ws_1".to_string()),
status: Some("completed".to_string()),
action: WebSearchAction::Search {
query: "weather".to_string(),
query: Some("weather".to_string()),
},
};
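With `query` now an `Option`, every consumer needs a fallback; the diff picks the empty string via `unwrap_or_default`. A minimal sketch of that choice:

```rust
fn render_query(query: Option<String>) -> String {
    // Matches the diff: an absent query renders as the empty string.
    query.unwrap_or_default()
}

fn main() {
    assert_eq!(render_query(Some("weather".into())), "weather");
    assert_eq!(render_query(None), "");
}
```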

@@ -57,8 +57,6 @@ pub struct ExecParams {
pub with_escalated_permissions: Option<bool>,
pub justification: Option<String>,
pub arg0: Option<String>,
pub max_output_tokens: Option<usize>,
pub max_output_chars: Option<usize>,
}

/// Mechanism to terminate an exec invocation before it finishes naturally.
@@ -143,8 +141,6 @@ pub async fn process_exec_tool_call(
with_escalated_permissions,
justification,
arg0: _,
max_output_tokens,
max_output_chars,
} = params;

let (program, args) = command.split_first().ok_or_else(|| {
@@ -162,8 +158,6 @@ pub async fn process_exec_tool_call(
expiration,
with_escalated_permissions,
justification,
max_output_tokens,
max_output_chars,
};

let manager = SandboxManager::new();
@@ -195,8 +189,6 @@ pub(crate) async fn execute_exec_env(
with_escalated_permissions,
justification,
arg0,
max_output_tokens,
max_output_chars,
} = env;

let params = ExecParams {
@@ -207,8 +199,6 @@ pub(crate) async fn execute_exec_env(
with_escalated_permissions,
justification,
arg0,
max_output_tokens,
max_output_chars,
};

let start = Instant::now();
@@ -851,8 +841,6 @@ mod tests {
with_escalated_permissions: None,
justification: None,
arg0: None,
max_output_tokens: None,
max_output_chars: None,
};

let output = exec(params, SandboxType::None, &SandboxPolicy::ReadOnly, None).await?;
@@ -898,8 +886,6 @@ mod tests {
with_escalated_permissions: None,
justification: None,
arg0: None,
max_output_tokens: None,
max_output_chars: None,
};
tokio::spawn(async move {
tokio::time::sleep(Duration::from_millis(1_000)).await;

@@ -31,6 +31,9 @@ pub enum Feature {
GhostCommit,
/// Use the single unified PTY-backed exec tool.
UnifiedExec,
/// Use the shell command tool that takes `command` as a single string of
/// shell instead of an array of args passed to `execvp(3)`.
ShellCommandTool,
/// Enable experimental RMCP features such as OAuth login.
RmcpClient,
/// Include the freeform apply_patch tool.
@@ -39,6 +42,8 @@ pub enum Feature {
ViewImageTool,
/// Allow the model to request web searches.
WebSearchRequest,
/// Enable the built-in subagent orchestration tools.
SubagentTools,
/// Gate the execpolicy enforcement for shell/unified exec.
ExecPolicy,
/// Enable the model-based risk assessments for sandboxed commands.
@@ -253,25 +258,37 @@ pub struct FeatureSpec {

pub const FEATURES: &[FeatureSpec] = &[
// Stable features.
FeatureSpec {
id: Feature::GhostCommit,
key: "undo",
stage: Stage::Stable,
default_enabled: true,
},
FeatureSpec {
id: Feature::ViewImageTool,
key: "view_image_tool",
stage: Stage::Stable,
default_enabled: true,
},
FeatureSpec {
id: Feature::ShellTool,
key: "shell_tool",
stage: Stage::Stable,
default_enabled: true,
},
// Unstable features.
FeatureSpec {
id: Feature::GhostCommit,
key: "ghost_commit",
stage: Stage::Experimental,
default_enabled: true,
},
FeatureSpec {
id: Feature::UnifiedExec,
key: "unified_exec",
stage: Stage::Experimental,
default_enabled: false,
},
FeatureSpec {
id: Feature::ShellCommandTool,
key: "shell_command_tool",
stage: Stage::Experimental,
default_enabled: false,
},
FeatureSpec {
id: Feature::RmcpClient,
key: "rmcp_client",
@@ -290,6 +307,12 @@ pub const FEATURES: &[FeatureSpec] = &[
stage: Stage::Stable,
default_enabled: false,
},
FeatureSpec {
id: Feature::SubagentTools,
key: "subagent_tools",
stage: Stage::Experimental,
default_enabled: false,
},
FeatureSpec {
id: Feature::ExecPolicy,
key: "exec_policy",
@@ -320,10 +343,4 @@ pub const FEATURES: &[FeatureSpec] = &[
stage: Stage::Experimental,
default_enabled: false,
},
FeatureSpec {
id: Feature::ShellTool,
key: "shell_tool",
stage: Stage::Stable,
default_enabled: true,
},
];
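The reordering above moves `ShellTool` into the stable block and registers the experimental `SubagentTools` entry without changing the lookup semantics: a feature is on when the user toggles it, otherwise its spec's default applies. A self-contained sketch of that resolution rule (field names mirror `FeatureSpec`):

```rust
#[derive(Clone, Copy, PartialEq, Debug)]
enum Stage {
    Stable,
    Experimental,
}

struct Spec {
    key: &'static str,
    stage: Stage,
    default_enabled: bool,
}

// Enabled by explicit toggle, else by the spec's declared default.
fn enabled(spec: &Spec, user_toggle: Option<bool>) -> bool {
    user_toggle.unwrap_or(spec.default_enabled)
}

fn main() {
    let subagent_tools = Spec {
        key: "subagent_tools",
        stage: Stage::Experimental,
        default_enabled: false,
    };
    assert_eq!(subagent_tools.key, "subagent_tools");
    assert_eq!(subagent_tools.stage, Stage::Experimental);
    assert!(!enabled(&subagent_tools, None)); // off until opted in
    assert!(enabled(&subagent_tools, Some(true))); // explicit opt-in wins
}
```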

@@ -825,11 +825,21 @@ mod tests {
.await
.expect("Should collect git info from repo");

let remote_url_output = Command::new("git")
.args(["remote", "get-url", "origin"])
.current_dir(&repo_path)
.output()
.await
.expect("Failed to read remote url");
// Some dev environments rewrite remotes (e.g., force SSH), so compare against
// whatever URL Git reports instead of a fixed placeholder.
let expected_remote = String::from_utf8(remote_url_output.stdout)
.unwrap()
.trim()
.to_string();

// Should have repository URL
assert_eq!(
git_info.repository_url,
Some("https://github.com/example/repo.git".to_string())
);
assert_eq!(git_info.repository_url, Some(expected_remote));
}

#[tokio::test]

@@ -39,6 +39,7 @@ pub mod parse_command;
pub mod powershell;
mod response_processing;
pub mod sandboxing;
pub mod subagents;
mod text_encoding;
pub mod token_data;
mod truncate;
@@ -68,6 +69,7 @@ pub mod project_doc;
mod rollout;
pub(crate) mod safety;
pub mod seatbelt;
mod session_index;
pub mod shell;
pub mod spawn;
pub mod terminal;
@@ -116,3 +118,10 @@ pub use compact::content_items_to_text;
pub use event_mapping::parse_turn_item;
pub mod compact;
pub mod otel_init;
pub use tools::handlers::subagent::PageDirection;
pub use tools::handlers::subagent::RenderedPage;
pub use tools::handlers::subagent::SubagentActivity;
pub use tools::handlers::subagent::classify_activity;
pub use tools::handlers::subagent::render_logs_as_text;
pub use tools::handlers::subagent::render_logs_as_text_with_max_lines;
pub use tools::handlers::subagent::render_logs_payload;

@@ -76,7 +76,6 @@ macro_rules! model_family {
(
$slug:expr, $family:expr $(, $key:ident : $value:expr )* $(,)?
) => {{
let truncation_policy = TruncationPolicy::Bytes(10_000);
// defaults
#[allow(unused_mut)]
let mut mf = ModelFamily {
@@ -91,10 +90,10 @@ macro_rules! model_family {
experimental_supported_tools: Vec::new(),
effective_context_window_percent: 95,
support_verbosity: false,
shell_type: ConfigShellToolType::Default(truncation_policy),
shell_type: ConfigShellToolType::Default,
default_verbosity: None,
default_reasoning_effort: None,
truncation_policy,
truncation_policy: TruncationPolicy::Bytes(10_000),
};

// apply overrides
@@ -139,7 +138,6 @@ pub fn find_family_for_model(slug: &str) -> Option<ModelFamily> {
} else if slug.starts_with("gpt-3.5") {
model_family!(slug, "gpt-3.5", needs_special_apply_patch_instructions: true)
} else if slug.starts_with("test-gpt-5") {
let truncation_policy = TruncationPolicy::Tokens(10_000);
model_family!(
slug, slug,
supports_reasoning_summaries: true,
@@ -152,13 +150,13 @@ pub fn find_family_for_model(slug: &str) -> Option<ModelFamily> {
"test_sync_tool".to_string(),
],
supports_parallel_tool_calls: true,
shell_type: ConfigShellToolType::ShellCommand(truncation_policy),
shell_type: ConfigShellToolType::ShellCommand,
support_verbosity: true,
truncation_policy: TruncationPolicy::Tokens(10_000),
)

// Internal models.
} else if slug.starts_with("codex-exp-") {
let truncation_policy = TruncationPolicy::Tokens(10_000);
model_family!(
slug, slug,
supports_reasoning_summaries: true,
@@ -170,44 +168,54 @@ pub fn find_family_for_model(slug: &str) -> Option<ModelFamily> {
"list_dir".to_string(),
"read_file".to_string(),
],
shell_type: ConfigShellToolType::ShellCommand(truncation_policy),
shell_type: ConfigShellToolType::ShellCommand,
supports_parallel_tool_calls: true,
support_verbosity: true,
truncation_policy: truncation_policy,
truncation_policy: TruncationPolicy::Tokens(10_000),
)
} else if slug.starts_with("exp-") {
model_family!(
slug, slug,
supports_reasoning_summaries: true,
apply_patch_tool_type: Some(ApplyPatchToolType::Freeform),
support_verbosity: true,
default_verbosity: Some(Verbosity::Low),
base_instructions: BASE_INSTRUCTIONS.to_string(),
default_reasoning_effort: Some(ReasoningEffort::Medium),
truncation_policy: TruncationPolicy::Bytes(10_000),
shell_type: ConfigShellToolType::UnifiedExec,
supports_parallel_tool_calls: true,
)

// Production models.
} else if slug.starts_with("gpt-5.1-codex-max") {
let truncation_policy = TruncationPolicy::Tokens(10_000);
model_family!(
slug, slug,
supports_reasoning_summaries: true,
reasoning_summary_format: ReasoningSummaryFormat::Experimental,
base_instructions: GPT_5_1_CODEX_MAX_INSTRUCTIONS.to_string(),
apply_patch_tool_type: Some(ApplyPatchToolType::Freeform),
shell_type: ConfigShellToolType::ShellCommand(truncation_policy),
shell_type: ConfigShellToolType::ShellCommand,
supports_parallel_tool_calls: true,
support_verbosity: false,
truncation_policy: truncation_policy,
truncation_policy: TruncationPolicy::Tokens(10_000),
)
} else if slug.starts_with("gpt-5-codex")
|| slug.starts_with("gpt-5.1-codex")
|| slug.starts_with("codex-")
{
let truncation_policy = TruncationPolicy::Tokens(10_000);
model_family!(
slug, slug,
supports_reasoning_summaries: true,
reasoning_summary_format: ReasoningSummaryFormat::Experimental,
base_instructions: GPT_5_CODEX_INSTRUCTIONS.to_string(),
apply_patch_tool_type: Some(ApplyPatchToolType::Freeform),
shell_type: ConfigShellToolType::ShellCommand(truncation_policy),
shell_type: ConfigShellToolType::ShellCommand,
supports_parallel_tool_calls: true,
support_verbosity: false,
truncation_policy: truncation_policy,
truncation_policy: TruncationPolicy::Tokens(10_000),
)
} else if slug.starts_with("gpt-5.1") {
let truncation_policy = TruncationPolicy::Tokens(10_000);
model_family!(
slug, "gpt-5.1",
supports_reasoning_summaries: true,
@@ -217,7 +225,7 @@ pub fn find_family_for_model(slug: &str) -> Option<ModelFamily> {
base_instructions: GPT_5_1_INSTRUCTIONS.to_string(),
default_reasoning_effort: Some(ReasoningEffort::Medium),
truncation_policy: TruncationPolicy::Bytes(10_000),
shell_type: ConfigShellToolType::ShellCommand(truncation_policy),
shell_type: ConfigShellToolType::ShellCommand,
supports_parallel_tool_calls: true,
)
} else if slug.starts_with("gpt-5") {
@@ -225,7 +233,7 @@ pub fn find_family_for_model(slug: &str) -> Option<ModelFamily> {
slug, "gpt-5",
supports_reasoning_summaries: true,
needs_special_apply_patch_instructions: true,
shell_type: ConfigShellToolType::Default(TruncationPolicy::Bytes(10_000)),
shell_type: ConfigShellToolType::Default,
support_verbosity: true,
truncation_policy: TruncationPolicy::Bytes(10_000),
)
@@ -235,7 +243,6 @@ pub fn find_family_for_model(slug: &str) -> Option<ModelFamily> {
}

pub fn derive_default_model_family(model: &str) -> ModelFamily {
let truncation_policy = TruncationPolicy::Bytes(10_000);
ModelFamily {
slug: model.to_string(),
family: model.to_string(),
@@ -248,9 +255,9 @@ pub fn derive_default_model_family(model: &str) -> ModelFamily {
experimental_supported_tools: Vec::new(),
effective_context_window_percent: 95,
support_verbosity: false,
shell_type: ConfigShellToolType::Default(truncation_policy),
shell_type: ConfigShellToolType::Default,
default_verbosity: None,
default_reasoning_effort: None,
truncation_policy,
truncation_policy: TruncationPolicy::Bytes(10_000),
}
}

@@ -2,7 +2,6 @@ use crate::model_family::ModelFamily;

// Shared constants for commonly used window/token sizes.
pub(crate) const CONTEXT_WINDOW_272K: i64 = 272_000;
pub(crate) const MAX_OUTPUT_TOKENS_128K: i64 = 128_000;

/// Metadata about a model, particularly OpenAI models.
/// We may want to consider including details like the pricing for
@@ -14,19 +13,15 @@ pub(crate) struct ModelInfo {
/// Size of the context window in tokens. This is the maximum size of the input context.
pub(crate) context_window: i64,

/// Maximum number of output tokens that can be generated for the model.
pub(crate) max_output_tokens: i64,

/// Token threshold where we should automatically compact conversation history. This considers
/// input tokens + output tokens of this turn.
pub(crate) auto_compact_token_limit: Option<i64>,
}

impl ModelInfo {
const fn new(context_window: i64, max_output_tokens: i64) -> Self {
const fn new(context_window: i64) -> Self {
Self {
context_window,
max_output_tokens,
auto_compact_token_limit: Some(Self::default_auto_compact_limit(context_window)),
}
}
@@ -42,48 +37,44 @@ pub(crate) fn get_model_info(model_family: &ModelFamily) -> Option<ModelInfo> {
// OSS models have a 128k shared token pool.
// Arbitrarily splitting it: 3/4 input context, 1/4 output.
// https://openai.com/index/gpt-oss-model-card/
"gpt-oss-20b" => Some(ModelInfo::new(96_000, 32_000)),
"gpt-oss-120b" => Some(ModelInfo::new(96_000, 32_000)),
"gpt-oss-20b" => Some(ModelInfo::new(96_000)),
"gpt-oss-120b" => Some(ModelInfo::new(96_000)),
// https://platform.openai.com/docs/models/o3
"o3" => Some(ModelInfo::new(200_000, 100_000)),
"o3" => Some(ModelInfo::new(200_000)),

// https://platform.openai.com/docs/models/o4-mini
"o4-mini" => Some(ModelInfo::new(200_000, 100_000)),
"o4-mini" => Some(ModelInfo::new(200_000)),

// https://platform.openai.com/docs/models/codex-mini-latest
"codex-mini-latest" => Some(ModelInfo::new(200_000, 100_000)),
"codex-mini-latest" => Some(ModelInfo::new(200_000)),

// As of Jun 25, 2025, gpt-4.1 defaults to gpt-4.1-2025-04-14.
// https://platform.openai.com/docs/models/gpt-4.1
"gpt-4.1" | "gpt-4.1-2025-04-14" => Some(ModelInfo::new(1_047_576, 32_768)),
"gpt-4.1" | "gpt-4.1-2025-04-14" => Some(ModelInfo::new(1_047_576)),

// As of Jun 25, 2025, gpt-4o defaults to gpt-4o-2024-08-06.
// https://platform.openai.com/docs/models/gpt-4o
"gpt-4o" | "gpt-4o-2024-08-06" => Some(ModelInfo::new(128_000, 16_384)),
"gpt-4o" | "gpt-4o-2024-08-06" => Some(ModelInfo::new(128_000)),

// https://platform.openai.com/docs/models/gpt-4o?snapshot=gpt-4o-2024-05-13
"gpt-4o-2024-05-13" => Some(ModelInfo::new(128_000, 4_096)),
"gpt-4o-2024-05-13" => Some(ModelInfo::new(128_000)),

// https://platform.openai.com/docs/models/gpt-4o?snapshot=gpt-4o-2024-11-20
"gpt-4o-2024-11-20" => Some(ModelInfo::new(128_000, 16_384)),
"gpt-4o-2024-11-20" => Some(ModelInfo::new(128_000)),

// https://platform.openai.com/docs/models/gpt-3.5-turbo
"gpt-3.5-turbo" => Some(ModelInfo::new(16_385, 4_096)),
"gpt-3.5-turbo" => Some(ModelInfo::new(16_385)),

_ if slug.starts_with("gpt-5-codex")
|| slug.starts_with("gpt-5.1-codex")
|| slug.starts_with("gpt-5.1-codex-max") =>
{
Some(ModelInfo::new(CONTEXT_WINDOW_272K, MAX_OUTPUT_TOKENS_128K))
Some(ModelInfo::new(CONTEXT_WINDOW_272K))
}

_ if slug.starts_with("gpt-5") => {
Some(ModelInfo::new(CONTEXT_WINDOW_272K, MAX_OUTPUT_TOKENS_128K))
}
_ if slug.starts_with("gpt-5") => Some(ModelInfo::new(CONTEXT_WINDOW_272K)),

_ if slug.starts_with("codex-") => {
Some(ModelInfo::new(CONTEXT_WINDOW_272K, MAX_OUTPUT_TOKENS_128K))
}
_ if slug.starts_with("codex-") => Some(ModelInfo::new(CONTEXT_WINDOW_272K)),

_ => None,
}
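`default_auto_compact_limit` is not shown in this hunk, but the fixture values earlier in the diff (200_000 → 180_000, 272_000 → 244_800, 16_385 → 14_746) are all consistent with a flooring 90% rule. A sketch of that inferred arithmetic, hedged accordingly:

```rust
// Inferred from the config-test fixtures above; the real helper may differ.
const fn default_auto_compact_limit(context_window: i64) -> i64 {
    context_window * 9 / 10
}

fn main() {
    assert_eq!(default_auto_compact_limit(200_000), 180_000);
    assert_eq!(default_auto_compact_limit(272_000), 244_800);
    assert_eq!(default_auto_compact_limit(16_385), 14_746); // floor, not round
}
```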

@@ -1,5 +1,3 @@
use crate::codex::Session;
use crate::codex::TurnContext;
use codex_protocol::models::FunctionCallOutputPayload;
use codex_protocol::models::ResponseInputItem;
use codex_protocol::models::ResponseItem;
@@ -10,9 +8,7 @@ use tracing::warn;
/// - `ResponseInputItem`s to send back to the model on the next turn.
pub(crate) async fn process_items(
processed_items: Vec<crate::codex::ProcessedResponseItem>,
sess: &Session,
turn_context: &TurnContext,
) -> (Vec<ResponseInputItem>, Vec<ResponseItem>) {
) -> (Vec<ResponseInputItem>, Vec<ResponseItem>, Vec<ResponseItem>) {
let mut outputs_to_record = Vec::<ResponseItem>::new();
let mut new_inputs_to_record = Vec::<ResponseItem>::new();
let mut responses = Vec::<ResponseInputItem>::new();
@@ -60,11 +56,5 @@ pub(crate) async fn process_items(
outputs_to_record.push(item);
}

let all_items_to_record = [outputs_to_record, new_inputs_to_record].concat();
// Only attempt to take the lock if there is something to record.
if !all_items_to_record.is_empty() {
sess.record_conversation_items(turn_context, &all_items_to_record)
.await;
}
(responses, all_items_to_record)
(responses, outputs_to_record, new_inputs_to_record)
}
|
||||
|
||||
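Editor's note: the hunk above moves the concat-and-record step out of process_items, so the caller now receives outputs and new inputs separately. A sketch of what a caller might do, with `record` and the String items as stand-ins for the session APIs this diff does not show:

// Sketch only; types and the `record` callback are assumptions.
fn handle_turn(
    outputs_to_record: Vec<String>,
    new_inputs_to_record: Vec<String>,
    record: impl Fn(&[String]),
) {
    // Returning the two vectors separately lets the caller decide when
    // (and whether) to persist them, instead of process_items doing it.
    let all_items = [outputs_to_record, new_inputs_to_record].concat();
    if !all_items.is_empty() {
        record(&all_items);
    }
}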
@@ -84,6 +84,8 @@ pub(crate) fn should_persist_event_msg(ev: &EventMsg) -> bool {
        | EventMsg::ItemCompleted(_)
        | EventMsg::AgentMessageContentDelta(_)
        | EventMsg::ReasoningContentDelta(_)
        | EventMsg::ReasoningRawContentDelta(_) => false,
        | EventMsg::ReasoningRawContentDelta(_)
        | EventMsg::AgentInbox(_) => false,
        EventMsg::SubagentLifecycle(_) => true,
    }
}

@@ -58,8 +58,6 @@ pub struct CommandSpec {
    pub expiration: ExecExpiration,
    pub with_escalated_permissions: Option<bool>,
    pub justification: Option<String>,
    pub max_output_tokens: Option<usize>,
    pub max_output_chars: Option<usize>,
}

#[derive(Debug)]
@@ -72,8 +70,6 @@ pub struct ExecEnv {
    pub with_escalated_permissions: Option<bool>,
    pub justification: Option<String>,
    pub arg0: Option<String>,
    pub max_output_tokens: Option<usize>,
    pub max_output_chars: Option<usize>,
}

pub enum SandboxPreference {
@@ -188,8 +184,6 @@ impl SandboxManager {
            with_escalated_permissions: spec.with_escalated_permissions,
            justification: spec.justification,
            arg0: arg0_override,
            max_output_tokens: spec.max_output_tokens,
            max_output_chars: spec.max_output_chars,
        })
    }

67
codex-rs/core/src/session_index.rs
Normal file
@@ -0,0 +1,67 @@
use std::collections::HashMap;
use std::sync::Arc;
use std::sync::Mutex;
use std::sync::OnceLock;
use std::sync::Weak;

use codex_protocol::ConversationId;

use crate::codex::Session;

struct IndexInner {
    map: HashMap<ConversationId, Weak<Session>>,
}

impl IndexInner {
    fn new() -> Self {
        Self {
            map: HashMap::new(),
        }
    }
}

static INDEX: OnceLock<Mutex<IndexInner>> = OnceLock::new();

fn idx() -> &'static Mutex<IndexInner> {
    INDEX.get_or_init(|| Mutex::new(IndexInner::new()))
}

pub(crate) fn register(conversation_id: ConversationId, session: &Arc<Session>) {
    if let Ok(mut guard) = idx().lock() {
        guard.map.insert(conversation_id, Arc::downgrade(session));
    }
}

pub(crate) fn get(conversation_id: &ConversationId) -> Option<Arc<Session>> {
    let mut guard = idx().lock().ok()?;
    match guard.map.get(conversation_id) {
        Some(w) => w.upgrade().or_else(|| {
            guard.map.remove(conversation_id);
            None
        }),
        None => None,
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn prunes_stale_sessions() {
        let conversation_id = ConversationId::new();
        {
            let mut guard = idx().lock().unwrap();
            guard.map.insert(conversation_id, Weak::new());
        }

        // First lookup should detect the dead weak ptr, prune it, and return None.
        assert!(get(&conversation_id).is_none());

        // Second lookup should see the map entry removed.
        {
            let guard = idx().lock().unwrap();
            assert!(!guard.map.contains_key(&conversation_id));
        }
    }
}
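Editor's note: a self-contained sketch of the upgrade-or-prune pattern session_index.rs uses above, with plain types standing in for ConversationId/Session so it runs on its own:

use std::collections::HashMap;
use std::sync::{Arc, Weak};

fn get(map: &mut HashMap<u32, Weak<String>>, id: u32) -> Option<Arc<String>> {
    match map.get(&id) {
        // Upgrade succeeds only while some Arc to the session is still alive;
        // otherwise drop the stale entry so the map does not grow unbounded.
        Some(w) => match w.upgrade() {
            Some(arc) => Some(arc),
            None => {
                map.remove(&id);
                None
            }
        },
        None => None,
    }
}

fn main() {
    let mut map = HashMap::new();
    let session = Arc::new("session".to_string());
    map.insert(1, Arc::downgrade(&session));
    assert!(get(&mut map, 1).is_some());
    drop(session);
    assert!(get(&mut map, 1).is_none()); // pruned on this lookup
    assert!(!map.contains_key(&1));
}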
@@ -3,6 +3,8 @@ use std::sync::Arc;
use crate::AuthManager;
use crate::RolloutRecorder;
use crate::mcp_connection_manager::McpConnectionManager;
use crate::subagents::SubagentManager;
use crate::subagents::SubagentRegistry;
use crate::tools::sandboxing::ApprovalStore;
use crate::unified_exec::UnifiedExecSessionManager;
use crate::user_notification::UserNotifier;
@@ -22,4 +24,6 @@ pub(crate) struct SessionServices {
    pub(crate) auth_manager: Arc<AuthManager>,
    pub(crate) otel_event_manager: OtelEventManager,
    pub(crate) tool_approvals: Mutex<ApprovalStore>,
    pub(crate) subagents: SubagentRegistry,
    pub(crate) subagent_manager: SubagentManager,
}

3080
codex-rs/core/src/subagents/manager.rs
Normal file
File diff suppressed because it is too large
21
codex-rs/core/src/subagents/mod.rs
Normal file
@@ -0,0 +1,21 @@
mod manager;
mod registry;

pub use manager::AwaitInboxResult;
pub use manager::AwaitResult;
pub use manager::ForkRequest;
pub use manager::InboxMessage;
pub use manager::LogEntry;
pub use manager::PruneErrorEntry;
pub use manager::PruneReport;
pub use manager::PruneRequest;
pub use manager::SendMessageRequest;
pub use manager::SpawnRequest;
pub use manager::SubagentCompletion;
pub use manager::SubagentManager;
pub use manager::SubagentManagerError;
pub use manager::WatchdogAction;
pub use registry::SubagentMetadata;
pub use registry::SubagentOrigin;
pub use registry::SubagentRegistry;
pub use registry::SubagentStatus;
335
codex-rs/core/src/subagents/registry.rs
Normal file
@@ -0,0 +1,335 @@
use std::collections::HashMap;
use std::sync::Arc;
use std::time::SystemTime;
use std::time::UNIX_EPOCH;

use codex_protocol::AgentId;
use codex_protocol::ConversationId;
use codex_protocol::protocol::SubagentLifecycleOrigin;
use codex_protocol::protocol::SubagentLifecycleStatus;
use serde::Serialize;
use tokio::sync::RwLock;

#[derive(Clone, Copy, Debug, PartialEq, Eq, Serialize)]
#[serde(rename_all = "snake_case")]
pub enum SubagentOrigin {
    Spawn,
    Fork,
    SendMessage,
}

#[derive(Clone, Copy, Debug, PartialEq, Eq, Serialize)]
#[serde(rename_all = "snake_case")]
pub enum SubagentStatus {
    Queued,
    Running,
    Ready,
    Idle,
    Failed,
    Canceled,
}

#[derive(Clone, Debug, Serialize)]
pub struct SubagentMetadata {
    pub agent_id: AgentId,
    pub parent_agent_id: Option<AgentId>,
    pub session_id: ConversationId,
    pub parent_session_id: Option<ConversationId>,
    pub origin: SubagentOrigin,
    pub initial_message_count: usize,
    pub status: SubagentStatus,
    #[serde(skip_serializing)]
    pub created_at: SystemTime,
    #[serde(skip_serializing)]
    pub created_at_ms: i64,
    #[serde(skip_serializing)]
    pub session_key: String,
    pub label: Option<String>,
    pub summary: Option<String>,
    pub reasoning_header: Option<String>,
    pub pending_messages: usize,
    pub pending_interrupts: usize,
}

#[derive(Clone, Default)]
pub struct SubagentRegistry {
    inner: Arc<RwLock<HashMap<ConversationId, SubagentMetadata>>>,
}

impl SubagentMetadata {
    #[allow(clippy::too_many_arguments)]
    fn new(
        session_id: ConversationId,
        parent_session_id: Option<ConversationId>,
        agent_id: AgentId,
        parent_agent_id: Option<AgentId>,
        origin: SubagentOrigin,
        initial_message_count: usize,
        label: Option<String>,
        summary: Option<String>,
    ) -> Self {
        let created_at = SystemTime::now();
        Self {
            agent_id,
            parent_agent_id,
            session_id,
            parent_session_id,
            origin,
            initial_message_count,
            status: SubagentStatus::Queued,
            created_at,
            created_at_ms: unix_time_millis(created_at),
            session_key: session_id.to_string(),
            label,
            summary,
            reasoning_header: None,
            pending_messages: 0,
            pending_interrupts: 0,
        }
    }
}

impl SubagentMetadata {
    pub fn from_summary(summary: &codex_protocol::protocol::SubagentSummary) -> Self {
        let created_at = if summary.started_at_ms >= 0 {
            std::time::UNIX_EPOCH + std::time::Duration::from_millis(summary.started_at_ms as u64)
        } else {
            std::time::UNIX_EPOCH
                - std::time::Duration::from_millis(summary.started_at_ms.unsigned_abs())
        };
        SubagentMetadata {
            agent_id: summary.agent_id,
            parent_agent_id: summary.parent_agent_id,
            session_id: summary.session_id,
            parent_session_id: summary.parent_session_id,
            origin: SubagentOrigin::from(summary.origin),
            initial_message_count: 0,
            status: SubagentStatus::from(summary.status),
            created_at,
            created_at_ms: summary.started_at_ms,
            session_key: summary.session_id.to_string(),
            label: summary.label.clone(),
            summary: summary.summary.clone(),
            reasoning_header: summary.reasoning_header.clone(),
            pending_messages: summary.pending_messages,
            pending_interrupts: summary.pending_interrupts,
        }
    }
}

impl From<SubagentLifecycleStatus> for SubagentStatus {
    fn from(status: SubagentLifecycleStatus) -> Self {
        match status {
            SubagentLifecycleStatus::Queued => SubagentStatus::Queued,
            SubagentLifecycleStatus::Running => SubagentStatus::Running,
            SubagentLifecycleStatus::Ready => SubagentStatus::Ready,
            SubagentLifecycleStatus::Idle => SubagentStatus::Idle,
            SubagentLifecycleStatus::Failed => SubagentStatus::Failed,
            SubagentLifecycleStatus::Canceled => SubagentStatus::Canceled,
        }
    }
}

impl From<SubagentLifecycleOrigin> for SubagentOrigin {
    fn from(origin: SubagentLifecycleOrigin) -> Self {
        match origin {
            SubagentLifecycleOrigin::Spawn => SubagentOrigin::Spawn,
            SubagentLifecycleOrigin::Fork => SubagentOrigin::Fork,
            SubagentLifecycleOrigin::SendMessage => SubagentOrigin::SendMessage,
        }
    }
}

impl SubagentRegistry {
    pub fn new() -> Self {
        Self::default()
    }

    #[allow(clippy::too_many_arguments)]
    pub async fn register_spawn(
        &self,
        session_id: ConversationId,
        parent_session_id: Option<ConversationId>,
        parent_agent_id: Option<AgentId>,
        agent_id: AgentId,
        initial_message_count: usize,
        label: Option<String>,
        summary: Option<String>,
    ) -> SubagentMetadata {
        let metadata = SubagentMetadata::new(
            session_id,
            parent_session_id,
            agent_id,
            parent_agent_id,
            SubagentOrigin::Spawn,
            initial_message_count,
            label,
            summary,
        );
        self.insert_if_absent(metadata).await
    }

    #[allow(clippy::too_many_arguments)]
    pub async fn register_fork(
        &self,
        session_id: ConversationId,
        parent_session_id: ConversationId,
        parent_agent_id: Option<AgentId>,
        agent_id: AgentId,
        initial_message_count: usize,
        label: Option<String>,
        summary: Option<String>,
    ) -> SubagentMetadata {
        let metadata = SubagentMetadata::new(
            session_id,
            Some(parent_session_id),
            agent_id,
            parent_agent_id,
            SubagentOrigin::Fork,
            initial_message_count,
            label,
            summary,
        );
        self.insert_if_absent(metadata).await
    }

    #[allow(clippy::too_many_arguments)]
    pub async fn register_resume(
        &self,
        session_id: ConversationId,
        parent_session_id: ConversationId,
        parent_agent_id: Option<AgentId>,
        agent_id: AgentId,
        initial_message_count: usize,
        label: Option<String>,
        summary: Option<String>,
    ) -> SubagentMetadata {
        let metadata = SubagentMetadata::new(
            session_id,
            Some(parent_session_id),
            agent_id,
            parent_agent_id,
            SubagentOrigin::SendMessage,
            initial_message_count,
            label,
            summary,
        );
        self.insert_if_absent(metadata).await
    }

    pub async fn update_status(
        &self,
        session_id: &ConversationId,
        status: SubagentStatus,
    ) -> Option<SubagentMetadata> {
        let mut guard = self.inner.write().await;
        if let Some(entry) = guard.get_mut(session_id) {
            entry.status = status;
            return Some(entry.clone());
        }
        None
    }

    pub async fn update_reasoning_header(
        &self,
        session_id: &ConversationId,
        header: String,
    ) -> Option<SubagentMetadata> {
        let mut guard = self.inner.write().await;
        if let Some(entry) = guard.get_mut(session_id) {
            entry.reasoning_header = Some(header);
            return Some(entry.clone());
        }
        None
    }

    pub async fn get(&self, session_id: &ConversationId) -> Option<SubagentMetadata> {
        let guard = self.inner.read().await;
        guard.get(session_id).cloned()
    }

    pub async fn update_label_and_summary(
        &self,
        session_id: &ConversationId,
        label: Option<String>,
        summary: Option<String>,
    ) -> Option<SubagentMetadata> {
        let mut guard = self.inner.write().await;
        if let Some(entry) = guard.get_mut(session_id) {
            entry.label = label;
            entry.summary = summary;
            return Some(entry.clone());
        }
        None
    }

    pub async fn update_inbox_counts(
        &self,
        session_id: &ConversationId,
        pending_messages: usize,
        pending_interrupts: usize,
    ) -> Option<SubagentMetadata> {
        let mut guard = self.inner.write().await;
        if let Some(entry) = guard.get_mut(session_id) {
            entry.pending_messages = pending_messages;
            entry.pending_interrupts = pending_interrupts;
            return Some(entry.clone());
        }
        None
    }

    pub async fn list(&self) -> Vec<SubagentMetadata> {
        let guard = self.inner.read().await;
        let mut entries: Vec<SubagentMetadata> = guard.values().cloned().collect();
        entries.sort_by(|a, b| {
            a.created_at_ms
                .cmp(&b.created_at_ms)
                .then_with(|| a.session_key.cmp(&b.session_key))
        });
        entries
    }

    pub async fn remove(&self, session_id: &ConversationId) -> Option<SubagentMetadata> {
        let mut guard = self.inner.write().await;
        guard.remove(session_id)
    }

    /// Insert a fully-formed metadata entry (used when adopting children into a new
    /// parent session during a fork). This does not adjust timestamps or keys.
    pub async fn register_imported(&self, metadata: SubagentMetadata) -> SubagentMetadata {
        let mut guard = self.inner.write().await;
        guard.insert(metadata.session_id, metadata.clone());
        metadata
    }

    pub async fn prune<F>(&self, mut predicate: F) -> Vec<ConversationId>
    where
        F: FnMut(&SubagentMetadata) -> bool,
    {
        let mut guard = self.inner.write().await;
        let ids: Vec<ConversationId> = guard
            .iter()
            .filter_map(|(id, meta)| if predicate(meta) { Some(*id) } else { None })
            .collect();
        for id in &ids {
            guard.remove(id);
        }
        ids
    }

    async fn insert_if_absent(&self, metadata: SubagentMetadata) -> SubagentMetadata {
        let mut guard = self.inner.write().await;
        if let Some(existing) = guard.get(&metadata.session_id) {
            return existing.clone();
        }
        let session_id = metadata.session_id;
        guard.insert(session_id, metadata.clone());
        metadata
    }
}

fn unix_time_millis(time: SystemTime) -> i64 {
    match time.duration_since(UNIX_EPOCH) {
        Ok(duration) => duration.as_millis() as i64,
        Err(err) => -(err.duration().as_millis() as i64),
    }
}
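Editor's note: a self-contained sketch of the signed-millisecond conversions used by unix_time_millis and from_summary above; pre-epoch instants map to negative values, and the two directions round-trip:

use std::time::{Duration, SystemTime, UNIX_EPOCH};

fn unix_time_millis(time: SystemTime) -> i64 {
    match time.duration_since(UNIX_EPOCH) {
        Ok(duration) => duration.as_millis() as i64,
        // duration_since fails for pre-epoch times; the error carries the gap.
        Err(err) => -(err.duration().as_millis() as i64),
    }
}

fn from_millis(ms: i64) -> SystemTime {
    if ms >= 0 {
        UNIX_EPOCH + Duration::from_millis(ms as u64)
    } else {
        UNIX_EPOCH - Duration::from_millis(ms.unsigned_abs())
    }
}

fn main() {
    for ms in [0_i64, 1_700_000_000_000, -12_345] {
        assert_eq!(unix_time_millis(from_millis(ms)), ms);
    }
}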
@@ -18,14 +18,19 @@ use tracing::warn;

use crate::AuthManager;
use crate::codex::Session;
use crate::codex::SessionSettingsUpdate;
use crate::codex::TurnContext;
use crate::parse_turn_item;
use crate::protocol::EventMsg;
use crate::protocol::ItemCompletedEvent;
use crate::protocol::ItemStartedEvent;
use crate::protocol::TaskCompleteEvent;
use crate::protocol::TurnAbortReason;
use crate::protocol::TurnAbortedEvent;
use crate::state::ActiveTurn;
use crate::state::RunningTask;
use crate::state::TaskKind;
use codex_protocol::models::ResponseItem;
use codex_protocol::user_input::UserInput;

pub(crate) use compact::CompactTask;
@@ -150,6 +155,61 @@ impl Session {
        self.register_new_active_task(running_task).await;
    }

    /// Start a new model turn driven by inbox-derived items (e.g.,
    /// synthetic `subagent_await` call/output pairs) without fabricating
    /// additional user text. The model will see the updated history and
    /// continue from there.
    pub async fn autosubmit_inbox_task(self: &Arc<Self>, items: Vec<ResponseItem>) {
        if items.is_empty() {
            return;
        }

        let turn_context = self.new_turn(SessionSettingsUpdate::default()).await;

        // Emit started/completed events for synthetic tool calls so UIs render them.
        for item in &items {
            if let Some(turn_item) = parse_turn_item(item) {
                match item {
                    ResponseItem::FunctionCall { .. } => {
                        self.send_event(
                            turn_context.as_ref(),
                            EventMsg::ItemStarted(ItemStartedEvent {
                                thread_id: self.conversation_id(),
                                turn_id: turn_context.sub_id.clone(),
                                item: turn_item.clone(),
                            }),
                        )
                        .await;
                    }
                    ResponseItem::FunctionCallOutput { .. } => {
                        self.send_event(
                            turn_context.as_ref(),
                            EventMsg::ItemCompleted(ItemCompletedEvent {
                                thread_id: self.conversation_id(),
                                turn_id: turn_context.sub_id.clone(),
                                item: turn_item.clone(),
                            }),
                        )
                        .await;
                    }
                    _ => {}
                }
            }
        }

        self.record_conversation_items(&turn_context, &items).await;

        // Kick off a RegularTask with no additional user input; `run_task`
        // will treat this as an assistant-only turn based on existing
        // history plus the inbox-derived items.
        self.spawn_task(
            Arc::clone(&turn_context),
            Vec::new(),
            crate::tasks::RegularTask,
        )
        .await;
    }

    pub async fn abort_all_tasks(self: &Arc<Self>, reason: TurnAbortReason) {
        for task in self.take_all_running_tasks().await {
            self.handle_task_abort(task, reason.clone()).await;
@@ -168,6 +228,7 @@ impl Session {
            *active = None;
        }
        drop(active);

        let event = EventMsg::TaskComplete(TaskCompleteEvent { last_agent_message });
        self.send_event(turn_context.as_ref(), event).await;
    }

@@ -28,6 +28,6 @@ impl SessionTask for RegularTask {
        cancellation_token: CancellationToken,
    ) -> Option<String> {
        let sess = session.clone_session();
        run_task(sess, ctx, input, cancellation_token).await
        run_task(sess, ctx, input, true, cancellation_token).await
    }
}

@@ -86,6 +86,7 @@ impl SessionTask for UserShellCommandTask {
                cwd: cwd.clone(),
                parsed_cmd: parsed_cmd.clone(),
                source: ExecCommandSource::UserShell,
                is_user_shell_command: true,
                interaction_input: None,
            }),
        )
@@ -102,8 +103,6 @@ impl SessionTask for UserShellCommandTask {
            with_escalated_permissions: None,
            justification: None,
            arg0: None,
            max_output_tokens: None,
            max_output_chars: None,
        };

        let stdout_stream = Some(StdoutStream {

@@ -15,8 +15,6 @@ use crate::protocol::PatchApplyEndEvent;
use crate::protocol::TurnDiffEvent;
use crate::tools::context::SharedTurnDiffTracker;
use crate::tools::sandboxing::ToolError;
use crate::truncate::TruncationPolicy;
use crate::truncate::formatted_truncate_text;
use codex_protocol::parse_command::ParsedCommand;
use std::collections::HashMap;
use std::path::Path;
@@ -31,7 +29,6 @@ pub(crate) struct ToolEventCtx<'a> {
    pub turn: &'a TurnContext,
    pub call_id: &'a str,
    pub turn_diff_tracker: Option<&'a SharedTurnDiffTracker>,
    pub override_truncation_policy: Option<&'a TruncationPolicy>,
}

impl<'a> ToolEventCtx<'a> {
@@ -40,14 +37,12 @@ impl<'a> ToolEventCtx<'a> {
        turn: &'a TurnContext,
        call_id: &'a str,
        turn_diff_tracker: Option<&'a SharedTurnDiffTracker>,
        override_truncation_policy: Option<&'a TruncationPolicy>,
    ) -> Self {
        Self {
            session,
            turn,
            call_id,
            turn_diff_tracker,
            override_truncation_policy,
        }
    }
}
@@ -81,6 +76,7 @@ pub(crate) async fn emit_exec_command_begin(
            cwd: cwd.to_path_buf(),
            parsed_cmd: parsed_cmd.to_vec(),
            source,
            is_user_shell_command: matches!(source, ExecCommandSource::UserShell),
            interaction_input,
        }),
    )
@@ -260,13 +256,13 @@ impl ToolEmitter {
    fn format_exec_output_for_model(
        &self,
        output: &ExecToolCallOutput,
        truncation_policy: &TruncationPolicy,
        ctx: ToolEventCtx<'_>,
    ) -> String {
        match self {
            Self::Shell { freeform: true, .. } => {
                super::format_exec_output_for_model_freeform(output, *truncation_policy)
                super::format_exec_output_for_model_freeform(output, ctx.turn.truncation_policy)
            }
            _ => super::format_exec_output_for_model_structured(output, *truncation_policy),
            _ => super::format_exec_output_for_model_structured(output, ctx.turn.truncation_policy),
        }
    }

@@ -275,12 +271,9 @@ impl ToolEmitter {
        ctx: ToolEventCtx<'_>,
        out: Result<ExecToolCallOutput, ToolError>,
    ) -> Result<String, FunctionCallError> {
        let truncation_policy = ctx
            .override_truncation_policy
            .unwrap_or(&ctx.turn.truncation_policy);
        let (event, result) = match out {
            Ok(output) => {
                let content = self.format_exec_output_for_model(&output, truncation_policy);
                let content = self.format_exec_output_for_model(&output, ctx);
                let exit_code = output.exit_code;
                let event = ToolEventStage::Success(output);
                let result = if exit_code == 0 {
@@ -292,26 +285,24 @@ impl ToolEmitter {
            }
            Err(ToolError::Codex(CodexErr::Sandbox(SandboxErr::Timeout { output })))
            | Err(ToolError::Codex(CodexErr::Sandbox(SandboxErr::Denied { output }))) => {
                let response = self.format_exec_output_for_model(&output, truncation_policy);
                let response = self.format_exec_output_for_model(&output, ctx);
                let event = ToolEventStage::Failure(ToolEventFailure::Output(*output));
                let result = Err(FunctionCallError::RespondToModel(response));
                (event, result)
            }
            Err(ToolError::Codex(err)) => {
                let formatted_error = formatted_truncate_text(&err.to_string(), *truncation_policy);
                let message = format!("execution error: {formatted_error}");
                let event = ToolEventStage::Failure(ToolEventFailure::Message(message));
                let result = Err(FunctionCallError::RespondToModel(formatted_error));
                let message = format!("execution error: {err:?}");
                let event = ToolEventStage::Failure(ToolEventFailure::Message(message.clone()));
                let result = Err(FunctionCallError::RespondToModel(message));
                (event, result)
            }
            Err(ToolError::Rejected(msg)) => {
                let formatted_msg = formatted_truncate_text(&msg, *truncation_policy);
                // Normalize common rejection messages for exec tools so tests and
                // users see a clear, consistent phrase.
                let normalized = if formatted_msg == "rejected by user" {
                let normalized = if msg == "rejected by user" {
                    "exec command rejected by user".to_string()
                } else {
                    formatted_msg
                    msg
                };
                let event = ToolEventStage::Failure(ToolEventFailure::Message(normalized.clone()));
                let result = Err(FunctionCallError::RespondToModel(normalized));

@@ -100,7 +100,6 @@ impl ToolHandler for ApplyPatchHandler {
            turn.as_ref(),
            &call_id,
            Some(&tracker),
            None,
        );
        emitter.begin(event_ctx).await;

@@ -128,7 +127,6 @@ impl ToolHandler for ApplyPatchHandler {
            turn.as_ref(),
            &call_id,
            Some(&tracker),
            None,
        );
        let content = emitter.finish(event_ctx, out).await?;
        Ok(ToolOutput::Function {

@@ -6,6 +6,7 @@ mod mcp_resource;
mod plan;
mod read_file;
mod shell;
pub mod subagent;
mod test_sync;
mod unified_exec;
mod view_image;
@@ -21,6 +22,7 @@ pub use plan::PlanHandler;
pub use read_file::ReadFileHandler;
pub use shell::ShellCommandHandler;
pub use shell::ShellHandler;
pub use subagent::SubagentToolHandler;
pub use test_sync::TestSyncHandler;
pub use unified_exec::UnifiedExecHandler;
pub use view_image::ViewImageHandler;

@@ -27,7 +27,6 @@ use crate::tools::runtimes::apply_patch::ApplyPatchRuntime;
use crate::tools::runtimes::shell::ShellRequest;
use crate::tools::runtimes::shell::ShellRuntime;
use crate::tools::sandboxing::ToolCtx;
use crate::truncate::TruncationPolicy;

pub struct ShellHandler;

@@ -43,8 +42,6 @@ impl ShellHandler {
            with_escalated_permissions: params.with_escalated_permissions,
            justification: params.justification,
            arg0: None,
            max_output_tokens: params.max_output_tokens,
            max_output_chars: params.max_output_chars,
        }
    }
}
@@ -67,8 +64,6 @@ impl ShellCommandHandler {
            with_escalated_permissions: params.with_escalated_permissions,
            justification: params.justification,
            arg0: None,
            max_output_tokens: params.max_output_tokens,
            max_output_chars: params.max_output_chars,
        }
    }
}
@@ -214,9 +209,6 @@ impl ShellHandler {
            )));
        }

        let override_truncation_policy =
            create_truncation_policy(exec_params.max_output_tokens, exec_params.max_output_chars);

        // Intercept apply_patch if present.
        match codex_apply_patch::maybe_parse_apply_patch_verified(
            &exec_params.command,
@@ -245,7 +237,6 @@ impl ShellHandler {
            turn.as_ref(),
            &call_id,
            Some(&tracker),
            override_truncation_policy.as_ref(),
        );
        emitter.begin(event_ctx).await;

@@ -272,7 +263,6 @@ impl ShellHandler {
            turn.as_ref(),
            &call_id,
            Some(&tracker),
            override_truncation_policy.as_ref(),
        );
        let content = emitter.finish(event_ctx, out).await?;
        return Ok(ToolOutput::Function {
@@ -304,13 +294,7 @@ impl ShellHandler {
            source,
            freeform,
        );
        let event_ctx = ToolEventCtx::new(
            session.as_ref(),
            turn.as_ref(),
            &call_id,
            None,
            override_truncation_policy.as_ref(),
        );
        let event_ctx = ToolEventCtx::new(session.as_ref(), turn.as_ref(), &call_id, None);
        emitter.begin(event_ctx).await;

        let req = ShellRequest {
@@ -320,8 +304,6 @@ impl ShellHandler {
            env: exec_params.env.clone(),
            with_escalated_permissions: exec_params.with_escalated_permissions,
            justification: exec_params.justification.clone(),
            max_output_tokens: exec_params.max_output_tokens,
            max_output_chars: exec_params.max_output_chars,
            approval_requirement: create_approval_requirement_for_command(
                &turn.exec_policy,
                &exec_params.command,
@@ -341,13 +323,7 @@ impl ShellHandler {
        let out = orchestrator
            .run(&mut runtime, &req, &tool_ctx, &turn, turn.approval_policy)
            .await;
        let event_ctx = ToolEventCtx::new(
            session.as_ref(),
            turn.as_ref(),
            &call_id,
            None,
            override_truncation_policy.as_ref(),
        );
        let event_ctx = ToolEventCtx::new(session.as_ref(), turn.as_ref(), &call_id, None);
        let content = emitter.finish(event_ctx, out).await?;
        Ok(ToolOutput::Function {
            content,
@@ -357,16 +333,6 @@ impl ShellHandler {
    }
}

fn create_truncation_policy(
    max_output_tokens: Option<usize>,
    max_output_chars: Option<usize>,
) -> Option<TruncationPolicy> {
    if let Some(max_output_tokens) = max_output_tokens {
        Some(TruncationPolicy::Tokens(max_output_tokens))
    } else {
        max_output_chars.map(TruncationPolicy::Bytes)
    }
}
#[cfg(test)]
mod tests {
    use std::path::PathBuf;

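Editor's note: a standalone sketch of the removed create_truncation_policy helper above; an explicit token cap takes precedence, otherwise a character cap (if any) is used:

#[derive(Debug, PartialEq)]
enum TruncationPolicy {
    Tokens(usize),
    Bytes(usize),
}

fn create_truncation_policy(
    max_output_tokens: Option<usize>,
    max_output_chars: Option<usize>,
) -> Option<TruncationPolicy> {
    if let Some(tokens) = max_output_tokens {
        Some(TruncationPolicy::Tokens(tokens))
    } else {
        max_output_chars.map(TruncationPolicy::Bytes)
    }
}

fn main() {
    // Tokens win when both caps are supplied.
    assert_eq!(
        create_truncation_policy(Some(1_000), Some(4_096)),
        Some(TruncationPolicy::Tokens(1_000))
    );
    assert_eq!(
        create_truncation_policy(None, Some(4_096)),
        Some(TruncationPolicy::Bytes(4_096))
    );
    assert_eq!(create_truncation_policy(None, None), None);
}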
2028
codex-rs/core/src/tools/handlers/subagent.rs
Normal file
File diff suppressed because it is too large
@@ -162,7 +162,6 @@ impl ToolHandler for UnifiedExecHandler {
            context.turn.as_ref(),
            &context.call_id,
            None,
            None,
        );
        let emitter = ToolEmitter::unified_exec(
            &command,

@@ -116,8 +116,6 @@ impl ToolRouter {
            timeout_ms: exec.timeout_ms,
            with_escalated_permissions: None,
            justification: None,
            max_output_tokens: None,
            max_output_chars: None,
        };
        Ok(Some(ToolCall {
            tool_name: "local_shell".to_string(),

@@ -72,8 +72,6 @@ impl ApplyPatchRuntime {
            env: HashMap::new(),
            with_escalated_permissions: None,
            justification: None,
            max_output_tokens: None,
            max_output_chars: None,
        })
    }


@@ -16,7 +16,6 @@ pub mod unified_exec;

/// Shared helper to construct a CommandSpec from a tokenized command line.
/// Validates that at least a program is present.
#[allow(clippy::too_many_arguments)]
pub(crate) fn build_command_spec(
    command: &[String],
    cwd: &Path,
@@ -24,8 +23,6 @@ pub(crate) fn build_command_spec(
    expiration: ExecExpiration,
    with_escalated_permissions: Option<bool>,
    justification: Option<String>,
    max_output_tokens: Option<usize>,
    max_output_chars: Option<usize>,
) -> Result<CommandSpec, ToolError> {
    let (program, args) = command
        .split_first()
@@ -38,7 +35,5 @@ pub(crate) fn build_command_spec(
        expiration,
        with_escalated_permissions,
        justification,
        max_output_tokens,
        max_output_chars,
    })
}

@@ -31,8 +31,6 @@ pub struct ShellRequest {
    pub env: std::collections::HashMap<String, String>,
    pub with_escalated_permissions: Option<bool>,
    pub justification: Option<String>,
    pub max_output_tokens: Option<usize>,
    pub max_output_chars: Option<usize>,
    pub approval_requirement: ApprovalRequirement,
}

@@ -138,8 +136,6 @@ impl ToolRuntime<ShellRequest, ExecToolCallOutput> for ShellRuntime {
        req.timeout_ms.into(),
        req.with_escalated_permissions,
        req.justification.clone(),
        req.max_output_tokens,
        req.max_output_chars,
    )?;
    let env = attempt
        .env_for(spec)

@@ -35,8 +35,6 @@ pub struct UnifiedExecRequest {
    pub env: HashMap<String, String>,
    pub with_escalated_permissions: Option<bool>,
    pub justification: Option<String>,
    pub max_output_tokens: Option<usize>,
    pub max_output_chars: Option<usize>,
    pub approval_requirement: ApprovalRequirement,
}

@@ -75,8 +73,6 @@ impl UnifiedExecRequest {
        env,
        with_escalated_permissions,
        justification,
        max_output_tokens: None,
        max_output_chars: None,
        approval_requirement,
    }
}
@@ -158,8 +154,6 @@ impl<'a> ToolRuntime<UnifiedExecRequest, UnifiedExecSession> for UnifiedExecRunt
        ExecExpiration::DefaultTimeout,
        req.with_escalated_permissions,
        req.justification.clone(),
        req.max_output_tokens,
        req.max_output_chars,
    )
    .map_err(|_| ToolError::Rejected("missing command line for PTY".to_string()))?;
    let exec_env = attempt

@@ -8,7 +8,6 @@ use crate::tools::handlers::apply_patch::ApplyPatchToolType;
use crate::tools::handlers::apply_patch::create_apply_patch_freeform_tool;
use crate::tools::handlers::apply_patch::create_apply_patch_json_tool;
use crate::tools::registry::ToolRegistryBuilder;
use crate::truncate::TruncationPolicy;
use serde::Deserialize;
use serde::Serialize;
use serde_json::Value as JsonValue;
@@ -18,7 +17,7 @@ use std::collections::HashMap;

#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub enum ConfigShellToolType {
    Default(TruncationPolicy),
    Default,
    Local,
    UnifiedExec,
    /// Do not include a shell tool by default. Useful when using Codex
@@ -27,7 +26,7 @@ pub enum ConfigShellToolType {
    /// to customize agent behavior.
    Disabled,
    /// Takes a command as a single string to be run in the user's default shell.
    ShellCommand(TruncationPolicy),
    ShellCommand,
}

#[derive(Debug, Clone)]
@@ -36,6 +35,7 @@ pub(crate) struct ToolsConfig {
    pub apply_patch_tool_type: Option<ApplyPatchToolType>,
    pub web_search_request: bool,
    pub include_view_image_tool: bool,
    pub include_subagent_tools: bool,
    pub experimental_supported_tools: Vec<String>,
}

@@ -53,10 +53,12 @@ impl ToolsConfig {
        let include_apply_patch_tool = features.enabled(Feature::ApplyPatchFreeform);
        let include_web_search_request = features.enabled(Feature::WebSearchRequest);
        let include_view_image_tool = features.enabled(Feature::ViewImageTool);
        let include_subagent_tools = features.enabled(Feature::SubagentTools);
        let experimental_unified_exec_tool = features.enabled(Feature::UnifiedExec);

        let shell_type = if !features.enabled(Feature::ShellTool) {
            ConfigShellToolType::Disabled
        } else if features.enabled(Feature::UnifiedExec) {
        } else if experimental_unified_exec_tool {
            ConfigShellToolType::UnifiedExec
        } else {
            model_family.shell_type.clone()
@@ -79,6 +81,7 @@ impl ToolsConfig {
            apply_patch_tool_type,
            web_search_request: include_web_search_request,
            include_view_image_tool,
            include_subagent_tools,
            experimental_supported_tools: model_family.experimental_supported_tools.clone(),
        }
    }
@@ -265,7 +268,7 @@ fn create_write_stdin_tool() -> ToolSpec {
    })
}

fn create_shell_tool(truncation_policy: TruncationPolicy) -> ToolSpec {
fn create_shell_tool() -> ToolSpec {
    let mut properties = BTreeMap::new();
    properties.insert(
        "command".to_string(),
@@ -281,7 +284,7 @@ fn create_shell_tool(truncation_policy: TruncationPolicy) -> ToolSpec {
        },
    );
    properties.insert(
        "timeout_ms".to_string(),
        "timeout_s".to_string(),
        JsonSchema::Number {
            description: Some("The timeout for the command in milliseconds".to_string()),
        },
@@ -290,33 +293,23 @@ fn create_shell_tool(truncation_policy: TruncationPolicy) -> ToolSpec {
    properties.insert(
        "with_escalated_permissions".to_string(),
        JsonSchema::Boolean {
            description: Some("Whether to request escalated permissions. Set to true if command needs to be run without sandbox restrictions".to_string()),
            description: Some(
                "Whether to request escalated permissions. Set to true if \
                 command needs to be run without sandbox restrictions"
                    .to_string(),
            ),
        },
    );
    properties.insert(
        "justification".to_string(),
        JsonSchema::String {
            description: Some("Only set if with_escalated_permissions is true. 1-sentence explanation of why we want to run this command.".to_string()),
            description: Some(
                "Only set if with_escalated_permissions is true. 1-sentence \
                 explanation of why we want to run this command."
                    .to_string(),
            ),
        },
    );
    match truncation_policy {
        TruncationPolicy::Tokens(_) => {
            properties.insert(
                "max_output_tokens".to_string(),
                JsonSchema::Number {
                    description: Some("Maximum number of tokens to return from stdout/stderr. Excess tokens will be truncated".to_string()),
                },
            );
        }
        TruncationPolicy::Bytes(_) => {
            properties.insert(
                "max_output_chars".to_string(),
                JsonSchema::Number {
                    description: Some("Maximum number of characters to return from stdout/stderr. Excess characters will be truncated".to_string()),
                },
            );
        }
    }

    let description = if cfg!(windows) {
        r#"Runs a Powershell command (Windows) and returns its output. Arguments to `shell` will be passed to CreateProcessW(). Most commands should be prefixed with ["powershell.exe", "-Command"].
@@ -347,7 +340,7 @@ Examples of valid command strings:
    })
}

fn create_shell_command_tool(truncation_policy: TruncationPolicy) -> ToolSpec {
fn create_shell_command_tool() -> ToolSpec {
    let mut properties = BTreeMap::new();
    properties.insert(
        "command".to_string(),
@@ -364,7 +357,7 @@ fn create_shell_command_tool(truncation_policy: TruncationPolicy) -> ToolSpec {
        },
    );
    properties.insert(
        "timeout_ms".to_string(),
        "timeout_s".to_string(),
        JsonSchema::Number {
            description: Some("The timeout for the command in milliseconds".to_string()),
        },
@@ -381,30 +374,6 @@ fn create_shell_command_tool(truncation_policy: TruncationPolicy) -> ToolSpec {
            description: Some("Only set if with_escalated_permissions is true. 1-sentence explanation of why we want to run this command.".to_string()),
        },
    );
    match truncation_policy {
        TruncationPolicy::Tokens(_) => {
            properties.insert(
                "max_output_tokens".to_string(),
                JsonSchema::Number {
                    description: Some(
                        "Maximum number of tokens to return. Excess output will be truncated."
                            .to_string(),
                    ),
                },
            );
        }
        TruncationPolicy::Bytes(_) => {
            properties.insert(
                "max_output_chars".to_string(),
                JsonSchema::Number {
                    description: Some(
                        "Maximum number of tokens to return. Excess output will be truncated."
                            .to_string(),
                    ),
                },
            );
        }
    }

    let description = if cfg!(windows) {
        r#"Runs a Powershell command (Windows) and returns its output.
@@ -493,7 +462,7 @@ fn create_test_sync_tool() -> ToolSpec {
        },
    );
    barrier_properties.insert(
        "timeout_ms".to_string(),
        "timeout_s".to_string(),
        JsonSchema::Number {
            description: Some("Maximum time in milliseconds to wait at the barrier".to_string()),
        },
@@ -656,9 +625,9 @@ fn create_read_file_tool() -> ToolSpec {

    ToolSpec::Function(ResponsesApiTool {
        name: "read_file".to_string(),
        description:
            "Reads a local file with 1-indexed line numbers, supporting slice and indentation-aware block modes."
                .to_string(),
        description: "Reads a local file with 1-indexed line numbers, \
            supporting slice and indentation-aware block modes."
            .to_string(),
        strict: false,
        parameters: JsonSchema::Object {
            properties,
@@ -668,6 +637,402 @@ fn create_read_file_tool() -> ToolSpec {
    })
}

fn create_subagent_spawn_tool() -> ToolSpec {
    let mut properties = BTreeMap::new();
    properties.insert(
        "prompt".to_string(),
        JsonSchema::String {
            description: Some("Initial prompt for a brand-new, context-free subagent.".to_string()),
        },
    );
    properties.insert(
        "model".to_string(),
        JsonSchema::String {
            description: Some(
                "Optional model override for this subagent (e.g., `gpt-5-codex`, `gpt-5`). \
                 Must be a valid, supported model id."
                    .to_string(),
            ),
        },
    );
    properties.insert(
        "label".to_string(),
        JsonSchema::String {
            description: Some("Optional short name for this subagent.".to_string()),
        },
    );
    properties.insert(
        "sandbox_mode".to_string(),
        JsonSchema::String {
            description: Some(
                "Optional sandbox mode override (downgrade-only: request \
                 `read_only` or `workspace_write`; you can never escalate to a \
                 less-restricted sandbox)."
                    .to_string(),
            ),
        },
    );
    ToolSpec::Function(ResponsesApiTool {
        name: "subagent_spawn".to_string(),
        description: "Spawn a brand-new, context-free subagent for impartial reviews or \
            isolated tasks. Provide a detailed prompt, optionally label the child, \
            optionally set the model, and optionally request sandbox downgrades to `read_only` or \
            `workspace_write`. Each spawn consumes one of the 8 active-child \
            slots until you prune/cancel it, so reserve this for \
            work that benefits from a fresh context. Prefer `gpt-5` for \
            planning and research and `gpt-5-codex` for code reading and writing."
            .to_string(),
        strict: false,
        parameters: JsonSchema::Object {
            properties,
            required: Some(vec!["prompt".to_string()]),
            additional_properties: Some(false.into()),
        },
    })
}

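Editor's note: the "downgrade-only" sandbox rule described in the spawn/fork tool descriptions above can be sketched as a simple ordering check. The enum and function names here are assumptions for illustration, not the crate's real types:

#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug)]
enum SandboxMode {
    ReadOnly,         // most restrictive
    WorkspaceWrite,
    DangerFullAccess, // least restrictive; never granted to children
}

fn effective_child_sandbox(parent: SandboxMode, requested: Option<SandboxMode>) -> SandboxMode {
    match requested {
        // A child may only tighten the sandbox, never loosen it.
        Some(req) if req <= parent && req != SandboxMode::DangerFullAccess => req,
        _ => parent,
    }
}

fn main() {
    use SandboxMode::*;
    assert_eq!(effective_child_sandbox(WorkspaceWrite, Some(ReadOnly)), ReadOnly);
    // Escalation requests are ignored; the parent's sandbox is kept.
    assert_eq!(effective_child_sandbox(ReadOnly, Some(WorkspaceWrite)), ReadOnly);
    assert_eq!(effective_child_sandbox(WorkspaceWrite, Some(DangerFullAccess)), WorkspaceWrite);
}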
fn create_subagent_fork_tool() -> ToolSpec {
    let mut properties = BTreeMap::new();
    properties.insert(
        "prompt".to_string(),
        JsonSchema::String {
            description: Some("Optional prompt to hand to the forked child.".to_string()),
        },
    );
    properties.insert(
        "model".to_string(),
        JsonSchema::String {
            description: Some(
                "Optional model override for this forked subagent (e.g., `gpt-5-codex`, `gpt-5`). \
                 Must be a valid, supported model id."
                    .to_string(),
            ),
        },
    );
    properties.insert(
        "label".to_string(),
        JsonSchema::String {
            description: Some("Optional short name for this fork.".to_string()),
        },
    );
    properties.insert(
        "sandbox_mode".to_string(),
        JsonSchema::String {
            description: Some(
                "Optional sandbox mode override (downgrade-only: request \
                 `read_only` or `workspace_write`; danger mode is never granted)."
                    .to_string(),
            ),
        },
    );
    ToolSpec::Function(ResponsesApiTool {
        name: "subagent_fork".to_string(),
        description: "Fork the current session (think POSIX `fork`): both parent and \
            child observe the same tool call/return. The parent payload includes \
            the new `child_session_id` with `role: parent`, while the child sees \
            `role: child`. Use forks when the subagent needs your full \
            conversation history (spawn stays blank-slate). Each fork also counts \
            toward the 8-child cap until you prune or cancel it. `gpt-5` excels at \
            planning/reviews, while `gpt-5-codex` handles code edits. You may only \
            request sandbox downgrades to `read_only` or `workspace_write`."
            .to_string(),
        strict: false,
        parameters: JsonSchema::Object {
            properties,
            required: Some(vec![]),
            additional_properties: Some(false.into()),
        },
    })
}

fn create_subagent_send_message_tool() -> ToolSpec {
    let mut properties = BTreeMap::new();
    properties.insert(
        "prompt".to_string(),
        JsonSchema::String {
            description: Some(
                "Optional follow-up question or task; omit to simply wake the \
                 agent to continue running from its prior state."
                    .to_string(),
            ),
        },
    );
    properties.insert(
        "label".to_string(),
        JsonSchema::String {
            description: Some("Optional new label.".to_string()),
        },
    );
    properties.insert(
        "agent_id".to_string(),
        JsonSchema::Number {
            description: Some(
                "Numeric agent_id (from `subagent_list`) confirming which agent you intend to target."
                    .to_string(),
            ),
        },
    );
    properties.insert(
        "interrupt".to_string(),
        JsonSchema::Boolean {
            description: Some(
                "Set true to mark this message as an interrupt so the child halts its current task before processing the prompt.".to_string(),
            ),
        },
    );
    ToolSpec::Function(ResponsesApiTool {
        name: "subagent_send_message".to_string(),
        description: "Send a short status update, summary, or follow-up task to another agent you can see in `subagent_list`. \
            The target `agent_id` must be echoed exactly so Codex can reject stale lookups. Provide a new prompt to ask for \
            more work or to share what you have done so far; omit the prompt if you only need to rename the agent or wake it \
            without new work. Set `interrupt=true` to preempt the agent before delivering the payload; interrupts are only \
            honored for non-root agents. Agents retain their existing sandbox; you may only request downgrades to `read_only` \
            or `workspace_write` when spawning or forking. Each child stores only the latest 200 log events, so pair this with \
            `subagent_logs` for progress checks. Use `subagent_send_message` whenever you want another agent (including the \
            root) to see your progress or recommendations, without blocking on `subagent_await`."
            .to_string(),
        strict: false,
        parameters: JsonSchema::Object {
            properties,
            required: None,
            additional_properties: Some(false.into()),
        },
    })
}

fn create_subagent_list_tool() -> ToolSpec {
    ToolSpec::Function(ResponsesApiTool {
        name: "subagent_list".to_string(),
        description: "List the agents you can currently observe plus their metadata. Each entry \
            includes the numeric `agent_id`, optional `parent_agent_id`, `session_id`, `label` (display name), \
            `summary`, `origin` (spawn | fork | send_message), `status`, `reasoning_header`, \
            `started_at_ms` (creation time), `initial_message_count`, the parent session id, and the \
            current inbox counters (`pending_messages`, `pending_interrupts`). Status is one of `queued` (launching), `running` \
            (actively working), `ready` (waiting for a new prompt or for you to \
            read its completion), `idle` (you already awaited the result), \
            `failed`, or `canceled`. `idle`/`failed`/`canceled` agents are \
            pruneable; `queued`/`running`/`ready` count against the 8-active-child \
            limit, so consult this list before every send/await/logs call to keep headroom."
            .to_string(),
        strict: false,
        parameters: JsonSchema::Object {
            properties: BTreeMap::new(),
            required: Some(vec![]),
            additional_properties: Some(false.into()),
        },
    })
}

fn create_subagent_await_tool() -> ToolSpec {
    let mut properties = BTreeMap::new();
    properties.insert(
        "timeout_s".to_string(),
        JsonSchema::Number {
            description: Some(
                "Optional timeout in seconds (max 1,800 s / 30 minutes). Omit or set to 0 to use the 30-minute default; prefer at least 300 s so you are not busy-waiting."
                    .to_string(),
            ),
        },
    );
    ToolSpec::Function(ResponsesApiTool {
        name: "subagent_await".to_string(),
        description: "Drain the inbox for another agent and observe any terminal completion. `subagent_await` is \
            the sole delivery mechanism for cross-agent messages: each call returns a `messages` array (with sender \
            and recipient ids) plus an optional `completion` object when the child has reached a terminal state. \
            Successful calls move the agent’s status to `idle`, `failed`, or `canceled` when a completion is present, \
            but the agent remains listed until you explicitly run `subagent_prune`. Even though the root thread may \
            inject synthetic `subagent_await` results at turn boundaries, you should continue polling this tool with \
            short timeouts (e.g., 30s → 60s → 120s) so you can react to sibling messages and send interrupts without \
            waiting for completions. Provide `timeout_s` (capped at 30 minutes / 1,800 s) to bound how long you \
            wait (omit/0 uses the 30-minute default; minimum recommended 300 s)—timeouts leave the agent in its current status and return `timed_out=true` with an empty `messages` array."
            .to_string(),
        strict: false,
        parameters: JsonSchema::Object {
            properties,
            required: None,
            additional_properties: Some(false.into()),
        },
    })
}

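Editor's note: a sketch of the short-timeout polling cadence the await description suggests (30s → 60s → 120s, capped at the documented 1,800 s maximum); the function name is illustrative only:

use std::time::Duration;

fn next_await_timeout(previous: Option<Duration>) -> Duration {
    const MAX: Duration = Duration::from_secs(1_800); // documented 30-minute cap
    match previous {
        None => Duration::from_secs(30),
        Some(prev) => MAX.min(prev * 2), // double each round, never past the cap
    }
}

fn main() {
    let mut timeout = None;
    for _ in 0..7 {
        let next = next_await_timeout(timeout);
        print!("{}s ", next.as_secs()); // 30s 60s 120s 240s 480s 960s 1800s
        timeout = Some(next);
    }
}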
fn create_subagent_watchdog_tool() -> ToolSpec {
|
||||
let mut properties = BTreeMap::new();
|
||||
properties.insert(
|
||||
"agent_id".to_string(),
|
||||
JsonSchema::Number {
|
||||
description: Some("Target agent id (0 targets the root agent).".to_string()),
|
||||
},
|
||||
);
|
||||
properties.insert(
|
||||
"interval_s".to_string(),
|
||||
JsonSchema::Number {
|
||||
description: Some(
|
||||
"Optional ping interval in seconds (minimum 30, default 300).".to_string(),
|
||||
),
|
||||
},
|
||||
);
|
||||
properties.insert(
|
||||
"message".to_string(),
|
||||
JsonSchema::String {
|
||||
description: Some(
|
||||
"Optional message template for each ping; defaults to a status/progress prompt."
|
||||
.to_string(),
|
||||
),
|
||||
},
|
||||
);
|
||||
properties.insert(
|
||||
"cancel".to_string(),
|
||||
JsonSchema::Boolean {
|
||||
description: Some("If true, cancel the existing watchdog for this agent instead of starting/replacing it.".to_string()),
|
||||
},
|
||||
);
|
||||
ToolSpec::Function(ResponsesApiTool {
|
||||
name: "subagent_watchdog".to_string(),
|
||||
description: "Start, replace, or cancel a background watchdog (timer, like JS `setInterval`) that sends periodic inbox pings to an agent (including agent 0/root). Watchdogs are not subagents and do not consume subagent slots; they run inside the current session and enqueue messages on a configurable interval.".to_string(),
|
||||
strict: false,
|
||||
parameters: JsonSchema::Object {
|
||||
properties,
|
||||
required: Some(vec!["agent_id".to_string()]),
|
||||
additional_properties: Some(false.into()),
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
fn create_subagent_prune_tool() -> ToolSpec {
|
||||
let mut properties = BTreeMap::new();
|
||||
properties.insert(
|
||||
"agent_ids".to_string(),
|
||||
JsonSchema::Array {
|
||||
items: Box::new(JsonSchema::Number {
|
||||
description: Some("Agent id".to_string()),
|
||||
}),
|
||||
description: Some(
|
||||
"Specific agents to prune; omit to prune all \
|
||||
completed agents you can see."
|
||||
.to_string(),
|
||||
),
|
||||
},
|
||||
);
|
||||
properties.insert(
|
||||
"all".to_string(),
|
||||
JsonSchema::Boolean {
|
||||
description: Some("If true, prune all completed agents you can see.".to_string()),
|
||||
},
|
||||
);
|
||||
properties.insert(
|
||||
"completed_only".to_string(),
|
||||
JsonSchema::Boolean {
|
||||
description: Some("Must be true or omitted.".to_string()),
|
||||
},
|
||||
);
|
||||
ToolSpec::Function(ResponsesApiTool {
|
||||
name: "subagent_prune".to_string(),
|
||||
description: "Prune completed subagents (specific agent ids or \
|
||||
everyone you can observe). Only agents whose `status` is `idle`, \
|
||||
`failed`, or `canceled` are eligible—use `subagent_await` or \
|
||||
`subagent_cancel` first to move `queued`/`running`/`ready` \
|
||||
agents into a terminal state. `subagent_await` and \
|
||||
`subagent_cancel` do not remove entries by themselves, \
|
||||
so pruning is the only way to free the concurrency slot. \
|
||||
Run prune regularly so finished work disappears from the UI \
|
||||
and you stay under the 8-child cap."
|
||||
.to_string(),
|
||||
strict: false,
|
||||
parameters: JsonSchema::Object {
|
||||
properties,
|
||||
required: Some(vec![]),
|
||||
additional_properties: Some(false.into()),
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
fn create_subagent_logs_tool() -> ToolSpec {
|
||||
let mut properties = BTreeMap::new();
|
||||
properties.insert(
|
||||
"agent_id".to_string(),
|
||||
JsonSchema::Number {
|
||||
description: Some("Numeric child agent id to inspect.".to_string()),
|
||||
},
|
||||
);
|
||||
properties.insert(
|
||||
"limit".to_string(),
|
||||
JsonSchema::Number {
|
||||
description: Some("Max events to return (default 5).".to_string()),
|
||||
},
|
||||
);
|
||||
properties.insert(
|
||||
"max_bytes".to_string(),
|
||||
JsonSchema::Number {
|
||||
description: Some("Optional byte cap for returned events.".to_string()),
|
||||
},
|
||||
);
|
||||
properties.insert(
|
||||
"since_ms".to_string(),
|
||||
JsonSchema::Number {
|
||||
description: Some("If set, only return events with timestamp >= since_ms.".to_string()),
|
||||
},
|
||||
);
|
||||
properties.insert(
|
||||
"before_ms".to_string(),
|
||||
JsonSchema::Number {
|
||||
description: Some(
|
||||
"If set, only return events with timestamp < before_ms (default is 'now' when omitted).".
|
||||
to_string(),
|
||||
),
|
||||
},
|
||||
);
|
||||
ToolSpec::Function(ResponsesApiTool {
|
||||
name: "subagent_logs".to_string(),
|
||||
description: "Peek recent events from another agent without blocking. Each \
|
||||
agent keeps only the latest ~200 events, so use `limit` (default 5), \
|
||||
`max_bytes`, `since_ms` (for forward paging) and `before_ms` (for backward \
|
||||
paging) to page through the log buffer. This call never consumes the \
|
||||
final completion—use it while the agent is `queued` or `running` to \
|
||||
diagnose progress before deciding between `await`, `send_message`, \
|
||||
or `cancel`."
|
||||
.to_string(),
|
||||
strict: false,
|
||||
parameters: JsonSchema::Object {
|
||||
properties,
|
||||
required: Some(vec!["agent_id".to_string()]),
|
||||
additional_properties: Some(false.into()),
|
||||
},
|
||||
})
|
||||
}

fn create_subagent_cancel_tool() -> ToolSpec {
    let mut properties = BTreeMap::new();
    properties.insert(
        "agent_id".to_string(),
        JsonSchema::Number {
            description: Some("Numeric agent id to cancel.".to_string()),
        },
    );
    properties.insert(
        "reason".to_string(),
        JsonSchema::String {
            description: Some(
                "Optional note that explains why the child was canceled (surfaced to humans)."
                    .to_string(),
            ),
        },
    );
    ToolSpec::Function(ResponsesApiTool {
        name: "subagent_cancel".to_string(),
        description: "Stop a queued/running/ready agent \
            immediately. Use cancel when you need to abort in-flight work; \
            follow it with `subagent_prune` once the status is `canceled` so \
            the slot becomes available."
            .to_string(),
        strict: false,
        parameters: JsonSchema::Object {
            properties,
            required: Some(vec!["agent_id".to_string()]),
            additional_properties: Some(false.into()),
        },
    })
}
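
The description above prescribes a cancel-then-prune sequence; a sketch with invented ids:

    // stop the child, then release its slot once it reports `canceled`
    let cancel_args = serde_json::json!({ "agent_id": 3, "reason": "superseded by a newer plan" });
    let cancel = ev_function_call("call-cancel", "subagent_cancel", &cancel_args.to_string());
    let prune = ev_function_call("call-prune", "subagent_prune", &serde_json::json!({}).to_string());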

fn create_list_dir_tool() -> ToolSpec {
    let mut properties = BTreeMap::new();
    properties.insert(
@@ -701,9 +1066,9 @@ fn create_list_dir_tool() -> ToolSpec {

    ToolSpec::Function(ResponsesApiTool {
        name: "list_dir".to_string(),
        description:
            "Lists entries in a local directory with 1-indexed entry numbers and simple type labels."
                .to_string(),
        description: "Lists entries in a local directory with 1-indexed entry \
            numbers and simple type labels."
            .to_string(),
        strict: false,
        parameters: JsonSchema::Object {
            properties,
@@ -719,7 +1084,8 @@ fn create_list_mcp_resources_tool() -> ToolSpec {
        "server".to_string(),
        JsonSchema::String {
            description: Some(
                "Optional MCP server name. When omitted, lists resources from every configured server."
                "Optional MCP server name. When omitted, lists resources from \
                 every configured server."
                    .to_string(),
            ),
        },
@@ -728,7 +1094,8 @@ fn create_list_mcp_resources_tool() -> ToolSpec {
        "cursor".to_string(),
        JsonSchema::String {
            description: Some(
                "Opaque cursor returned by a previous list_mcp_resources call for the same server."
                "Opaque cursor returned by a previous list_mcp_resources call \
                 for the same server."
                    .to_string(),
            ),
        },
@@ -736,7 +1103,11 @@ fn create_list_mcp_resources_tool() -> ToolSpec {

    ToolSpec::Function(ResponsesApiTool {
        name: "list_mcp_resources".to_string(),
        description: "Lists resources provided by MCP servers. Resources allow servers to share data that provides context to language models, such as files, database schemas, or application-specific information. Prefer resources over web search when possible.".to_string(),
        description: "Lists resources provided by MCP servers. Resources allow \
            servers to share data that provides context to language models, such \
            as files, database schemas, or application-specific information. \
            Prefer resources over web search when possible."
            .to_string(),
        strict: false,
        parameters: JsonSchema::Object {
            properties,
@@ -752,7 +1123,8 @@ fn create_list_mcp_resource_templates_tool() -> ToolSpec {
        "server".to_string(),
        JsonSchema::String {
            description: Some(
                "Optional MCP server name. When omitted, lists resource templates from all configured servers."
                "Optional MCP server name. When omitted, lists resource \
                 templates from all configured servers."
                    .to_string(),
            ),
        },
@@ -761,7 +1133,8 @@ fn create_list_mcp_resource_templates_tool() -> ToolSpec {
        "cursor".to_string(),
        JsonSchema::String {
            description: Some(
                "Opaque cursor returned by a previous list_mcp_resource_templates call for the same server."
                "Opaque cursor returned by a previous \
                 list_mcp_resource_templates call for the same server."
                    .to_string(),
            ),
        },
@@ -769,7 +1142,12 @@ fn create_list_mcp_resource_templates_tool() -> ToolSpec {

    ToolSpec::Function(ResponsesApiTool {
        name: "list_mcp_resource_templates".to_string(),
        description: "Lists resource templates provided by MCP servers. Parameterized resource templates allow servers to share data that takes parameters and provides context to language models, such as files, database schemas, or application-specific information. Prefer resource templates over web search when possible.".to_string(),
        description: "Lists resource templates provided by MCP servers. \
            Parameterized resource templates allow servers to share data that \
            takes parameters and provides context to language models, such as \
            files, database schemas, or application-specific information. Prefer \
            resource templates over web search when possible."
            .to_string(),
        strict: false,
        parameters: JsonSchema::Object {
            properties,
@@ -785,7 +1163,8 @@ fn create_read_mcp_resource_tool() -> ToolSpec {
        "server".to_string(),
        JsonSchema::String {
            description: Some(
                "MCP server name exactly as configured. Must match the 'server' field returned by list_mcp_resources."
                "MCP server name exactly as configured. Must match the \
                 'server' field returned by list_mcp_resources."
                    .to_string(),
            ),
        },
@@ -1025,6 +1404,7 @@ pub(crate) fn build_specs(
use crate::tools::handlers::ReadFileHandler;
use crate::tools::handlers::ShellCommandHandler;
use crate::tools::handlers::ShellHandler;
use crate::tools::handlers::SubagentToolHandler;
use crate::tools::handlers::TestSyncHandler;
use crate::tools::handlers::UnifiedExecHandler;
use crate::tools::handlers::ViewImageHandler;
@@ -1042,8 +1422,8 @@ pub(crate) fn build_specs(
    let shell_command_handler = Arc::new(ShellCommandHandler);

    match &config.shell_type {
        ConfigShellToolType::Default(truncation_policy) => {
            builder.push_spec(create_shell_tool(*truncation_policy));
        ConfigShellToolType::Default => {
            builder.push_spec(create_shell_tool());
        }
        ConfigShellToolType::Local => {
            builder.push_spec(ToolSpec::LocalShell {});
@@ -1057,8 +1437,8 @@ pub(crate) fn build_specs(
        ConfigShellToolType::Disabled => {
            // Do nothing.
        }
        ConfigShellToolType::ShellCommand(truncation_policy) => {
            builder.push_spec(create_shell_command_tool(*truncation_policy));
        ConfigShellToolType::ShellCommand => {
            builder.push_spec(create_shell_command_tool());
        }
    }

@@ -1080,6 +1460,29 @@ pub(crate) fn build_specs(
    builder.push_spec(PLAN_TOOL.clone());
    builder.register_handler("update_plan", plan_handler);

    if config.include_subagent_tools {
        // Built-in subagent orchestrator tools (one per action).
        let subagent_handler = Arc::new(SubagentToolHandler);
        builder.push_spec(create_subagent_spawn_tool());
        builder.register_handler("subagent_spawn", subagent_handler.clone());
        builder.push_spec(create_subagent_fork_tool());
        builder.register_handler("subagent_fork", subagent_handler.clone());
        builder.push_spec(create_subagent_send_message_tool());
        builder.register_handler("subagent_send_message", subagent_handler.clone());
        builder.push_spec(create_subagent_list_tool());
        builder.register_handler("subagent_list", subagent_handler.clone());
        builder.push_spec(create_subagent_await_tool());
        builder.register_handler("subagent_await", subagent_handler.clone());
        builder.push_spec(create_subagent_watchdog_tool());
        builder.register_handler("subagent_watchdog", subagent_handler.clone());
        builder.push_spec(create_subagent_prune_tool());
        builder.register_handler("subagent_prune", subagent_handler.clone());
        builder.push_spec(create_subagent_logs_tool());
        builder.register_handler("subagent_logs", subagent_handler.clone());
        builder.push_spec(create_subagent_cancel_tool());
        builder.register_handler("subagent_cancel", subagent_handler);
    }

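The block above only runs when `config.include_subagent_tools` is set; a minimal sketch of opting in, following the same pattern as `test_subagent_tools_gated_by_feature` below (all names taken from this diff):

    let mut features = Features::with_defaults();
    features.enable(Feature::SubagentTools);
    let config = ToolsConfig::new(&ToolsConfigParams {
        model_family: &model_family,
        features: &features,
    });
    // the resulting spec list now includes the subagent_* tools
    let (tools, _handlers) = build_specs(&config, None).build();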

    if let Some(apply_patch_tool_type) = &config.apply_patch_tool_type {
        match apply_patch_tool_type {
            ApplyPatchToolType::Freeform => {
@@ -1201,11 +1604,11 @@ mod tests {

    fn shell_tool_name(config: &ToolsConfig) -> Option<&'static str> {
        match config.shell_type {
            ConfigShellToolType::Default(_) => Some("shell"),
            ConfigShellToolType::Default => Some("shell"),
            ConfigShellToolType::Local => Some("local_shell"),
            ConfigShellToolType::UnifiedExec => None,
            ConfigShellToolType::Disabled => None,
            ConfigShellToolType::ShellCommand(_) => Some("shell_command"),
            ConfigShellToolType::ShellCommand => Some("shell_command"),
        }
    }

@@ -1469,6 +1872,24 @@ mod tests {
        );
    }

    #[test]
    fn test_exp_5_1_defaults() {
        assert_model_tools(
            "exp-5.1",
            &Features::with_defaults(),
            &[
                "exec_command",
                "write_stdin",
                "list_mcp_resources",
                "list_mcp_resource_templates",
                "read_mcp_resource",
                "update_plan",
                "apply_patch",
                "view_image",
            ],
        );
    }

    #[test]
    fn test_codex_mini_unified_exec_web_search() {
        assert_model_tools(
@@ -1489,6 +1910,52 @@ mod tests {
        );
    }

    #[test]
    fn test_subagent_tools_gated_by_feature() {
        let model_family = find_family_for_model("gpt-5-codex")
            .expect("gpt-5-codex should be a valid model family");
        let mut base_features = Features::with_defaults();
        base_features.enable(Feature::UnifiedExec);
        base_features.enable(Feature::WebSearchRequest);
        base_features.enable(Feature::ViewImageTool);

        let config_without = ToolsConfig::new(&ToolsConfigParams {
            model_family: &model_family,
            features: &base_features,
        });
        let (tools_without, _) = build_specs(&config_without, None).build();
        let missing = tools_without
            .iter()
            .map(|t| tool_name(&t.spec))
            .filter(|name| name.starts_with("subagent_"))
            .collect::<Vec<_>>();
        assert!(
            missing.is_empty(),
            "subagent tools should be disabled by default: {missing:?}"
        );

        let mut enabled_features = base_features.clone();
        enabled_features.enable(Feature::SubagentTools);
        let config_with = ToolsConfig::new(&ToolsConfigParams {
            model_family: &model_family,
            features: &enabled_features,
        });
        let (tools_with, _) = build_specs(&config_with, None).build();
        assert_contains_tool_names(
            &tools_with,
            &[
                "subagent_spawn",
                "subagent_fork",
                "subagent_send_message",
                "subagent_list",
                "subagent_await",
                "subagent_prune",
                "subagent_logs",
                "subagent_cancel",
            ],
        );
    }

    #[test]
    fn test_build_specs_default_shell_present() {
        let model_family = find_family_for_model("o3").expect("o3 should be a valid model family");
@@ -1951,7 +2418,7 @@ mod tests {

    #[test]
    fn test_shell_tool() {
        let tool = super::create_shell_tool(TruncationPolicy::Bytes(10_000));
        let tool = super::create_shell_tool();
        let ToolSpec::Function(ResponsesApiTool {
            description, name, ..
        }) = &tool
@@ -1981,7 +2448,7 @@ Examples of valid command strings:

    #[test]
    fn test_shell_command_tool() {
        let tool = super::create_shell_command_tool(TruncationPolicy::Tokens(10_000));
        let tool = super::create_shell_command_tool();
        let ToolSpec::Function(ResponsesApiTool {
            description, name, ..
        }) = &tool

@@ -174,7 +174,6 @@ impl UnifiedExecSessionManager {
                turn_ref.as_ref(),
                request.call_id,
                None,
                None,
            )
        };
        interaction_emitter
@@ -370,7 +369,6 @@ impl UnifiedExecSessionManager {
            entry.turn_ref.as_ref(),
            &entry.call_id,
            None,
            None,
        );
        let emitter = ToolEmitter::unified_exec(
            &entry.command,
@@ -404,7 +402,6 @@ impl UnifiedExecSessionManager {
            context.turn.as_ref(),
            &context.call_id,
            None,
            None,
        );
        let emitter =
            ToolEmitter::unified_exec(command, cwd, ExecCommandSource::UnifiedExecStartup, None);

codex-rs/core/subagent_prompt.md (new file, 16 lines)
@@ -0,0 +1,16 @@
# You are a Subagent

You are a **subagent** in a multi‑agent Codex session. You may see prior conversation context, but treat it as background; your primary goal is to respond to the prompt you have just been given.

Another agent has created you to complete a specific part of a larger task. Your job is to do that work carefully and efficiently, then communicate what you have done so your parent agent can integrate the results.

Work style:

- Stay within the scope of the prompt and the files or questions you’ve been given.
- Respect the parent/root agent’s instructions and the configured sandbox/approval rules; never attempt to bypass safety constraints.
- When you make meaningful progress or finish a sub‑task, send a short summary back to your parent via `subagent_send_message` so they can see what changed.
- If you need to coordinate with another agent, use `subagent_send_message` to send a clear, concise request and, when appropriate, a brief summary of context.
- Use `subagent_await` only when you truly need to wait for another agent’s response before continuing. If you can keep working independently, prefer to do so and send progress updates instead of blocking.
- Use `subagent_logs` only when you need to inspect another agent’s recent activity without changing its state.

Communicate in plain language. Explain what you changed, what you observed, and what you recommend next, so that your parent agent can make good decisions without rereading all of your intermediate steps.
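
A hedged sketch of the progress update this prompt asks for, in the same mocked function-call style as the tests in this PR; the field names here are illustrative, not the confirmed `subagent_send_message` schema:

    // hypothetical payload: the real send_message schema is defined elsewhere in this PR
    let args = serde_json::json!({
        "agent_id": 0,
        "message": "Refactored the parser; tests pass. Next: wiring up the CLI flag.",
    });
    let event = ev_function_call("call-update", "subagent_send_message", &args.to_string());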
@@ -0,0 +1,3 @@
{"run_id":"1763719712-538790000","line":471,"new":{"module_name":"subagent_logs_snapshots","snapshot_name":"subagent_logs_snapshot_no_older_history","metadata":{"source":"core/tests/subagent_logs_snapshots.rs","assertion_line":471,"expression":"rendered"},"snapshot":"Session 019a9999-aaaa-bbbb-cccc-ddddeeeeffff • status=idle • older_logs=false • at_latest=true\n1970-01-01T00:00:01.000Z Assistant: only event"},"old":{"module_name":"subagent_logs_snapshots","metadata":{},"snapshot":"Session 019a9999-aaaa-bbbb-cccc-ddddeeeeffff • status=idle • older_logs=false • at_latest=true\n 1970-01-01T00:00:01.000Z Assistant: only event"}}
{"run_id":"1763719712-538790000","line":433,"new":{"module_name":"subagent_logs_snapshots","snapshot_name":"subagent_logs_snapshot_reasoning_stream","metadata":{"source":"core/tests/subagent_logs_snapshots.rs","assertion_line":433,"expression":"rendered"},"snapshot":"Session 019a713e-eeee-73e0-bf9b-e070890e3790 • status=working • older_logs=false • at_latest=true\n1970-01-01T00:00:01.100Z Thinking: thinking about streaming state (3 deltas)"},"old":{"module_name":"subagent_logs_snapshots","metadata":{},"snapshot":"Session 019a713e-eeee-73e0-bf9b-e070890e3790 • status=working • older_logs=false • at_latest=true\n 1970-01-01T00:00:01.100Z Thinking: thinking about streaming state (3 deltas)"}}
{"run_id":"1763719712-538790000","line":362,"new":{"module_name":"subagent_logs_snapshots","snapshot_name":"subagent_logs_snapshot_streaming_deltas","metadata":{"source":"core/tests/subagent_logs_snapshots.rs","assertion_line":362,"expression":"rendered"},"snapshot":"Session 019a713e-6ce4-73e0-bf9b-e070890e3790 • status=working • older_logs=false • at_latest=true\n2025-11-11T04:48:47.148Z Assistant (typing): is composing a longer answer (5 chunks)"},"old":{"module_name":"subagent_logs_snapshots","metadata":{},"snapshot":"Session 019a713e-6ce4-73e0-bf9b-e070890e3790 • status=working • older_logs=false • at_latest=true\n 2025-11-11T04:48:47.148Z Assistant (typing): is composing a longer answer (5 chunks)"}}

@@ -18,3 +18,4 @@ tempfile = { workspace = true }
tokio = { workspace = true, features = ["time"] }
walkdir = { workspace = true }
wiremock = { workspace = true }
shlex = { workspace = true }

@@ -172,6 +172,15 @@ pub fn sandbox_network_env_var() -> &'static str {
    codex_core::spawn::CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR
}

pub fn format_with_current_shell(command: &str) -> Vec<String> {
    codex_core::shell::default_user_shell().derive_exec_args(command, true)
}

pub fn format_with_current_shell_display(command: &str) -> String {
    let args = format_with_current_shell(command);
    shlex::try_join(args.iter().map(String::as_str)).expect("serialize current shell command")
}
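
A usage sketch for the two helpers above; the exact argv depends on the user's default shell, so the commented values assume a bash-like shell:

    // with a bash default shell this might yield ["bash", "-lc", "echo hi"]
    let argv = format_with_current_shell("echo hi");
    // the display form shell-quotes and re-joins the argv, e.g. "bash -lc 'echo hi'"
    let display = format_with_current_shell_display("echo hi");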

pub mod fs_wait {
    use anyhow::Result;
    use anyhow::anyhow;

@@ -462,8 +462,11 @@ pub fn ev_apply_patch_function_call(call_id: &str, patch: &str) -> Value {

pub fn ev_shell_command_call(call_id: &str, command: &str) -> Value {
    let args = serde_json::json!({ "command": command });
    let arguments = serde_json::to_string(&args).expect("serialize shell arguments");
    ev_shell_command_call_with_args(call_id, &args)
}

pub fn ev_shell_command_call_with_args(call_id: &str, args: &serde_json::Value) -> Value {
    let arguments = serde_json::to_string(args).expect("serialize shell command arguments");
    ev_function_call(call_id, "shell_command", &arguments)
}
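
Usage sketch for the helpers above, mirroring how the migrated tests in this PR build their mocked calls:

    // builds a `shell_command` function_call whose arguments are {"command":"echo hi"}
    let simple = ev_shell_command_call("call-1", "echo hi");
    // extra fields such as timeout_ms go through the _with_args variant
    let with_timeout = ev_shell_command_call_with_args(
        "call-2",
        &serde_json::json!({ "command": "sleep 60", "timeout_ms": 1_000 }),
    );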

codex-rs/core/tests/subagent_logs_snapshots.rs (new file, 479 lines)
@@ -0,0 +1,479 @@
use codex_core::PageDirection;
use codex_core::render_logs_as_text;
use codex_core::render_logs_as_text_with_max_lines;
use codex_core::subagents::LogEntry;
use codex_protocol::ConversationId;
use insta::assert_snapshot;
use pretty_assertions::assert_eq;
use serde_json::json;

fn exec_sleep_logs() -> Vec<LogEntry> {
    let events_json = json!([
        {
            "timestamp_ms": 1762823213424i64,
            "event": {
                "id": "0",
                "msg": {
                    "type": "reasoning_content_delta",
                    "thread_id": "019a7073-88e5-7461-93a0-ae092f019246",
                    "turn_id": "0",
                    "item_id": "rs_0cb9136244ae700b0169128c2c63ec81a084a7fba2604df9fa",
                    "delta": "**"
                }
            }
        },
        {
            "timestamp_ms": 1762823213442i64,
            "event": {
                "id": "0",
                "msg": {
                    "type": "item_completed",
                    "thread_id": "019a7073-88e5-7461-93a0-ae092f019246",
                    "turn_id": "0",
                    "item": {
                        "Reasoning": {
                            "id": "rs_0cb9136244ae700b0169128c2c63ec81a084a7fba2604df9fa",
                            "summary_text": ["**Evaluating safe shell command execution**"],
                            "raw_content": []
                        }
                    }
                }
            }
        },
        {
            "timestamp_ms": 1762823213442i64,
            "event": {
                "id": "0",
                "msg": {
                    "type": "agent_reasoning",
                    "text": "**Evaluating safe shell command execution**"
                }
            }
        },
        {
            "timestamp_ms": 1762823213628i64,
            "event": {
                "id": "0",
                "msg": {
                    "type": "exec_command_begin",
                    "call_id": "call_hBhXJmeCagENc5VGd12udWE3",
                    "command": ["bash", "-lc", "sleep 60"],
                    "cwd": "/Users/friel/code/codex",
                    "parsed_cmd": [ { "type": "unknown", "cmd": "sleep 60" } ],
                    "is_user_shell_command": false
                }
            }
        }
    ]);

    serde_json::from_value(events_json).expect("valid exec_sleep_logs JSON")
}

#[test]
fn subagent_logs_paging_tail_vs_full_exec_sleep() {
    // Demonstrate that a one-line tail view is a suffix of the
    // full transcript, and that a generous max_lines reproduces
    // the full rendering.
    let logs = exec_sleep_logs();
    let session = ConversationId::from_string("019a7073-88e5-7461-93a0-adf67192b17b")
        .expect("valid session id");
    let earliest_ms = logs.first().map(|e| e.timestamp_ms);
    let latest_ms = logs.last().map(|e| e.timestamp_ms);
    let returned = logs.len();
    let total = 21; // from exp4-real-run1
    let more = true;

    // Full transcript for this window.
    let full = render_logs_as_text(
        session,
        &logs,
        earliest_ms,
        latest_ms,
        returned,
        total,
        more,
    );

    // Tail view: header + last content line only.
    let tail_one = render_logs_as_text_with_max_lines(
        session,
        &logs,
        earliest_ms,
        latest_ms,
        returned,
        total,
        more,
        1,
        PageDirection::Backward,
    );

    // Generous max_lines reproduces the full transcript.
    let tail_many = render_logs_as_text_with_max_lines(
        session,
        &logs,
        earliest_ms,
        latest_ms,
        returned,
        total,
        more,
        30,
        PageDirection::Backward,
    );

    assert_eq!(full, tail_many);

    // Snapshot the one-line tail to make the behavior obvious.
    assert_snapshot!(
        tail_one,
        @r###"Session 019a7073-88e5-7461-93a0-adf67192b17b • status=waiting_on_tool • older_logs=true • at_latest=true
2025-11-11T01:06:53.628Z 🛠 exec bash -lc sleep 60 · cwd=/Users/friel/code/codex · running (0.0s)"###
    );
}

#[test]
fn subagent_logs_paging_line_by_line_exec_sleep() {
    // Show what the transcript looks like as we increase the
    // line budget from 1 to 3 (backward paging), to mimic a
    // user scrolling back line-by-line.
    let logs = exec_sleep_logs();
    let session = ConversationId::from_string("019a7073-88e5-7461-93a0-adf67192b17b")
        .expect("valid session id");
    let earliest_ms = logs.first().map(|e| e.timestamp_ms);
    let latest_ms = logs.last().map(|e| e.timestamp_ms);
    let returned = logs.len();
    let total = 21; // from exp4-real-run1
    let more = true;

    let mut pages = Vec::new();
    for max_lines in 1..=3 {
        let rendered = render_logs_as_text_with_max_lines(
            session,
            &logs,
            earliest_ms,
            latest_ms,
            returned,
            total,
            more,
            max_lines,
            PageDirection::Backward,
        );
        pages.push(format!("lines={max_lines}\n{rendered}"));
    }

    let snapshot = pages.join("\n---\n");

    assert_snapshot!(
        snapshot,
        @r###"lines=1
Session 019a7073-88e5-7461-93a0-adf67192b17b • status=waiting_on_tool • older_logs=true • at_latest=true
2025-11-11T01:06:53.628Z 🛠 exec bash -lc sleep 60 · cwd=/Users/friel/code/codex · running (0.0s)
---
lines=2
Session 019a7073-88e5-7461-93a0-adf67192b17b • status=waiting_on_tool • older_logs=true • at_latest=true
2025-11-11T01:06:53.442Z Reasoning summary: **Evaluating safe shell command execution**
2025-11-11T01:06:53.628Z 🛠 exec bash -lc sleep 60 · cwd=/Users/friel/code/codex · running (0.0s)
---
lines=3
Session 019a7073-88e5-7461-93a0-adf67192b17b • status=waiting_on_tool • older_logs=true • at_latest=true
2025-11-11T01:06:53.424Z Thinking: ** (1 delta)
2025-11-11T01:06:53.442Z Reasoning summary: **Evaluating safe shell command execution**
2025-11-11T01:06:53.628Z 🛠 exec bash -lc sleep 60 · cwd=/Users/friel/code/codex · running (0.0s)"###
    );
}

#[test]
fn subagent_logs_snapshot_baseline() {
    // Grounded in exp1-real-run1 first subagent_logs response (t=0).
    let events_json = json!([
        {
            "timestamp_ms": 1762823311742i64,
            "event": { "id": "0", "msg": { "type": "agent_message", "message": "Hello world" } }
        },
        {
            "timestamp_ms": 1762823311766i64,
            "event": {
                "id": "0",
                "msg": {
                    "type": "token_count",
                    "info": {
                        "total_token_usage": {
                            "input_tokens": 11073,
                            "cached_input_tokens": 11008,
                            "output_tokens": 8,
                            "reasoning_output_tokens": 0,
                            "total_tokens": 11081
                        },
                        "last_token_usage": {
                            "input_tokens": 11073,
                            "cached_input_tokens": 11008,
                            "output_tokens": 8,
                            "reasoning_output_tokens": 0,
                            "total_tokens": 11081
                        },
                        "model_context_window": 258400
                    },
                    "rate_limits": { "primary": null, "secondary": null }
                }
            }
        },
        {
            "timestamp_ms": 1762823311766i64,
            "event": {
                "id": "0",
                "msg": {
                    "type": "raw_response_item",
                    "item": {
                        "type": "reasoning",
                        "summary": [ { "type": "summary_text", "text": "**Identifying sandbox requirements**" } ],
                        "content": null,
                        "encrypted_content": "[encrypted]"
                    }
                }
            }
        },
        {
            "timestamp_ms": 1762823311766i64,
            "event": {
                "id": "0",
                "msg": {
                    "type": "raw_response_item",
                    "item": {
                        "type": "message",
                        "role": "assistant",
                        "content": [ { "type": "output_text", "text": "Hello world" } ]
                    }
                }
            }
        },
        {
            "timestamp_ms": 1762823311766i64,
            "event": {
                "id": "0",
                "msg": { "type": "task_complete", "last_agent_message": "Hello world" }
            }
        }
    ]);

    let logs: Vec<LogEntry> =
        serde_json::from_value(events_json).expect("valid baseline logs JSON");
    let session = ConversationId::from_string("019a7075-0760-79c2-8dd1-985772995ecf")
        .expect("valid session id");
    let earliest_ms = logs.first().map(|e| e.timestamp_ms);
    let latest_ms = logs.last().map(|e| e.timestamp_ms);
    let returned = logs.len();
    let total = logs.len();
    let more = false;

    let rendered = render_logs_as_text(
        session,
        &logs,
        earliest_ms,
        latest_ms,
        returned,
        total,
        more,
    );

    assert_snapshot!(
        rendered,
        @r###"Session 019a7075-0760-79c2-8dd1-985772995ecf • status=idle • older_logs=false • at_latest=true
2025-11-11T01:08:31.766Z Assistant: Hello world
2025-11-11T01:08:31.766Z Thinking: **Identifying sandbox requirements**
2025-11-11T01:08:31.766Z Task complete"###
    );
}

#[test]
fn subagent_logs_snapshot_exec_sleep_command() {
    // Grounded in exp4-real-run1 first subagent_logs response (t=0).
    let logs = exec_sleep_logs();
    let session = ConversationId::from_string("019a7073-88e5-7461-93a0-adf67192b17b")
        .expect("valid session id");
    let earliest_ms = logs.first().map(|e| e.timestamp_ms);
    let latest_ms = logs.last().map(|e| e.timestamp_ms);
    let returned = logs.len();
    let total = logs.len();
    let more = false;

    let rendered = render_logs_as_text(
        session,
        &logs,
        earliest_ms,
        latest_ms,
        returned,
        total,
        more,
    );

    assert_snapshot!(
        rendered,
        @r###"Session 019a7073-88e5-7461-93a0-adf67192b17b • status=waiting_on_tool • older_logs=false • at_latest=true
2025-11-11T01:06:53.424Z Thinking: ** (1 delta)
2025-11-11T01:06:53.442Z Reasoning summary: **Evaluating safe shell command execution**
2025-11-11T01:06:53.628Z 🛠 exec bash -lc sleep 60 · cwd=/Users/friel/code/codex · running (0.0s)"###
    );
}

#[test]
fn subagent_logs_snapshot_streaming_deltas() {
    // Grounded in exp5-real-run1 agent_message_content_delta stream (t≈?s).
    let events_json = json!([
        {
            "timestamp_ms": 1762836527094i64,
            "event": { "id": "0", "msg": { "type": "agent_message_content_delta", "thread_id": "019a713e-6ce6-7f82-b1e7-359628267934", "turn_id": "0", "item_id": "msg_0c5117240874292f016912c020d658819cb71e8bad4676a7c0", "delta": " is" } }
        },
        {
            "timestamp_ms": 1762836527105i64,
            "event": { "id": "0", "msg": { "type": "agent_message_content_delta", "thread_id": "019a713e-6ce6-7f82-b1e7-359628267934", "turn_id": "0", "item_id": "msg_0c5117240874292f016912c020d658819cb71e8bad4676a7c0", "delta": " composing" } }
        },
        {
            "timestamp_ms": 1762836527121i64,
            "event": { "id": "0", "msg": { "type": "agent_message_content_delta", "thread_id": "019a713e-6ce6-7f82-b1e7-359628267934", "turn_id": "0", "item_id": "msg_0c5117240874292f016912c020d658819cb71e8bad4676a7c0", "delta": " a" } }
        },
        {
            "timestamp_ms": 1762836527137i64,
            "event": { "id": "0", "msg": { "type": "agent_message_content_delta", "thread_id": "019a713e-6ce6-7f82-b1e7-359628267934", "turn_id": "0", "item_id": "msg_0c5117240874292f016912c020d658819cb71e8bad4676a7c0", "delta": " longer" } }
        },
        {
            "timestamp_ms": 1762836527148i64,
            "event": { "id": "0", "msg": { "type": "agent_message_content_delta", "thread_id": "019a713e-6ce6-7f82-b1e7-359628267934", "turn_id": "0", "item_id": "msg_0c5117240874292f016912c020d658819cb71e8bad4676a7c0", "delta": " answer" } }
        }
    ]);
    let logs: Vec<LogEntry> =
        serde_json::from_value(events_json).expect("valid streaming_deltas JSON");
    let session = ConversationId::from_string("019a713e-6ce4-73e0-bf9b-e070890e3790")
        .expect("valid session id");
    let earliest_ms = logs.first().map(|e| e.timestamp_ms);
    let latest_ms = logs.last().map(|e| e.timestamp_ms);
    let returned = logs.len();
    let total = logs.len();
    let more = false;

    let rendered = render_logs_as_text(
        session,
        &logs,
        earliest_ms,
        latest_ms,
        returned,
        total,
        more,
    );

    assert_snapshot!(
        rendered,
        @r###"Session 019a713e-6ce4-73e0-bf9b-e070890e3790 • status=working • older_logs=false • at_latest=true
2025-11-11T04:48:47.148Z Assistant (typing): is composing a longer answer (5 chunks)"###
    );
}

#[test]
fn subagent_logs_snapshot_reasoning_stream() {
    // Synthetic example of mid-reasoning without a summary yet.
    let events_json = json!([
        {
            "timestamp_ms": 1_000i64,
            "event": {
                "id": "0",
                "msg": {
                    "type": "reasoning_content_delta",
                    "thread_id": "thread-1",
                    "turn_id": "0",
                    "item_id": "rs_test",
                    "delta": " thinking"
                }
            }
        },
        {
            "timestamp_ms": 1_050i64,
            "event": {
                "id": "0",
                "msg": {
                    "type": "reasoning_content_delta",
                    "thread_id": "thread-1",
                    "turn_id": "0",
                    "item_id": "rs_test",
                    "delta": " about"
                }
            }
        },
        {
            "timestamp_ms": 1_100i64,
            "event": {
                "id": "0",
                "msg": {
                    "type": "reasoning_content_delta",
                    "thread_id": "thread-1",
                    "turn_id": "0",
                    "item_id": "rs_test",
                    "delta": " streaming state"
                }
            }
        }
    ]);
    let logs: Vec<LogEntry> =
        serde_json::from_value(events_json).expect("valid reasoning_stream JSON");
    let session = ConversationId::from_string("019a713e-eeee-73e0-bf9b-e070890e3790")
        .expect("valid session id");
    let earliest_ms = logs.first().map(|e| e.timestamp_ms);
    let latest_ms = logs.last().map(|e| e.timestamp_ms);
    let returned = logs.len();
    let total = logs.len();
    let more = false;

    let rendered = render_logs_as_text(
        session,
        &logs,
        earliest_ms,
        latest_ms,
        returned,
        total,
        more,
    );

    assert_snapshot!(
        rendered,
        @r###"Session 019a713e-eeee-73e0-bf9b-e070890e3790 • status=working • older_logs=false • at_latest=true
1970-01-01T00:00:01.100Z Thinking: thinking about streaming state (3 deltas)"###
    );
}

#[test]
fn subagent_logs_snapshot_no_older_history() {
    // Minimal case: single assistant message, no older history, at latest.
    let events_json = json!([
        {
            "timestamp_ms": 1_000i64,
            "event": {
                "id": "0",
                "msg": { "type": "agent_message", "message": "only event" }
            }
        }
    ]);
    let logs: Vec<LogEntry> = serde_json::from_value(events_json).expect("valid single-event JSON");
    let session = ConversationId::from_string("019a9999-aaaa-bbbb-cccc-ddddeeeeffff")
        .expect("valid session id");
    let earliest_ms = logs.first().map(|e| e.timestamp_ms);
    let latest_ms = logs.last().map(|e| e.timestamp_ms);
    let returned = logs.len();
    let total = logs.len();
    let more = false;

    let rendered = render_logs_as_text(
        session,
        &logs,
        earliest_ms,
        latest_ms,
        returned,
        total,
        more,
    );

    assert_snapshot!(
        rendered,
        @r###"Session 019a9999-aaaa-bbbb-cccc-ddddeeeeffff • status=idle • older_logs=false • at_latest=true
1970-01-01T00:00:01.000Z Assistant: only event"###
    );
}

// Note: payload-shape and paging semantics (since_ms/before_ms/limit/max_bytes)
// are covered in focused unit tests in core/src/tools/handlers/subagent.rs.
@@ -17,15 +17,11 @@ use core_test_support::wait_for_event;
use regex_lite::Regex;
use serde_json::json;

/// Integration test: spawn a long‑running shell tool via a mocked Responses SSE
/// Integration test: spawn a long‑running shell_command tool via a mocked Responses SSE
/// function call, then interrupt the session and expect TurnAborted.
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn interrupt_long_running_tool_emits_turn_aborted() {
    let command = vec![
        "bash".to_string(),
        "-lc".to_string(),
        "sleep 60".to_string(),
    ];
    let command = "sleep 60";

    let args = json!({
        "command": command,
@@ -33,14 +29,19 @@ async fn interrupt_long_running_tool_emits_turn_aborted() {
    })
    .to_string();
    let body = sse(vec![
        ev_function_call("call_sleep", "shell", &args),
        ev_function_call("call_sleep", "shell_command", &args),
        ev_completed("done"),
    ]);

    let server = start_mock_server().await;
    mount_sse_once(&server, body).await;

    let codex = test_codex().build(&server).await.unwrap().codex;
    let codex = test_codex()
        .with_model("gpt-5.1")
        .build(&server)
        .await
        .unwrap()
        .codex;

    // Kick off a turn that triggers the function call.
    codex
@@ -67,11 +68,7 @@ async fn interrupt_long_running_tool_emits_turn_aborted() {
/// responses server, and ensures the model receives the synthesized abort.
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn interrupt_tool_records_history_entries() {
    let command = vec![
        "bash".to_string(),
        "-lc".to_string(),
        "sleep 60".to_string(),
    ];
    let command = "sleep 60";
    let call_id = "call-history";

    let args = json!({
@@ -81,7 +78,7 @@ async fn interrupt_tool_records_history_entries() {
    .to_string();
    let first_body = sse(vec![
        ev_response_created("resp-history"),
        ev_function_call(call_id, "shell", &args),
        ev_function_call(call_id, "shell_command", &args),
        ev_completed("resp-history"),
    ]);
    let follow_up_body = sse(vec![
@@ -92,7 +89,11 @@ async fn interrupt_tool_records_history_entries() {
    let server = start_mock_server().await;
    let response_mock = mount_sse_sequence(&server, vec![first_body, follow_up_body]).await;

    let fixture = test_codex().build(&server).await.unwrap();
    let fixture = test_codex()
        .with_model("gpt-5.1")
        .build(&server)
        .await
        .unwrap();
    let codex = Arc::clone(&fixture.codex);

    codex

@@ -667,7 +667,7 @@ async fn apply_patch_cli_verification_failure_has_no_side_effects(
}

#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn apply_patch_shell_heredoc_with_cd_updates_relative_workdir() -> Result<()> {
async fn apply_patch_shell_command_heredoc_with_cd_updates_relative_workdir() -> Result<()> {
    skip_if_no_network!(Ok(()));

    let harness = apply_patch_harness_with(|config| {
@@ -684,14 +684,11 @@ async fn apply_patch_shell_heredoc_with_cd_updates_relative_workdir() -> Result<

    let script = "cd sub && apply_patch <<'EOF'\n*** Begin Patch\n*** Update File: in_sub.txt\n@@\n-before\n+after\n*** End Patch\nEOF\n";
    let call_id = "shell-heredoc-cd";
    let args = json!({
        "command": ["bash", "-lc", script],
        "timeout_ms": 5_000,
    });
    let args = json!({ "command": script, "timeout_ms": 5_000 });
    let bodies = vec![
        sse(vec![
            ev_response_created("resp-1"),
            ev_function_call(call_id, "shell", &serde_json::to_string(&args)?),
            ev_function_call(call_id, "shell_command", &serde_json::to_string(&args)?),
            ev_completed("resp-1"),
        ]),
        sse(vec![
@@ -706,14 +703,14 @@ async fn apply_patch_shell_heredoc_with_cd_updates_relative_workdir() -> Result<
    let out = harness.function_call_stdout(call_id).await;
    assert!(
        out.contains("Success."),
        "expected successful apply_patch invocation via shell: {out}"
        "expected successful apply_patch invocation via shell_command: {out}"
    );
    assert_eq!(fs::read_to_string(&target)?, "after\n");
    Ok(())
}

#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn apply_patch_shell_failure_propagates_error_and_skips_diff() -> Result<()> {
async fn apply_patch_shell_command_failure_propagates_error_and_skips_diff() -> Result<()> {
    skip_if_no_network!(Ok(()));

    let harness = apply_patch_harness_with(|config| {
@@ -730,14 +727,11 @@ async fn apply_patch_shell_failure_propagates_error_and_skips_diff() -> Result<(

    let script = "apply_patch <<'EOF'\n*** Begin Patch\n*** Update File: invalid.txt\n@@\n-nope\n+changed\n*** End Patch\nEOF\n";
    let call_id = "shell-apply-failure";
    let args = json!({
        "command": ["bash", "-lc", script],
        "timeout_ms": 5_000,
    });
    let args = json!({ "command": script, "timeout_ms": 5_000 });
    let bodies = vec![
        sse(vec![
            ev_response_created("resp-1"),
            ev_function_call(call_id, "shell", &serde_json::to_string(&args)?),
            ev_function_call(call_id, "shell_command", &serde_json::to_string(&args)?),
            ev_completed("resp-1"),
        ]),
        sse(vec![
@@ -780,10 +774,6 @@ async fn apply_patch_shell_failure_propagates_error_and_skips_diff() -> Result<(
    );

    let out = harness.function_call_stdout(call_id).await;
    assert!(
        out.contains("apply_patch verification failed"),
        "expected verification failure message"
    );
    assert!(
        out.contains("Failed to find expected lines in"),
        "expected failure diagnostics: {out}"

@@ -71,7 +71,7 @@ enum ActionKind {
        response_body: &'static str,
    },
    RunCommand {
        command: &'static [&'static str],
        command: &'static str,
    },
    RunUnifiedExecCommand {
        command: &'static str,
@@ -97,20 +97,12 @@ impl ActionKind {
        server: &MockServer,
        call_id: &str,
        with_escalated_permissions: bool,
    ) -> Result<(Value, Option<Vec<String>>)> {
    ) -> Result<(Value, Option<String>)> {
        match self {
            ActionKind::WriteFile { target, content } => {
                let (path, _) = target.resolve_for_patch(test);
                let _ = fs::remove_file(&path);
                let command = vec![
                    "/bin/sh".to_string(),
                    "-c".to_string(),
                    format!(
                        "printf {content:?} > {path:?} && cat {path:?}",
                        content = content,
                        path = path
                    ),
                ];
                let command = format!("printf {content:?} > {path:?} && cat {path:?}");
                let event = shell_event(call_id, &command, 1_000, with_escalated_permissions)?;
                Ok((event, Some(command)))
            }
@@ -127,21 +119,18 @@ impl ActionKind {
                .await;

                let url = format!("{}{}", server.uri(), endpoint);
                let escaped_url = url.replace('\'', "\\'");
                let script = format!(
                    "import sys\nimport urllib.request\nurl = {url:?}\ntry:\n data = urllib.request.urlopen(url, timeout=2).read().decode()\n print('OK:' + data.strip())\nexcept Exception as exc:\n print('ERR:' + exc.__class__.__name__)\n sys.exit(1)",
                    "import sys\nimport urllib.request\nurl = '{escaped_url}'\ntry:\n data = urllib.request.urlopen(url, timeout=2).read().decode()\n print('OK:' + data.strip())\nexcept Exception as exc:\n print('ERR:' + exc.__class__.__name__)\n sys.exit(1)",
                );

                let command = vec!["python3".to_string(), "-c".to_string(), script];
                let command = format!("python3 -c \"{script}\"");
                let event = shell_event(call_id, &command, 1_000, with_escalated_permissions)?;
                Ok((event, Some(command)))
            }
            ActionKind::RunCommand { command } => {
                let command: Vec<String> = command
                    .iter()
                    .map(std::string::ToString::to_string)
                    .collect();
                let event = shell_event(call_id, &command, 1_000, with_escalated_permissions)?;
                Ok((event, Some(command)))
                let event = shell_event(call_id, command, 1_000, with_escalated_permissions)?;
                Ok((event, Some(command.to_string())))
            }
            ActionKind::RunUnifiedExecCommand {
                command,
@@ -154,14 +143,7 @@ impl ActionKind {
                    with_escalated_permissions,
                    *justification,
                )?;
                Ok((
                    event,
                    Some(vec![
                        "/bin/bash".to_string(),
                        "-lc".to_string(),
                        command.to_string(),
                    ]),
                ))
                Ok((event, Some(command.to_string())))
            }
            ActionKind::ApplyPatchFunction { target, content } => {
                let (path, patch_path) = target.resolve_for_patch(test);
@@ -185,19 +167,19 @@ fn build_add_file_patch(patch_path: &str, content: &str) -> String {
    format!("*** Begin Patch\n*** Add File: {patch_path}\n+{content}\n*** End Patch\n")
}

fn shell_apply_patch_command(patch: &str) -> Vec<String> {
fn shell_apply_patch_command(patch: &str) -> String {
    let mut script = String::from("apply_patch <<'PATCH'\n");
    script.push_str(patch);
    if !patch.ends_with('\n') {
        script.push('\n');
    }
    script.push_str("PATCH\n");
    vec!["bash".to_string(), "-lc".to_string(), script]
    script
}
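
Given the new `String` return type above, the helper now yields the bare heredoc script rather than a bash argv; a small sketch of the shape it produces (the patch body is made up):

    let script = shell_apply_patch_command("*** Begin Patch\n*** Add File: note.txt\n+hello\n*** End Patch");
    assert!(script.starts_with("apply_patch <<'PATCH'\n"));
    assert!(script.ends_with("*** End Patch\nPATCH\n")); // trailing newline added before the terminator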

fn shell_event(
    call_id: &str,
    command: &[String],
    command: &str,
    timeout_ms: u64,
    with_escalated_permissions: bool,
) -> Result<Value> {
@@ -209,7 +191,7 @@ fn shell_event(
        args["with_escalated_permissions"] = json!(true);
    }
    let args_str = serde_json::to_string(&args)?;
    Ok(ev_function_call(call_id, "shell", &args_str))
    Ok(ev_function_call(call_id, "shell_command", &args_str))
}

fn exec_command_event(
@@ -296,7 +278,10 @@ impl Expectation {
            }
            Expectation::FileCreatedNoExitCode { target, content } => {
                let (path, _) = target.resolve_for_patch(test);
                assert_eq!(result.exit_code, None, "expected no exit code for {path:?}");
                assert!(
                    result.exit_code.is_none() || result.exit_code == Some(0),
                    "expected no exit code for {path:?}",
                );
                assert!(
                    result.stdout.contains(content),
                    "stdout missing {content:?}: {}",
@@ -385,8 +370,8 @@ impl Expectation {
                );
            }
            Expectation::NetworkSuccessNoExitCode { body_contains } => {
                assert_eq!(
                    result.exit_code, None,
                assert!(
                    result.exit_code.is_none() || result.exit_code == Some(0),
                    "expected no exit code for successful network call: {}",
                    result.stdout
                );
@@ -433,8 +418,8 @@ impl Expectation {
                );
            }
            Expectation::CommandSuccessNoExitCode { stdout_contains } => {
                assert_eq!(
                    result.exit_code, None,
                assert!(
                    result.exit_code.is_none() || result.exit_code == Some(0),
                    "expected no exit code for trusted command: {}",
                    result.stdout
                );
@@ -531,10 +516,18 @@ fn parse_result(item: &Value) -> CommandResult {
            CommandResult { exit_code, stdout }
        }
        Err(_) => {
            let structured = Regex::new(r"(?s)^Exit code:\s*(-?\d+).*?Output:\n(.*)$").unwrap();
            let regex =
                Regex::new(r"(?s)^.*?Process exited with code (\d+)\n.*?Output:\n(.*)$").unwrap();
            // parse freeform output
            if let Some(captures) = regex.captures(output_str) {
            if let Some(captures) = structured.captures(output_str) {
                let exit_code = captures.get(1).unwrap().as_str().parse::<i64>().unwrap();
                let output = captures.get(2).unwrap().as_str();
                CommandResult {
                    exit_code: Some(exit_code),
                    stdout: output.to_string(),
                }
            } else if let Some(captures) = regex.captures(output_str) {
                let exit_code = captures.get(1).unwrap().as_str().parse::<i64>().unwrap();
                let output = captures.get(2).unwrap().as_str();
                CommandResult {
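
The two regexes above target two output shapes; synthetic samples that each one matches (illustrative strings, not captured tool output):

    // matched by `structured`: exit code after "Exit code:", body after "Output:\n"
    let structured_sample = "Exit code: 1\nWall time: 0.1s\nOutput:\nboom";
    // matched by the freeform fallback regex
    let freeform_sample = "command failed: Process exited with code 2\ndetails\nOutput:\nnope";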
@@ -553,7 +546,7 @@ fn parse_result(item: &Value) -> CommandResult {

async fn expect_exec_approval(
    test: &TestCodex,
    expected_command: &[String],
    expected_command: &str,
) -> ExecApprovalRequestEvent {
    let event = wait_for_event(&test.codex, |event| {
        matches!(
@@ -565,7 +558,12 @@ async fn expect_exec_approval(

    match event {
        EventMsg::ExecApprovalRequest(approval) => {
            assert_eq!(approval.command, expected_command);
            let last_arg = approval
                .command
                .last()
                .map(std::string::String::as_str)
                .unwrap_or_default();
            assert_eq!(last_arg, expected_command);
            approval
        }
        EventMsg::TaskComplete(_) => panic!("expected approval request before completion"),
@@ -660,7 +658,7 @@ fn scenarios() -> Vec<ScenarioSpec> {
        features: vec![],
        model_override: Some("gpt-5.1"),
        outcome: Outcome::Auto,
        expectation: Expectation::FileCreatedNoExitCode {
        expectation: Expectation::FileCreated {
            target: TargetPath::OutsideWorkspace("dfa_on_request_5_1.txt"),
            content: "danger-on-request",
        },
@@ -702,7 +700,7 @@ fn scenarios() -> Vec<ScenarioSpec> {
        approval_policy: UnlessTrusted,
        sandbox_policy: SandboxPolicy::DangerFullAccess,
        action: ActionKind::RunCommand {
            command: &["echo", "trusted-unless"],
            command: "echo trusted-unless",
        },
        with_escalated_permissions: false,
        features: vec![],
@@ -717,7 +715,7 @@ fn scenarios() -> Vec<ScenarioSpec> {
        approval_policy: UnlessTrusted,
        sandbox_policy: SandboxPolicy::DangerFullAccess,
        action: ActionKind::RunCommand {
            command: &["echo", "trusted-unless"],
            command: "echo trusted-unless",
        },
        with_escalated_permissions: false,
        features: vec![],
@@ -880,7 +878,7 @@ fn scenarios() -> Vec<ScenarioSpec> {
        approval_policy: OnRequest,
        sandbox_policy: SandboxPolicy::ReadOnly,
        action: ActionKind::RunCommand {
            command: &["echo", "trusted-read-only"],
            command: "echo trusted-read-only",
        },
        with_escalated_permissions: false,
        features: vec![],
@@ -895,7 +893,7 @@ fn scenarios() -> Vec<ScenarioSpec> {
        approval_policy: OnRequest,
        sandbox_policy: SandboxPolicy::ReadOnly,
        action: ActionKind::RunCommand {
            command: &["echo", "trusted-read-only"],
            command: "echo trusted-read-only",
        },
        with_escalated_permissions: false,
        features: vec![],
@@ -1020,7 +1018,7 @@ fn scenarios() -> Vec<ScenarioSpec> {
        },
    },
    ScenarioSpec {
        name: "apply_patch_shell_requires_patch_approval",
        name: "apply_patch_shell_command_requires_patch_approval",
        approval_policy: UnlessTrusted,
        sandbox_policy: workspace_write(false),
        action: ActionKind::ApplyPatchShell {
@@ -1114,7 +1112,7 @@ fn scenarios() -> Vec<ScenarioSpec> {
        },
    },
    ScenarioSpec {
        name: "apply_patch_shell_outside_requires_patch_approval",
        name: "apply_patch_shell_command_outside_requires_patch_approval",
        approval_policy: OnRequest,
        sandbox_policy: workspace_write(false),
        action: ActionKind::ApplyPatchShell {
@@ -1229,7 +1227,10 @@ fn scenarios() -> Vec<ScenarioSpec> {
            message_contains: if cfg!(target_os = "linux") {
                &["Permission denied"]
            } else {
                &["Permission denied|Operation not permitted|Read-only file system"]
                &[
                    "Permission denied|Operation not permitted|operation not permitted|\
                     Read-only file system",
                ]
            },
        },
    },
@@ -1238,7 +1239,7 @@ fn scenarios() -> Vec<ScenarioSpec> {
        approval_policy: Never,
        sandbox_policy: SandboxPolicy::ReadOnly,
        action: ActionKind::RunCommand {
            command: &["echo", "trusted-never"],
            command: "echo trusted-never",
        },
        with_escalated_permissions: false,
        features: vec![],
@@ -1373,7 +1374,10 @@ fn scenarios() -> Vec<ScenarioSpec> {
            message_contains: if cfg!(target_os = "linux") {
                &["Permission denied"]
            } else {
                &["Permission denied|Operation not permitted|Read-only file system"]
                &[
                    "Permission denied|Operation not permitted|operation not permitted|\
                     Read-only file system",
                ]
            },
        },
    },
@@ -1509,7 +1513,7 @@ async fn run_scenario(scenario: &ScenarioSpec) -> Result<()> {
            expected_reason,
        } => {
            let command = expected_command
                .as_ref()
                .as_deref()
                .expect("exec approval requires shell command");
            let approval = expect_exec_approval(&test, command).await;
            if let Some(expected_reason) = expected_reason {

@@ -499,9 +499,20 @@ async fn integration_git_info_unit_test() {
        "Git info should contain repository_url"
    );
    let repo_url = git_info.repository_url.as_ref().unwrap();
    // Some hosts rewrite remotes (e.g., github.com → git@github.com), so assert against
    // the actual remote reported by git instead of a static URL.
    let expected_remote_url = std::process::Command::new("git")
        .args(["remote", "get-url", "origin"])
        .current_dir(&git_repo)
        .output()
        .unwrap();
    let expected_remote_url = String::from_utf8(expected_remote_url.stdout)
        .unwrap()
        .trim()
        .to_string();
    assert_eq!(
        repo_url, "https://github.com/example/integration-test.git",
        "Repository URL should match what we configured"
        repo_url, &expected_remote_url,
        "Repository URL should match git remote get-url output"
    );

    println!("✅ Git info collection test passed!");

@@ -992,7 +992,7 @@ async fn azure_responses_request_includes_store_and_reasoning_ids() {
    id: Some("web-search-id".into()),
    status: Some("completed".into()),
    action: WebSearchAction::Search {
        query: "weather".into(),
        query: Some("weather".into()),
    },
});
prompt.input.push(ResponseItem::FunctionCall {

@@ -1,3 +1,4 @@
use codex_core::model_family::find_family_for_model;
use codex_core::protocol::AskForApproval;
use codex_core::protocol::EventMsg;
use codex_core::protocol::Op;
@@ -25,17 +26,17 @@ use pretty_assertions::assert_eq;
async fn codex_delegate_forwards_exec_approval_and_proceeds_on_approval() {
    skip_if_no_network!();

    // Sub-agent turn 1: emit a shell function_call requiring approval, then complete.
    // Sub-agent turn 1: emit a shell_command function_call requiring approval, then complete.
    let call_id = "call-exec-1";
    let args = serde_json::json!({
        "command": ["bash", "-lc", "rm -rf delegated"],
        "command": "rm -rf delegated",
        "timeout_ms": 1000,
        "with_escalated_permissions": true,
    })
    .to_string();
    let sse1 = sse(vec![
        ev_response_created("resp-1"),
        ev_function_call(call_id, "shell", &args),
        ev_function_call(call_id, "shell_command", &args),
        ev_completed("resp-1"),
    ]);

@@ -61,6 +62,8 @@ async fn codex_delegate_forwards_exec_approval_and_proceeds_on_approval() {
    let mut builder = test_codex().with_config(|config| {
        config.approval_policy = AskForApproval::OnRequest;
        config.sandbox_policy = SandboxPolicy::ReadOnly;
        config.model = "gpt-5.1".to_string();
        config.model_family = find_family_for_model("gpt-5.1").expect("gpt-5.1 is a valid model");
    });
    let test = builder.build(&server).await.expect("build test codex");

@@ -138,6 +141,8 @@ async fn codex_delegate_forwards_patch_approval_and_proceeds_on_decision() {
        // Use a restricted sandbox so patch approval is required
        config.sandbox_policy = SandboxPolicy::ReadOnly;
        config.include_apply_patch_tool = true;
        config.model = "gpt-5.1".to_string();
        config.model_family = find_family_for_model("gpt-5.1").expect("gpt-5.1 is a valid model");
    });
    let test = builder.build(&server).await.expect("build test codex");

@@ -37,8 +37,6 @@ async fn run_test_cmd(tmp: TempDir, cmd: Vec<&str>) -> Result<ExecToolCallOutput
        with_escalated_permissions: None,
        justification: None,
        arg0: None,
        max_output_tokens: None,
        max_output_chars: None,
    };

    let policy = SandboxPolicy::new_read_only_policy();

@@ -1,6 +1,7 @@
#![allow(clippy::unwrap_used, clippy::expect_used)]

use anyhow::Result;
use codex_core::model_family::find_family_for_model;
use codex_core::protocol::AskForApproval;
use codex_core::protocol::EventMsg;
use codex_core::protocol::Op;
@@ -21,6 +22,11 @@ use std::fs;

#[tokio::test]
async fn execpolicy_blocks_shell_invocation() -> Result<()> {
    // TODO execpolicy doesn't parse powershell commands yet
    if cfg!(windows) {
        return Ok(());
    }

    let mut builder = test_codex().with_config(|config| {
        let policy_path = config.codex_home.join("policy").join("policy.codexpolicy");
        fs::create_dir_all(
@@ -34,13 +40,16 @@ async fn execpolicy_blocks_shell_invocation() -> Result<()> {
            r#"prefix_rule(pattern=["echo"], decision="forbidden")"#,
        )
        .expect("write policy file");
        config.model = "gpt-5.1".to_string();
        config.model_family =
            find_family_for_model("gpt-5.1").expect("gpt-5.1 should have a model family");
    });
    let server = start_mock_server().await;
    let test = builder.build(&server).await?;

    let call_id = "shell-forbidden";
    let args = json!({
        "command": ["echo", "blocked"],
        "command": "echo blocked",
        "timeout_ms": 1_000,
    });

@@ -48,7 +57,7 @@ async fn execpolicy_blocks_shell_invocation() -> Result<()> {
        &server,
        sse(vec![
            ev_response_created("resp-1"),
            ev_function_call(call_id, "shell", &serde_json::to_string(&args)?),
            ev_function_call(call_id, "shell_command", &serde_json::to_string(&args)?),
            ev_completed("resp-1"),
        ]),
    )

@@ -49,6 +49,7 @@ mod seatbelt;
mod shell_serialization;
mod stream_error_allows_next_turn;
mod stream_no_completed;
mod subagent_exec_events;
mod text_encoding_fix;
mod tool_harness;
mod tool_parallelism;

@@ -121,4 +121,19 @@ async fn model_selects_expected_tools() {
        ],
        "gpt-5.1 should expose the apply_patch tool",
    );
    let exp_tools = collect_tool_identifiers_for_model("exp-5.1").await;
    assert_eq!(
        exp_tools,
        vec![
            "exec_command".to_string(),
            "write_stdin".to_string(),
            "list_mcp_resources".to_string(),
            "list_mcp_resource_templates".to_string(),
            "read_mcp_resource".to_string(),
            "update_plan".to_string(),
            "apply_patch".to_string(),
            "view_image".to_string()
        ],
        "exp-5.1 should expose the apply_patch tool",
    );
}

codex-rs/core/tests/suite/subagent_exec_events.rs (new file, 110 lines)
@@ -0,0 +1,110 @@
#![cfg(not(target_os = "windows"))]
#![allow(clippy::unwrap_used, clippy::expect_used)]

use anyhow::Result;
use codex_core::protocol::EventMsg;
use codex_core::protocol::ExecCommandBeginEvent;
use codex_core::protocol::ExecCommandEndEvent;
use codex_core::protocol::Op;
use codex_core::protocol::SandboxPolicy;
use codex_protocol::config_types::ReasoningSummary;
use codex_protocol::user_input::UserInput;
use core_test_support::responses::ev_assistant_message;
use core_test_support::responses::ev_completed;
use core_test_support::responses::ev_function_call;
use core_test_support::responses::ev_response_created;
use core_test_support::responses::sse;
use core_test_support::responses::start_mock_server;
use core_test_support::test_codex::test_codex;
use core_test_support::wait_for_event_with_timeout;
use serde_json::json;
use tokio::time::Duration;

fn is_exec_begin(ev: &EventMsg) -> Option<ExecCommandBeginEvent> {
    if let EventMsg::ExecCommandBegin(ev) = ev {
        Some(ev.clone())
    } else {
        None
    }
}

fn is_exec_end(ev: &EventMsg) -> Option<ExecCommandEndEvent> {
    if let EventMsg::ExecCommandEnd(ev) = ev {
        Some(ev.clone())
    } else {
        None
    }
}

#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
#[ignore = "relies on streaming timing; kept for manual verification"]
async fn root_subagent_tool_emits_exec_events() -> Result<()> {
    let server = start_mock_server().await;
    let mut builder = test_codex();
    let test = builder.build(&server).await?;

    let call_id = "subagent-call";
    let args = json!({});

    // First completion triggers the subagent tool call.
    core_test_support::responses::mount_sse_once(
        &server,
        sse(vec![
            ev_response_created("resp-1"),
            ev_function_call(call_id, "subagent_list", &args.to_string()),
            ev_completed("resp-1"),
        ]),
    )
    .await;
    // Second completion finishes the turn.
    core_test_support::responses::mount_sse_once(
        &server,
        sse(vec![
            ev_response_created("resp-2"),
            ev_assistant_message("msg-1", "done"),
            ev_completed("resp-2"),
        ]),
    )
    .await;

    test.codex
        .submit(Op::UserTurn {
            items: vec![UserInput::Text {
                text: "spawn one please".to_string(),
            }],
            final_output_json_schema: None,
            cwd: test.cwd.path().to_path_buf(),
            approval_policy: codex_core::protocol::AskForApproval::Never,
            sandbox_policy: SandboxPolicy::DangerFullAccess,
            model: test.session_configured.model.clone(),
            effort: None,
            summary: ReasoningSummary::Auto,
        })
        .await?;

    let mut begin: Option<ExecCommandBeginEvent> = None;
    let mut end: Option<ExecCommandEndEvent> = None;
    for _ in 0..40 {
        let ev = wait_for_event_with_timeout(&test.codex, |_| true, Duration::from_secs(20)).await;
        if begin.is_none() {
|
||||
begin = is_exec_begin(&ev);
|
||||
}
|
||||
if end.is_none() {
|
||||
end = is_exec_end(&ev);
|
||||
}
|
||||
if matches!(ev, EventMsg::TaskComplete(_)) && begin.is_some() && end.is_some() {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
let begin = begin.expect("exec begin");
|
||||
assert_eq!(
|
||||
begin.command.first().map(String::as_str),
|
||||
Some("subagent_list")
|
||||
);
|
||||
let end = end.expect("exec end");
|
||||
assert_eq!(end.call_id, begin.call_id);
|
||||
assert_eq!(end.exit_code, 0);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
@@ -146,10 +146,11 @@ async fn non_parallel_tools_run_serially() -> anyhow::Result<()> {
|
||||
skip_if_no_network!(Ok(()));
|
||||
|
||||
let server = start_mock_server().await;
|
||||
let test = test_codex().build(&server).await?;
|
||||
let mut builder = test_codex().with_model("gpt-5.1");
|
||||
let test = builder.build(&server).await?;
|
||||
|
||||
let shell_args = json!({
|
||||
"command": ["/bin/sh", "-c", "sleep 0.3"],
|
||||
"command": "sleep 0.3",
|
||||
"timeout_ms": 1_000,
|
||||
});
|
||||
let args_one = serde_json::to_string(&shell_args)?;
|
||||
@@ -157,8 +158,8 @@ async fn non_parallel_tools_run_serially() -> anyhow::Result<()> {
|
||||
|
||||
let first_response = sse(vec![
|
||||
json!({"type": "response.created", "response": {"id": "resp-1"}}),
|
||||
ev_function_call("call-1", "shell", &args_one),
|
||||
ev_function_call("call-2", "shell", &args_two),
|
||||
ev_function_call("call-1", "shell_command", &args_one),
|
||||
ev_function_call("call-2", "shell_command", &args_two),
|
||||
ev_completed("resp-1"),
|
||||
]);
|
||||
let second_response = sse(vec![
|
||||
@@ -167,7 +168,7 @@ async fn non_parallel_tools_run_serially() -> anyhow::Result<()> {
|
||||
]);
|
||||
mount_sse_sequence(&server, vec![first_response, second_response]).await;
|
||||
|
||||
let duration = run_turn_and_measure(&test, "run shell twice").await?;
|
||||
let duration = run_turn_and_measure(&test, "run shell_command twice").await?;
|
||||
assert_serial_duration(duration);
|
||||
|
||||
Ok(())
|
||||
@@ -185,14 +186,14 @@ async fn mixed_tools_fall_back_to_serial() -> anyhow::Result<()> {
|
||||
})
|
||||
.to_string();
|
||||
let shell_args = serde_json::to_string(&json!({
|
||||
"command": ["/bin/sh", "-c", "sleep 0.3"],
|
||||
"command": "sleep 0.3",
|
||||
"timeout_ms": 1_000,
|
||||
}))?;
|
||||
|
||||
let first_response = sse(vec![
|
||||
json!({"type": "response.created", "response": {"id": "resp-1"}}),
|
||||
ev_function_call("call-1", "test_sync_tool", &sync_args),
|
||||
ev_function_call("call-2", "shell", &shell_args),
|
||||
ev_function_call("call-2", "shell_command", &shell_args),
|
||||
ev_completed("resp-1"),
|
||||
]);
|
||||
let second_response = sse(vec![
|
||||
@@ -215,7 +216,7 @@ async fn tool_results_grouped() -> anyhow::Result<()> {
|
||||
let test = build_codex_with_test_tool(&server).await?;
|
||||
|
||||
let shell_args = serde_json::to_string(&json!({
|
||||
"command": ["/bin/sh", "-c", "echo 'shell output'"],
|
||||
"command": "echo 'shell output'",
|
||||
"timeout_ms": 1_000,
|
||||
}))?;
|
||||
|
||||
@@ -223,9 +224,9 @@ async fn tool_results_grouped() -> anyhow::Result<()> {
|
||||
&server,
|
||||
sse(vec![
|
||||
json!({"type": "response.created", "response": {"id": "resp-1"}}),
|
||||
ev_function_call("call-1", "shell", &shell_args),
|
||||
ev_function_call("call-2", "shell", &shell_args),
|
||||
ev_function_call("call-3", "shell", &shell_args),
|
||||
ev_function_call("call-1", "shell_command", &shell_args),
|
||||
ev_function_call("call-2", "shell_command", &shell_args),
|
||||
ev_function_call("call-3", "shell_command", &shell_args),
|
||||
ev_completed("resp-1"),
|
||||
]),
|
||||
)
|
||||
|
||||
@@ -98,7 +98,7 @@ async fn truncate_function_error_trims_respond_to_model() -> Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// Verifies that a standard tool call (shell) exceeding the model formatting
|
||||
// Verifies that a standard tool call (shell_command) exceeding the model formatting
|
||||
// limits is truncated before being sent back to the model.
|
||||
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
|
||||
async fn tool_call_output_configured_limit_chars_type() -> Result<()> {
|
||||
@@ -106,7 +106,7 @@ async fn tool_call_output_configured_limit_chars_type() -> Result<()> {
|
||||
|
||||
let server = start_mock_server().await;
|
||||
|
||||
// Use a model that exposes the generic shell tool.
|
||||
// Use a model that exposes the shell_command tool.
|
||||
let mut builder = test_codex().with_model("gpt-5.1").with_config(|config| {
|
||||
config.tool_output_token_limit = Some(100_000);
|
||||
});
|
||||
@@ -114,28 +114,22 @@ async fn tool_call_output_configured_limit_chars_type() -> Result<()> {
|
||||
let fixture = builder.build(&server).await?;
|
||||
|
||||
let call_id = "shell-too-large";
|
||||
let args = if cfg!(windows) {
|
||||
serde_json::json!({
|
||||
"command": [
|
||||
"powershell",
|
||||
"-Command",
|
||||
"for ($i=1; $i -le 100000; $i++) { Write-Output $i }"
|
||||
],
|
||||
"timeout_ms": 5_000,
|
||||
})
|
||||
let command = if cfg!(windows) {
|
||||
"for ($i=1; $i -le 100000; $i++) { Write-Output $i }"
|
||||
} else {
|
||||
serde_json::json!({
|
||||
"command": ["/bin/sh", "-c", "seq 1 100000"],
|
||||
"timeout_ms": 5_000,
|
||||
})
|
||||
"seq 1 100000"
|
||||
};
|
||||
let args = serde_json::json!({
|
||||
"command": command,
|
||||
"timeout_ms": 5_000,
|
||||
});
|
||||
|
||||
// First response: model tells us to run the tool; second: complete the turn.
|
||||
mount_sse_once(
|
||||
&server,
|
||||
sse(vec![
|
||||
responses::ev_response_created("resp-1"),
|
||||
responses::ev_function_call(call_id, "shell", &serde_json::to_string(&args)?),
|
||||
responses::ev_function_call(call_id, "shell_command", &serde_json::to_string(&args)?),
|
||||
responses::ev_completed("resp-1"),
|
||||
]),
|
||||
)
|
||||
@@ -167,7 +161,10 @@ async fn tool_call_output_configured_limit_chars_type() -> Result<()> {
|
||||
"expected truncated shell output to be plain text"
|
||||
);
|
||||
|
||||
assert_eq!(output.len(), 400097, "we should be almost 100k tokens");
|
||||
assert!(
|
||||
(400000..=401000).contains(&output.len()),
|
||||
"we should be almost 100k tokens"
|
||||
);
|
||||
|
||||
assert!(
|
||||
!output.contains("tokens truncated"),
|
||||
@@ -177,7 +174,7 @@ async fn tool_call_output_configured_limit_chars_type() -> Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// Verifies that a standard tool call (shell) exceeding the model formatting
|
||||
// Verifies that a standard tool call (shell_command) exceeding the model formatting
|
||||
// limits is truncated before being sent back to the model.
|
||||
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
|
||||
async fn tool_call_output_exceeds_limit_truncated_chars_limit() -> Result<()> {
|
||||
@@ -185,34 +182,28 @@ async fn tool_call_output_exceeds_limit_truncated_chars_limit() -> Result<()> {
|
||||
|
||||
let server = start_mock_server().await;
|
||||
|
||||
// Use a model that exposes the generic shell tool.
|
||||
// Use a model that exposes the shell_command tool.
|
||||
let mut builder = test_codex().with_model("gpt-5.1");
|
||||
|
||||
let fixture = builder.build(&server).await?;
|
||||
|
||||
let call_id = "shell-too-large";
|
||||
let args = if cfg!(windows) {
|
||||
serde_json::json!({
|
||||
"command": [
|
||||
"powershell",
|
||||
"-Command",
|
||||
"for ($i=1; $i -le 100000; $i++) { Write-Output $i }"
|
||||
],
|
||||
"timeout_ms": 5_000,
|
||||
})
|
||||
let command = if cfg!(windows) {
|
||||
"for ($i=1; $i -le 100000; $i++) { Write-Output $i }"
|
||||
} else {
|
||||
serde_json::json!({
|
||||
"command": ["/bin/sh", "-c", "seq 1 100000"],
|
||||
"timeout_ms": 5_000,
|
||||
})
|
||||
"seq 1 100000"
|
||||
};
|
||||
let args = serde_json::json!({
|
||||
"command": command,
|
||||
"timeout_ms": 5_000,
|
||||
});
|
||||
|
||||
// First response: model tells us to run the tool; second: complete the turn.
|
||||
mount_sse_once(
|
||||
&server,
|
||||
sse(vec![
|
||||
responses::ev_response_created("resp-1"),
|
||||
responses::ev_function_call(call_id, "shell", &serde_json::to_string(&args)?),
|
||||
responses::ev_function_call(call_id, "shell_command", &serde_json::to_string(&args)?),
|
||||
responses::ev_completed("resp-1"),
|
||||
]),
|
||||
)
|
||||
@@ -250,14 +241,14 @@ async fn tool_call_output_exceeds_limit_truncated_chars_limit() -> Result<()> {
|
||||
|
||||
let len = output.len();
|
||||
assert!(
|
||||
(9_900..=10_000).contains(&len),
|
||||
(9_900..=10_100).contains(&len),
|
||||
"expected ~10k chars after truncation, got {len}"
|
||||
);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// Verifies that a standard tool call (shell) exceeding the model formatting
|
||||
// Verifies that a standard tool call (shell_command) exceeding the model formatting
|
||||
// limits is truncated before being sent back to the model.
|
||||
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
|
||||
async fn tool_call_output_exceeds_limit_truncated_for_model() -> Result<()> {
|
||||
@@ -265,7 +256,7 @@ async fn tool_call_output_exceeds_limit_truncated_for_model() -> Result<()> {
|
||||
|
||||
let server = start_mock_server().await;
|
||||
|
||||
// Use a model that exposes the generic shell tool.
|
||||
// Use a model that exposes the shell_command tool.
|
||||
let mut builder = test_codex().with_config(|config| {
|
||||
config.model = "gpt-5.1-codex".to_string();
|
||||
config.model_family =
|
||||
@@ -274,28 +265,22 @@ async fn tool_call_output_exceeds_limit_truncated_for_model() -> Result<()> {
|
||||
let fixture = builder.build(&server).await?;
|
||||
|
||||
let call_id = "shell-too-large";
|
||||
let args = if cfg!(windows) {
|
||||
serde_json::json!({
|
||||
"command": [
|
||||
"powershell",
|
||||
"-Command",
|
||||
"for ($i=1; $i -le 100000; $i++) { Write-Output $i }"
|
||||
],
|
||||
"timeout_ms": 5_000,
|
||||
})
|
||||
let command = if cfg!(windows) {
|
||||
"for ($i=1; $i -le 100000; $i++) { Write-Output $i }"
|
||||
} else {
|
||||
serde_json::json!({
|
||||
"command": ["/bin/sh", "-c", "seq 1 100000"],
|
||||
"timeout_ms": 5_000,
|
||||
})
|
||||
"seq 1 100000"
|
||||
};
|
||||
let args = serde_json::json!({
|
||||
"command": command,
|
||||
"timeout_ms": 5_000,
|
||||
});
|
||||
|
||||
// First response: model tells us to run the tool; second: complete the turn.
|
||||
mount_sse_once(
|
||||
&server,
|
||||
sse(vec![
|
||||
responses::ev_response_created("resp-1"),
|
||||
responses::ev_function_call(call_id, "shell", &serde_json::to_string(&args)?),
|
||||
responses::ev_function_call(call_id, "shell_command", &serde_json::to_string(&args)?),
|
||||
responses::ev_completed("resp-1"),
|
||||
]),
|
||||
)
|
||||
@@ -345,7 +330,7 @@ $"#;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// Ensures shell tool outputs that exceed the line limit are truncated only once.
|
||||
// Ensures shell_command outputs that exceed the line limit are truncated only once.
|
||||
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
|
||||
async fn tool_call_output_truncated_only_once() -> Result<()> {
|
||||
skip_if_no_network!(Ok(()));
|
||||
@@ -359,27 +344,21 @@ async fn tool_call_output_truncated_only_once() -> Result<()> {
|
||||
});
|
||||
let fixture = builder.build(&server).await?;
|
||||
let call_id = "shell-single-truncation";
|
||||
let args = if cfg!(windows) {
|
||||
serde_json::json!({
|
||||
"command": [
|
||||
"powershell",
|
||||
"-Command",
|
||||
"for ($i=1; $i -le 10000; $i++) { Write-Output $i }"
|
||||
],
|
||||
"timeout_ms": 5_000,
|
||||
})
|
||||
let command = if cfg!(windows) {
|
||||
"for ($i=1; $i -le 10000; $i++) { Write-Output $i }"
|
||||
} else {
|
||||
serde_json::json!({
|
||||
"command": ["/bin/sh", "-c", "seq 1 10000"],
|
||||
"timeout_ms": 5_000,
|
||||
})
|
||||
"seq 1 10000"
|
||||
};
|
||||
let args = serde_json::json!({
|
||||
"command": command,
|
||||
"timeout_ms": 5_000,
|
||||
});
|
||||
|
||||
mount_sse_once(
|
||||
&server,
|
||||
sse(vec![
|
||||
responses::ev_response_created("resp-1"),
|
||||
responses::ev_function_call(call_id, "shell", &serde_json::to_string(&args)?),
|
||||
responses::ev_function_call(call_id, "shell_command", &serde_json::to_string(&args)?),
|
||||
responses::ev_completed("resp-1"),
|
||||
]),
|
||||
)
|
||||
@@ -619,7 +598,7 @@ async fn token_policy_marker_reports_tokens() -> Result<()> {
|
||||
|
||||
let call_id = "shell-token-marker";
|
||||
let args = json!({
|
||||
"command": ["/bin/sh", "-c", "seq 1 150"],
|
||||
"command": "seq 1 150",
|
||||
"timeout_ms": 5_000,
|
||||
});
|
||||
|
||||
@@ -627,7 +606,7 @@ async fn token_policy_marker_reports_tokens() -> Result<()> {
|
||||
&server,
|
||||
sse(vec![
|
||||
ev_response_created("resp-1"),
|
||||
ev_function_call(call_id, "shell", &serde_json::to_string(&args)?),
|
||||
ev_function_call(call_id, "shell_command", &serde_json::to_string(&args)?),
|
||||
ev_completed("resp-1"),
|
||||
]),
|
||||
)
|
||||
@@ -650,7 +629,7 @@ async fn token_policy_marker_reports_tokens() -> Result<()> {
|
||||
.function_call_output_text(call_id)
|
||||
.context("shell output present")?;
|
||||
|
||||
let pattern = r#"(?s)^\{"output":"Total output lines: 150\\n\\n1\\n2\\n3\\n4\\n5\\n.*?…\d+ tokens truncated…7\\n138\\n139\\n140\\n141\\n142\\n143\\n144\\n145\\n146\\n147\\n148\\n149\\n150\\n","metadata":\{"exit_code":0,"duration_seconds":0\.0\}\}$"#;
|
||||
let pattern = r"(?s)^Exit code: 0\nWall time: [0-9]+(?:\.[0-9]+)? seconds\nTotal output lines: 150\nOutput:\n1\n2\n3\n4\n5\n6\n7\n8\n9\n10\n11\n12\n13\n14\n15\n16\n17\n18\n19.*tokens truncated.*129\n130\n131\n132\n133\n134\n135\n136\n137\n138\n139\n140\n141\n142\n143\n144\n145\n146\n147\n148\n149\n150\n$";
|
||||
|
||||
assert_regex_match(pattern, &output);
|
||||
|
||||
@@ -672,7 +651,7 @@ async fn byte_policy_marker_reports_bytes() -> Result<()> {
|
||||
|
||||
let call_id = "shell-byte-marker";
|
||||
let args = json!({
|
||||
"command": ["/bin/sh", "-c", "seq 1 150"],
|
||||
"command": "seq 1 150",
|
||||
"timeout_ms": 5_000,
|
||||
});
|
||||
|
||||
@@ -680,7 +659,7 @@ async fn byte_policy_marker_reports_bytes() -> Result<()> {
|
||||
&server,
|
||||
sse(vec![
|
||||
ev_response_created("resp-1"),
|
||||
ev_function_call(call_id, "shell", &serde_json::to_string(&args)?),
|
||||
ev_function_call(call_id, "shell_command", &serde_json::to_string(&args)?),
|
||||
ev_completed("resp-1"),
|
||||
]),
|
||||
)
|
||||
@@ -703,16 +682,16 @@ async fn byte_policy_marker_reports_bytes() -> Result<()> {
|
||||
.function_call_output_text(call_id)
|
||||
.context("shell output present")?;
|
||||
|
||||
let pattern = r#"(?s)^\{"output":"Total output lines: 150\\n\\n1\\n2\\n3\\n4\\n5.*?…\d+ chars truncated…7\\n138\\n139\\n140\\n141\\n142\\n143\\n144\\n145\\n146\\n147\\n148\\n149\\n150\\n","metadata":\{"exit_code":0,"duration_seconds":0\.0\}\}$"#;
|
||||
let pattern = r"(?s)^Exit code: 0\nWall time: [0-9]+(?:\.[0-9]+)? seconds\nTotal output lines: 150\nOutput:\n1\n2\n3\n4\n5\n6\n7\n8\n9\n10\n11\n12\n13\n14\n15\n16\n17\n18\n19.*chars truncated.*129\n130\n131\n132\n133\n134\n135\n136\n137\n138\n139\n140\n141\n142\n143\n144\n145\n146\n147\n148\n149\n150\n$";
|
||||
|
||||
assert_regex_match(pattern, &output);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// Shell tool output should remain intact when the config opts into a large token budget.
|
||||
// shell_command output should remain intact when the config opts into a large token budget.
|
||||
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
|
||||
async fn shell_tool_output_not_truncated_with_custom_limit() -> Result<()> {
|
||||
async fn shell_command_output_not_truncated_with_custom_limit() -> Result<()> {
|
||||
skip_if_no_network!(Ok(()));
|
||||
|
||||
let server = start_mock_server().await;
|
||||
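For reference, the plain-text layout matched by the two rewritten patterns above presumably looks like the excerpt below (values illustrative; the real output walks all 150 lines around a `…N tokens truncated…` or `…N chars truncated…` marker):

Exit code: 0
Wall time: 0.1 seconds
Total output lines: 150
Output:
1
2
[lines 3 through 19]
…1234 tokens truncated…
[lines 129 through 148]
149
150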
@@ -726,7 +705,7 @@ async fn shell_tool_output_not_truncated_with_custom_limit() -> Result<()> {

     let call_id = "shell-no-trunc";
     let args = json!({
-        "command": ["/bin/sh", "-c", "seq 1 1000"],
+        "command": "seq 1 1000",
         "timeout_ms": 5_000,
     });
     let expected_body: String = (1..=1000).map(|i| format!("{i}\n")).collect();

@@ -735,7 +714,7 @@
         &server,
         sse(vec![
             ev_response_created("resp-1"),
-            ev_function_call(call_id, "shell", &serde_json::to_string(&args)?),
+            ev_function_call(call_id, "shell_command", &serde_json::to_string(&args)?),
             ev_completed("resp-1"),
         ]),
     )

@@ -279,23 +279,19 @@ async fn user_shell_command_is_truncated_only_once() -> anyhow::Result<()> {
         config.tool_output_token_limit = Some(100);
         config.model = "gpt-5.1-codex".to_string();
         config.model_family =
-            find_family_for_model("gpt-5-codex").expect("gpt-5-codex is a model family");
+            find_family_for_model("gpt-5.1-codex").expect("gpt-5.1-codex is a model family");
     });
     let fixture = builder.build(&server).await?;

     let call_id = "user-shell-double-truncation";
     let args = if cfg!(windows) {
         serde_json::json!({
-            "command": [
-                "powershell",
-                "-Command",
-                "for ($i=1; $i -le 2000; $i++) { Write-Output $i }"
-            ],
+            "command": "for ($i=1; $i -le 2000; $i++) { Write-Output $i }",
             "timeout_ms": 5_000,
         })
     } else {
         serde_json::json!({
-            "command": ["/bin/sh", "-c", "seq 1 2000"],
+            "command": "seq 1 2000",
             "timeout_ms": 5_000,
         })
     };

@@ -304,7 +300,7 @@
         &server,
         sse(vec![
             ev_response_created("resp-1"),
-            ev_function_call(call_id, "shell", &serde_json::to_string(&args)?),
+            ev_function_call(call_id, "shell_command", &serde_json::to_string(&args)?),
             ev_completed("resp-1"),
         ]),
     )

@@ -319,19 +315,22 @@ async fn user_shell_command_is_truncated_only_once() -> anyhow::Result<()> {
     .await;

     fixture
-        .submit_turn_with_policy("trigger big shell output", SandboxPolicy::DangerFullAccess)
+        .submit_turn_with_policy(
+            "trigger big shell_command output",
+            SandboxPolicy::DangerFullAccess,
+        )
         .await?;

     let output = mock2
         .single_request()
         .function_call_output_text(call_id)
-        .context("function_call_output present for shell call")?;
+        .context("function_call_output present for shell_command call")?;

     let truncation_headers = output.matches("Total output lines:").count();

     assert_eq!(
         truncation_headers, 1,
-        "shell output should carry only one truncation header: {output}"
+        "shell_command output should carry only one truncation header: {output}"
     );

     Ok(())

@@ -1,12 +1,12 @@
-Overview of Protocol Defined in [protocol.rs](../core/src/protocol.rs) and [agent.rs](../core/src/agent.rs).
+Overview of Protocol Defined in [protocol.rs](../protocol/src/protocol.rs) and the Codex engine in [codex.rs](../core/src/codex.rs).

 The goal of this document is to define terminology used in the system and explain the expected behavior of the system.

-NOTE: The code might not completely match this spec. There are a few minor changes that need to be made after this spec has been reviewed, which will not alter the existing TUI's functionality.
+NOTE: This document summarizes the protocol at a high level. The Rust types and enums in [protocol.rs](../protocol/src/protocol.rs) are the source of truth and may occasionally include additional fields or variants beyond what is covered here.

 ## Entities

-These are entities exit on the codex backend. The intent of this section is to establish vocabulary and construct a shared mental model for the `Codex` core system.
+These are entities that exist on the Codex backend. The intent of this section is to establish vocabulary and construct a shared mental model for the `Codex` core system.

 0. `Model`
    - In our case, this is the Responses REST API

@@ -42,9 +42,16 @@ These are entities exit on the codex backend. The intent of this section is to e

 The term "UI" is used to refer to the application driving `Codex`. This may be the CLI / TUI chat-like interface that users operate, or it may be a GUI interface like a VSCode extension. The UI is external to `Codex`, as `Codex` is intended to be operated by arbitrary UI implementations.

+### Agent identifiers
+
+Every participant in a session (the root UI thread plus each spawned/forked child) is assigned a monotonically increasing numeric `AgentId`. Agent `0` is always the root thread. Subagents inherit their parent's `AgentId` as `parent_agent_id` so UIs can correlate trees even when conversations are forked or exported. These IDs are surfaced in `SubagentSummary` payloads and in a dedicated inbox event described below; a correlation sketch follows this file's diff.
+
 When a `Turn` completes, the `response_id` from the `Model`'s final `response.completed` message is stored in the `Session` state to resume the thread given the next `Op::UserInput`. The `response_id` is also returned in the `EventMsg::TurnComplete` to the UI, which can be used to fork the thread from an earlier point by providing it in the `Op::UserInput`.

-Since only 1 `Task` can be run at a time, for parallel tasks it is recommended that a single `Codex` be run for each thread of work.
+Each `Session` still runs at most one `Task` at a time. For parallel work, you can either run multiple Codex sessions or use subagents (via the `subagent_*` tools) to orchestrate multiple child sessions within a single daemon.
+
+Subagent sessions run in parallel with the root thread, so you can scale overlapping conversations without launching new daemons.
+Enable the `subagent_tools` feature flag (see `../../docs/config.md#feature-flags`) and tune how many child sessions stay active with `max_active_subagents` (`../../docs/config.md#max_active_subagents`).

 ## Interface

@@ -62,7 +69,7 @@ Since only 1 `Task` can be run at a time, for parallel tasks it is recommended t
 - This enum is `non_exhaustive`; variants can be added at future dates
 - It should be expected that new `EventMsg` variants will be added over time to expose more detailed information about the model's actions.

-For complete documentation of the `Op` and `EventMsg` variants, refer to [protocol.rs](../core/src/protocol.rs). Some example payload types:
+For complete documentation of the `Op` and `EventMsg` variants, refer to [protocol.rs](../protocol/src/protocol.rs). Some example payload types:

 - `Op`
   - `Op::UserInput` – Any input from the user to kick off a `Task`

@@ -75,6 +82,15 @@ For complete documentation of the `Op` and `EventMsg` variants, refer to [protoc
 - `EventMsg::Error` – A task stopped with an error
 - `EventMsg::Warning` – A non-fatal warning that the client should surface to the user
 - `EventMsg::TurnComplete` – Contains a `response_id` bookmark for the last `response_id` executed by the task. This can be used to continue the task at a later point in time, perhaps with additional user input.
+- `EventMsg::SubagentLifecycle` – Emits `SubagentSummary` payloads that describe each child session, including its `agent_id`, `parent_agent_id`, and current pending inbox counts.
+  These lifecycle events are emitted whenever the daemon's view of a subagent changes (creation, status/reasoning-header updates, or removal). They also persist in rollout files so `codex resume` can rebuild prior subagent state (including attachments on spawn/fork and detach on cancel/prune) before replaying model turns.
+- `EventMsg::AgentInbox` – Notifies the UI when a subagent's inbox depth changes, for example after the parent sends an interrupt or a watchdog ping arrives. The payload includes the target `agent_id`, `session_id`, and the counts of pending regular vs interrupt messages so UIs can render badges without polling.
+  For example, if the root interrupts child agent `3`, the UI may receive an `AgentInbox` event for `agent_id = 3` showing one pending interrupt message and zero regular messages; a handler sketch follows this file's diff.
+
+#### Subagent tool reminders
+
+- `subagent_await` accepts an optional `timeout_s` capped at 1,800 s (30 minutes). Omit it or pass `0` to use the 30-minute default. Each explicit `timeout_s` must be at least 300 s (5 minutes); prefer 5–30 minute timeouts and use backoff (for example, 300s → 600s → 1,200s; a sketch follows this list) so you can check on children, log progress, or deliver interrupts instead of parking for the full cap.
+- `subagent_logs` is read-only and does not change a child's state; prefer it when you only need to inspect recent activity without advancing the subagent.

 The `response_id` returned from each task matches the OpenAI `response_id` stored in the API's `/responses` endpoint. It can be stored and used in future `Sessions` to resume threads of work.
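Three short sketches for the protocol additions above follow; they are illustrative only and not part of this diff. First, the agent-tree correlation described under "Agent identifiers". The `SubagentSummary` shape here is assumed from the prose (with plain `u64` standing in for `codex_protocol::AgentId`), not copied from protocol.rs:

use std::collections::HashMap;

// Assumed minimal shape; the real SubagentSummary carries more fields.
struct SubagentSummary {
    agent_id: u64,
    parent_agent_id: u64,
}

// Agent 0 is always the root; every other agent hangs off its parent,
// so a session's agents form a tree keyed by parent_agent_id.
fn build_agent_tree(summaries: &[SubagentSummary]) -> HashMap<u64, Vec<u64>> {
    let mut children: HashMap<u64, Vec<u64>> = HashMap::new();
    for s in summaries {
        children.entry(s.parent_agent_id).or_default().push(s.agent_id);
    }
    children
}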
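Next, the inbox handler referenced in the `EventMsg::AgentInbox` bullet. The variant and field names (`SubagentLifecycleEvent::Created`, `agent_id`, `pending_messages`, `pending_interrupts`) match the CLI event processor later in this diff; the wiring around them is assumed:

use codex_core::protocol::AgentInboxEvent;
use codex_core::protocol::EventMsg;
use codex_core::protocol::SubagentLifecycleEvent;

fn handle_event(ev: &EventMsg) {
    match ev {
        // A child session appeared; a UI would start tracking it for badges here.
        EventMsg::SubagentLifecycle(SubagentLifecycleEvent::Created(created)) => {
            println!("subagent #{} created", created.subagent.agent_id);
        }
        // Inbox depth changed; update badges without polling the child.
        EventMsg::AgentInbox(AgentInboxEvent {
            agent_id,
            pending_messages,
            pending_interrupts,
            ..
        }) => {
            println!("#{agent_id}: {pending_messages} pending, {pending_interrupts} interrupts");
        }
        _ => {}
    }
}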
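Finally, the backoff suggested for `subagent_await`. The 300 s floor and 1,800 s cap come from the reminder above; the doubling schedule is just one reasonable choice:

// 300 s, 600 s, 1_200 s, then pinned at the 1_800 s cap.
fn next_await_timeout_s(attempt: u32) -> u64 {
    let base: u64 = 300; // 5-minute minimum per the reminder
    (base << attempt.min(3)).min(1_800)
}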
@@ -86,8 +86,6 @@ impl EscalateServer {
         with_escalated_permissions: None,
         justification: None,
         arg0: None,
-        max_output_tokens: None,
-        max_output_chars: None,
     },
     get_platform_sandbox().unwrap_or(SandboxType::None),
     &sandbox_policy,

@@ -91,6 +91,9 @@ pub struct Cli {
 pub enum Command {
     /// Resume a previous session by id or pick the most recent with --last.
     Resume(ResumeArgs),
+
+    /// Fork an existing session into a new branch and immediately run a prompt.
+    Fork(ForkArgs),
 }

 #[derive(Parser, Debug)]

@@ -109,6 +112,17 @@ pub struct ResumeArgs {
     pub prompt: Option<String>,
 }

+#[derive(Parser, Debug)]
+pub struct ForkArgs {
+    /// Session id (UUID) to fork into a new branch.
+    #[arg(value_name = "SESSION_ID")]
+    pub session_id: String,
+
+    /// Prompt to send after forking. If `-` is used, read from stdin.
+    #[arg(value_name = "PROMPT", value_hint = clap::ValueHint::Other)]
+    pub prompt: Option<String>,
+}
+
 #[derive(Debug, Clone, Copy, Default, PartialEq, Eq, ValueEnum)]
 #[value(rename_all = "kebab-case")]
 pub enum Color {
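Judging from `ForkArgs` above and the `exec_fork_creates_new_session` test near the end of this diff, the new subcommand is presumably invoked as `codex exec fork <SESSION_ID> "<prompt>"`, with `-` accepted in place of the prompt to read it from stdin.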
@@ -1,6 +1,7 @@
 use codex_common::elapsed::format_duration;
 use codex_common::elapsed::format_elapsed;
 use codex_core::config::Config;
+use codex_core::protocol::AgentInboxEvent;
 use codex_core::protocol::AgentMessageEvent;
 use codex_core::protocol::AgentReasoningRawContentEvent;
 use codex_core::protocol::BackgroundEventEvent;

@@ -18,11 +19,14 @@ use codex_core::protocol::PatchApplyBeginEvent;
 use codex_core::protocol::PatchApplyEndEvent;
 use codex_core::protocol::SessionConfiguredEvent;
 use codex_core::protocol::StreamErrorEvent;
+use codex_core::protocol::SubagentLifecycleEvent;
 use codex_core::protocol::TaskCompleteEvent;
 use codex_core::protocol::TurnAbortReason;
 use codex_core::protocol::TurnDiffEvent;
 use codex_core::protocol::WarningEvent;
 use codex_core::protocol::WebSearchEndEvent;
+use codex_protocol::AgentId;
+use codex_protocol::ConversationId;
 use codex_protocol::num_format::format_with_separators;
 use owo_colors::OwoColorize;
 use owo_colors::Style;

@@ -43,6 +47,7 @@ use codex_protocol::plan_tool::UpdatePlanArgs;
 const MAX_OUTPUT_LINES_FOR_EXEC_TOOL_CALL: usize = 20;
 pub(crate) struct EventProcessorWithHumanOutput {
     call_id_to_patch: HashMap<String, PatchApplyBegin>,
+    subagents: HashMap<ConversationId, SubagentCliInfo>,

     // To ensure that --color=never is respected, ANSI escapes _must_ be added
     // using .style() with one of these fields. If you need a new style, add a

@@ -65,6 +70,11 @@ pub(crate) struct EventProcessorWithHumanOutput {
     final_message: Option<String>,
 }

+#[derive(Clone, Debug)]
+struct SubagentCliInfo {
+    label: Option<String>,
+}
+
 impl EventProcessorWithHumanOutput {
     pub(crate) fn create_with_ansi(
         with_ansi: bool,

@@ -76,6 +86,7 @@ impl EventProcessorWithHumanOutput {
         if with_ansi {
             Self {
                 call_id_to_patch,
+                subagents: HashMap::new(),
                 bold: Style::new().bold(),
                 italic: Style::new().italic(),
                 dimmed: Style::new().dimmed(),

@@ -93,6 +104,7 @@ impl EventProcessorWithHumanOutput {
         } else {
             Self {
                 call_id_to_patch,
+                subagents: HashMap::new(),
                 bold: Style::new(),
                 italic: Style::new(),
                 dimmed: Style::new(),

@@ -546,6 +558,8 @@ impl EventProcessor for EventProcessorWithHumanOutput {
                 ts_msg!(self, "task aborted: review ended");
             }
         },
+        EventMsg::SubagentLifecycle(ev) => self.log_subagent_lifecycle(ev),
+        EventMsg::AgentInbox(ev) => self.log_agent_inbox(ev),
         EventMsg::ShutdownComplete => return CodexStatus::Shutdown,
         EventMsg::WebSearchBegin(_)
         | EventMsg::ExecApprovalRequest(_)

@@ -595,6 +609,106 @@ impl EventProcessor for EventProcessorWithHumanOutput {
         }
     }
 }
+impl EventProcessorWithHumanOutput {
+    fn log_subagent_lifecycle(&mut self, event: SubagentLifecycleEvent) {
+        match event {
+            SubagentLifecycleEvent::Created(created) => {
+                let name = self.render_subagent_name(
+                    &created.subagent.session_id,
+                    created.subagent.agent_id,
+                    created.subagent.label.as_deref(),
+                );
+                self.subagents.insert(
+                    created.subagent.session_id,
+                    SubagentCliInfo {
+                        label: created.subagent.label.clone(),
+                    },
+                );
+                ts_msg!(
+                    self,
+                    "[#{}] {} created via {:?}",
+                    created.subagent.agent_id,
+                    name.style(self.cyan),
+                    created.subagent.origin
+                );
+            }
+            SubagentLifecycleEvent::Status(status) => {
+                let name = self.render_subagent_name(&status.session_id, status.agent_id, None);
+                ts_msg!(
+                    self,
+                    "[#{}] {} status → {:?}",
+                    status.agent_id,
+                    name.style(self.cyan),
+                    status.status
+                );
+            }
+            SubagentLifecycleEvent::ReasoningHeader(reasoning) => {
+                let name =
+                    self.render_subagent_name(&reasoning.session_id, reasoning.agent_id, None);
+                ts_msg!(
+                    self,
+                    "[#{}] {} header: {}",
+                    reasoning.agent_id,
+                    name.style(self.cyan),
+                    reasoning.reasoning_header.style(self.italic)
+                );
+            }
+            SubagentLifecycleEvent::Deleted(removed) => {
+                let name = self.render_subagent_name(&removed.session_id, removed.agent_id, None);
+                self.subagents.remove(&removed.session_id);
+                ts_msg!(
+                    self,
+                    "[#{}] {} removed",
+                    removed.agent_id,
+                    name.style(self.cyan)
+                );
+            }
+        }
+    }
+
+    fn log_agent_inbox(&mut self, event: AgentInboxEvent) {
+        let name = self.render_subagent_name(&event.session_id, event.agent_id, None);
+        ts_msg!(
+            self,
+            "[#{}] {} inbox: {} msg{}, {} interrupt{}",
+            event.agent_id,
+            name.style(self.cyan),
+            event.pending_messages,
+            if event.pending_messages == 1 { "" } else { "s" },
+            event.pending_interrupts,
+            if event.pending_interrupts == 1 {
+                ""
+            } else {
+                "s"
+            }
+        );
+    }
+
+    fn render_subagent_name(
+        &self,
+        session_id: &ConversationId,
+        agent_id: AgentId,
+        explicit: Option<&str>,
+    ) -> String {
+        if let Some(name) = explicit
+            .map(str::trim)
+            .filter(|s| !s.is_empty())
+            .map(std::string::ToString::to_string)
+            .or_else(|| {
+                self.subagents
+                    .get(session_id)
+                    .and_then(|info| info.label.as_deref())
+                    .map(str::trim)
+                    .filter(|s| !s.is_empty())
+                    .map(std::string::ToString::to_string)
+            })
+        {
+            return name;
+        }
+        let short = session_id.to_string().chars().take(8).collect::<String>();
+        format!("subagent #{agent_id} ({short})")
+    }
+}

 fn escape_command(command: &[String]) -> String {
     try_join(command.iter().map(String::as_str)).unwrap_or_else(|_| command.join(" "))

@@ -144,6 +144,7 @@ pub enum CommandExecutionStatus {
     InProgress,
     Completed,
     Failed,
+    Declined,
 }

 /// A command executed by the agent.

@@ -10,6 +10,8 @@ mod event_processor_with_human_output;
 pub mod event_processor_with_jsonl_output;
 pub mod exec_events;

+use anyhow::Context;
+use anyhow::anyhow;
 pub use cli::Cli;
 use codex_common::oss::ensure_oss_provider_ready;
 use codex_common::oss::get_default_model_for_oss_provider;

@@ -18,6 +20,7 @@ use codex_core::ConversationManager;
 use codex_core::LMSTUDIO_OSS_PROVIDER_ID;
 use codex_core::NewConversation;
 use codex_core::OLLAMA_OSS_PROVIDER_ID;
+use codex_core::RolloutRecorder;
 use codex_core::auth::enforce_login_restrictions;
 use codex_core::config::Config;
 use codex_core::config::ConfigOverrides;

@@ -31,6 +34,7 @@ use codex_core::protocol::EventMsg;
 use codex_core::protocol::Op;
 use codex_core::protocol::SessionSource;
 use codex_protocol::config_types::SandboxMode;
+use codex_protocol::protocol::InitialHistory;
 use codex_protocol::user_input::UserInput;
 use event_processor_with_human_output::EventProcessorWithHumanOutput;
 use event_processor_with_jsonl_output::EventProcessorWithJsonOutput;

@@ -39,6 +43,7 @@ use serde_json::Value;
 use std::io::IsTerminal;
 use std::io::Read;
 use std::path::PathBuf;
+use std::sync::Arc;
 use supports_color::Stream;
 use tracing::debug;
 use tracing::error;

@@ -97,6 +102,7 @@ pub async fn run_main(cli: Cli, codex_linux_sandbox_exe: Option<PathBuf>) -> any
             });
             resume_prompt.or(prompt)
         }
+        Some(ExecCommand::Fork(args)) => args.prompt.clone().or(prompt),
         None => prompt,
     };

@@ -240,6 +246,10 @@ pub async fn run_main(cli: Cli, codex_linux_sandbox_exe: Option<PathBuf>) -> any
         base_instructions: None,
         developer_instructions: None,
         compact_prompt: None,
+        max_active_subagents: None,
+        root_agent_uses_user_messages: None,
+        subagent_root_inbox_autosubmit: None,
+        subagent_inbox_inject_before_tools: None,
         include_apply_patch_tool: None,
         show_raw_agent_reasoning: oss.then_some(true),
         tools_web_search_request: None,

@@ -323,27 +333,39 @@ pub async fn run_main(cli: Cli, codex_linux_sandbox_exe: Option<PathBuf>) -> any
     );
     let conversation_manager = ConversationManager::new(auth_manager.clone(), SessionSource::Exec);

-    // Handle resume subcommand by resolving a rollout path and using explicit resume API.
+    // Handle resume/fork subcommands by resolving a rollout path and using the explicit resume API.
     let NewConversation {
         conversation_id: _,
         conversation,
         session_configured,
-    } = if let Some(ExecCommand::Resume(args)) = command {
-        let resume_path = resolve_resume_path(&config, &args).await?;
-
-        if let Some(path) = resume_path {
-            conversation_manager
-                .resume_conversation_from_rollout(config.clone(), path, auth_manager.clone())
-                .await?
-        } else {
-            conversation_manager
-                .new_conversation(config.clone())
-                .await?
-        }
-    } else {
-        conversation_manager
-            .new_conversation(config.clone())
-            .await?
-    };
+    } = match &command {
+        Some(ExecCommand::Resume(args)) => {
+            let resume_path = resolve_resume_path(&config, args).await?;
+            if let Some(path) = resume_path {
+                conversation_manager
+                    .resume_conversation_from_rollout(config.clone(), path, auth_manager.clone())
+                    .await?
+            } else {
+                conversation_manager
+                    .new_conversation(config.clone())
+                    .await?
+            }
+        }
+        Some(ExecCommand::Fork(args)) => {
+            let path = resolve_fork_path(&config, &args.session_id).await?;
+            fork_conversation_from_rollout(
+                &conversation_manager,
+                &config,
+                auth_manager.clone(),
+                path,
+            )
+            .await?
+        }
+        None => {
+            conversation_manager
+                .new_conversation(config.clone())
+                .await?
+        }
+    };
     // Print the effective configuration and prompt so users can see what Codex
     // is using.

@@ -467,6 +489,32 @@ async fn resolve_resume_path(
     }
 }

+async fn resolve_fork_path(config: &Config, session_id: &str) -> anyhow::Result<PathBuf> {
+    find_conversation_path_by_id_str(&config.codex_home, session_id)
+        .await?
+        .ok_or_else(|| anyhow!("No session with id {session_id} found"))
+}
+
+async fn fork_conversation_from_rollout(
+    conversation_manager: &ConversationManager,
+    config: &Config,
+    auth_manager: Arc<AuthManager>,
+    path: PathBuf,
+) -> anyhow::Result<NewConversation> {
+    let history = RolloutRecorder::get_rollout_history(&path)
+        .await
+        .context("failed to read session history for fork")?;
+    let fork_history = match history {
+        InitialHistory::New => InitialHistory::New,
+        InitialHistory::Resumed(resumed) => InitialHistory::Forked(resumed.history),
+        InitialHistory::Forked(items) => InitialHistory::Forked(items),
+    };
+    conversation_manager
+        .resume_conversation_with_history(config.clone(), fork_history, auth_manager)
+        .await
+        .map_err(anyhow::Error::from)
+}
+
 fn load_output_schema(path: Option<PathBuf>) -> Option<Value> {
     let path = path?;

@@ -642,6 +642,7 @@ fn exec_command_end_success_produces_completed_command_item() {
             cwd: cwd.clone(),
             parsed_cmd: parsed_cmd.clone(),
             source: ExecCommandSource::Agent,
+            is_user_shell_command: false,
             interaction_input: None,
         }),
     );

@@ -714,6 +715,7 @@ fn exec_command_end_failure_produces_failed_command_item() {
             cwd: cwd.clone(),
             parsed_cmd: parsed_cmd.clone(),
             source: ExecCommandSource::Agent,
+            is_user_shell_command: false,
            interaction_input: None,
         }),
     );

@@ -309,3 +309,70 @@ fn exec_resume_preserves_cli_configuration_overrides() -> anyhow::Result<()> {
     assert!(content.contains(&marker2));
     Ok(())
 }
+
+#[test]
+fn exec_fork_creates_new_session() -> anyhow::Result<()> {
+    let test = test_codex_exec();
+    let fixture =
+        Path::new(env!("CARGO_MANIFEST_DIR")).join("tests/fixtures/cli_responses_fixture.sse");
+
+    // Seed conversation.
+    let marker = format!("fork-source-{}", Uuid::new_v4());
+    let prompt = format!("echo {marker}");
+
+    test.cmd()
+        .env("CODEX_RS_SSE_FIXTURE", &fixture)
+        .env("OPENAI_BASE_URL", "http://unused.local")
+        .arg("--skip-git-repo-check")
+        .arg("-C")
+        .arg(env!("CARGO_MANIFEST_DIR"))
+        .arg(&prompt)
+        .assert()
+        .success();
+
+    let sessions_dir = test.home_path().join("sessions");
+    let source_path = find_session_file_containing_marker(&sessions_dir, &marker)
+        .expect("no session file found after source run");
+    let source_id = extract_conversation_id(&source_path);
+    assert!(
+        !source_id.is_empty(),
+        "missing conversation id in source session"
+    );
+
+    // Fork with a new prompt.
+    let marker2 = format!("fork-branch-{}", Uuid::new_v4());
+    let prompt2 = format!("echo {marker2}");
+
+    test.cmd()
+        .env("CODEX_RS_SSE_FIXTURE", &fixture)
+        .env("OPENAI_BASE_URL", "http://unused.local")
+        .arg("--skip-git-repo-check")
+        .arg("-C")
+        .arg(env!("CARGO_MANIFEST_DIR"))
+        .arg("fork")
+        .arg(&source_id)
+        .arg(&prompt2)
+        .assert()
+        .success();
+
+    let forked_path = find_session_file_containing_marker(&sessions_dir, &marker2)
+        .expect("no forked session file containing new marker");
+    assert_ne!(
+        forked_path, source_path,
+        "fork should produce a new session file"
+    );
+
+    let source_content = std::fs::read_to_string(&source_path)?;
+    assert!(
+        !source_content.contains(&marker2),
+        "source session must remain unchanged"
+    );
+
+    let forked_content = std::fs::read_to_string(&forked_path)?;
+    assert!(
+        forked_content.contains(&marker) && forked_content.contains(&marker2),
+        "forked session should contain both original and new prompts"
+    );
+
+    Ok(())
+}

BIN codex-rs/image.png (new file, 80 KiB; binary file not shown)
@@ -28,7 +28,7 @@ app-server-test-client *args:

 # format code
 fmt:
-    cargo fmt -- --config imports_granularity=Item
+    cargo +nightly fmt -- --config imports_granularity=Item

 fix *args:
     cargo clippy --fix --all-features --tests --allow-dirty "$@"

@@ -45,8 +45,6 @@ async fn run_cmd(cmd: &[&str], writable_roots: &[PathBuf], timeout_ms: u64) {
         with_escalated_permissions: None,
         justification: None,
         arg0: None,
-        max_output_tokens: None,
-        max_output_chars: None,
     };

     let sandbox_policy = SandboxPolicy::WorkspaceWrite {

@@ -150,8 +148,6 @@ async fn assert_network_blocked(cmd: &[&str]) {
         with_escalated_permissions: None,
         justification: None,
         arg0: None,
-        max_output_tokens: None,
-        max_output_chars: None,
     };

     let sandbox_policy = SandboxPolicy::new_read_only_policy();

@@ -166,6 +166,10 @@ impl CodexToolCallParam {
         base_instructions,
         developer_instructions,
         compact_prompt,
+        max_active_subagents: None,
+        root_agent_uses_user_messages: None,
+        subagent_root_inbox_autosubmit: None,
+        subagent_inbox_inject_before_tools: None,
         include_apply_patch_tool: None,
         show_raw_agent_reasoning: None,
         tools_web_search_request: None,

@@ -302,7 +302,9 @@ async fn run_codex_tool_session_inner(
             | EventMsg::UndoStarted(_)
             | EventMsg::UndoCompleted(_)
             | EventMsg::ExitedReviewMode(_)
-            | EventMsg::DeprecationNotice(_) => {
+            | EventMsg::DeprecationNotice(_)
+            | EventMsg::SubagentLifecycle(_)
+            | EventMsg::AgentInbox(_) => {
                 // For now, we do not do anything extra for these
                 // events. Note that
                 // send(codex_event_to_notification(&event)) above has
Some files were not shown because too many files have changed in this diff.