Mirror of https://github.com/openai/codex.git (synced 2026-02-02 15:03:38 +00:00)

Compare commits: worktree-c...pr3602 (1 commit)
| Author | SHA1 | Date |
|---|---|---|
| | f8b5119504 | |
.github/dotslash-config.json (vendored): 28 lines changed
@@ -27,34 +27,6 @@
          "path": "codex.exe"
        }
      }
    },
    "codex-responses-api-proxy": {
      "platforms": {
        "macos-aarch64": {
          "regex": "^codex-responses-api-proxy-aarch64-apple-darwin\\.zst$",
          "path": "codex-responses-api-proxy"
        },
        "macos-x86_64": {
          "regex": "^codex-responses-api-proxy-x86_64-apple-darwin\\.zst$",
          "path": "codex-responses-api-proxy"
        },
        "linux-x86_64": {
          "regex": "^codex-responses-api-proxy-x86_64-unknown-linux-musl\\.zst$",
          "path": "codex-responses-api-proxy"
        },
        "linux-aarch64": {
          "regex": "^codex-responses-api-proxy-aarch64-unknown-linux-musl\\.zst$",
          "path": "codex-responses-api-proxy"
        },
        "windows-x86_64": {
          "regex": "^codex-responses-api-proxy-x86_64-pc-windows-msvc\\.exe\\.zst$",
          "path": "codex-responses-api-proxy.exe"
        },
        "windows-aarch64": {
          "regex": "^codex-responses-api-proxy-aarch64-pc-windows-msvc\\.exe\\.zst$",
          "path": "codex-responses-api-proxy.exe"
        }
      }
    }
  }
}
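For context on how this config is consumed: DotSlash selects the release asset whose filename matches the platform's regex and installs it under the given path. A minimal sketch of that matching step (illustrative only; the asset names are assumed to follow the patterns above):

```python
import re

# Hypothetical release asset names, following the patterns in the config above.
assets = [
    "codex-aarch64-apple-darwin.zst",
    "codex-responses-api-proxy-aarch64-apple-darwin.zst",
]


def pick_asset(names: list[str], platform_regex: str) -> str | None:
    """Return the first asset whose filename matches the platform regex."""
    pattern = re.compile(platform_regex)
    return next((n for n in names if pattern.match(n)), None)


print(pick_asset(assets, r"^codex-responses-api-proxy-aarch64-apple-darwin\.zst$"))
# -> codex-responses-api-proxy-aarch64-apple-darwin.zst
```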
.github/workflows/ci.yml (vendored): 23 lines changed
@@ -1,7 +1,7 @@
name: ci

on:
  pull_request: {}
  pull_request: { branches: [main] }
  push: { branches: [main] }

jobs:
@@ -27,27 +27,12 @@ jobs:
      - name: Install dependencies
        run: pnpm install --frozen-lockfile

      # build_npm_package.py requires DotSlash when staging releases.
      - uses: facebook/install-dotslash@v2
      # Run all tasks using workspace filters

      - name: Stage npm package
        id: stage_npm_package
      - name: Ensure staging a release works.
        env:
          GH_TOKEN: ${{ github.token }}
        run: |
          set -euo pipefail
          CODEX_VERSION=0.40.0
          PACK_OUTPUT="${RUNNER_TEMP}/codex-npm.tgz"
          python3 ./codex-cli/scripts/build_npm_package.py \
            --release-version "$CODEX_VERSION" \
            --pack-output "$PACK_OUTPUT"
          echo "pack_output=$PACK_OUTPUT" >> "$GITHUB_OUTPUT"

      - name: Upload staged npm package artifact
        uses: actions/upload-artifact@v4
        with:
          name: codex-npm-staging
          path: ${{ steps.stage_npm_package.outputs.pack_output }}
        run: ./codex-cli/scripts/stage_release.sh

      - name: Ensure root README.md contains only ASCII and certain Unicode code points
        run: ./scripts/asciicheck.py README.md
.github/workflows/codespell.yml (vendored): 2 lines changed
@@ -22,7 +22,7 @@ jobs:
      - name: Annotate locations with typos
        uses: codespell-project/codespell-problem-matcher@b80729f885d32f78a716c2f107b4db1025001c42 # v1
      - name: Codespell
        uses: codespell-project/actions-codespell@406322ec52dd7b488e48c1c4b82e2a8b3a1bf630 # v2.1
        uses: codespell-project/actions-codespell@406322ec52dd7b488e48c1c4b82e2a8b3a1bf630 # v2
        with:
          ignore_words_file: .codespellignore
          skip: frame*.txt
.github/workflows/rust-ci.yml (vendored): 6 lines changed
@@ -57,7 +57,7 @@ jobs:
    working-directory: codex-rs
    steps:
      - uses: actions/checkout@v5
      - uses: dtolnay/rust-toolchain@1.90
      - uses: dtolnay/rust-toolchain@1.89
        with:
          components: rustfmt
      - name: cargo fmt
@@ -75,7 +75,7 @@ jobs:
    working-directory: codex-rs
    steps:
      - uses: actions/checkout@v5
      - uses: dtolnay/rust-toolchain@1.90
      - uses: dtolnay/rust-toolchain@1.89
      - uses: taiki-e/install-action@0c5db7f7f897c03b771660e91d065338615679f4 # v2
        with:
          tool: cargo-shear
@@ -143,7 +143,7 @@ jobs:

    steps:
      - uses: actions/checkout@v5
      - uses: dtolnay/rust-toolchain@1.90
      - uses: dtolnay/rust-toolchain@1.89
        with:
          targets: ${{ matrix.target }}
          components: clippy
.github/workflows/rust-release.yml (vendored): 124 lines changed
@@ -77,7 +77,7 @@ jobs:

    steps:
      - uses: actions/checkout@v5
      - uses: dtolnay/rust-toolchain@1.90
      - uses: dtolnay/rust-toolchain@1.89
        with:
          targets: ${{ matrix.target }}
@@ -97,7 +97,7 @@
          sudo apt install -y musl-tools pkg-config

      - name: Cargo build
        run: cargo build --target ${{ matrix.target }} --release --bin codex --bin codex-responses-api-proxy
        run: cargo build --target ${{ matrix.target }} --release --bin codex

      - name: Stage artifacts
        shell: bash
@@ -107,10 +107,8 @@

          if [[ "${{ matrix.runner }}" == windows* ]]; then
            cp target/${{ matrix.target }}/release/codex.exe "$dest/codex-${{ matrix.target }}.exe"
            cp target/${{ matrix.target }}/release/codex-responses-api-proxy.exe "$dest/codex-responses-api-proxy-${{ matrix.target }}.exe"
          else
            cp target/${{ matrix.target }}/release/codex "$dest/codex-${{ matrix.target }}"
            cp target/${{ matrix.target }}/release/codex-responses-api-proxy "$dest/codex-responses-api-proxy-${{ matrix.target }}"
          fi

      - if: ${{ matrix.runner == 'windows-11-arm' }}
@@ -169,14 +167,6 @@
    needs: build
    name: release
    runs-on: ubuntu-latest
    permissions:
      contents: write
      actions: read
    outputs:
      version: ${{ steps.release_name.outputs.name }}
      tag: ${{ github.ref_name }}
      should_publish_npm: ${{ steps.npm_publish_settings.outputs.should_publish }}
      npm_tag: ${{ steps.npm_publish_settings.outputs.npm_tag }}

    steps:
      - name: Checkout repository
@@ -197,50 +187,21 @@
          version="${GITHUB_REF_NAME#rust-v}"
          echo "name=${version}" >> $GITHUB_OUTPUT

      - name: Determine npm publish settings
        id: npm_publish_settings
        env:
          VERSION: ${{ steps.release_name.outputs.name }}
        run: |
          set -euo pipefail
          version="${VERSION}"

          if [[ "${version}" =~ ^[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
            echo "should_publish=true" >> "$GITHUB_OUTPUT"
            echo "npm_tag=" >> "$GITHUB_OUTPUT"
          elif [[ "${version}" =~ ^[0-9]+\.[0-9]+\.[0-9]+-alpha\.[0-9]+$ ]]; then
            echo "should_publish=true" >> "$GITHUB_OUTPUT"
            echo "npm_tag=alpha" >> "$GITHUB_OUTPUT"
          else
            echo "should_publish=false" >> "$GITHUB_OUTPUT"
            echo "npm_tag=" >> "$GITHUB_OUTPUT"
          fi
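The step above classifies the tag-derived version with two regexes: a plain `X.Y.Z` version publishes to the default dist-tag, an `X.Y.Z-alpha.N` pre-release publishes under the `alpha` tag, and anything else is not published. The same decision in a minimal Python sketch (illustrative, not part of the workflow):

```python
import re


def npm_publish_settings(version: str) -> tuple[bool, str]:
    """Mirror the workflow's regex checks: (should_publish, npm_tag)."""
    if re.fullmatch(r"\d+\.\d+\.\d+", version):
        return True, ""  # stable release, default dist-tag
    if re.fullmatch(r"\d+\.\d+\.\d+-alpha\.\d+", version):
        return True, "alpha"  # numbered alpha pre-release
    return False, ""  # anything else is skipped


assert npm_publish_settings("0.40.0") == (True, "")
assert npm_publish_settings("0.41.0-alpha.3") == (True, "alpha")
assert npm_publish_settings("0.41.0-rc.1") == (False, "")
```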
      # build_npm_package.py requires DotSlash when staging releases.
      - uses: facebook/install-dotslash@v2
      - name: Stage codex CLI npm package
      - name: Stage npm package
        env:
          GH_TOKEN: ${{ github.token }}
        run: |
          set -euo pipefail
          TMP_DIR="${RUNNER_TEMP}/npm-stage"
          ./codex-cli/scripts/build_npm_package.py \
            --package codex \
          python3 codex-cli/scripts/stage_rust_release.py \
            --release-version "${{ steps.release_name.outputs.name }}" \
            --staging-dir "${TMP_DIR}" \
            --pack-output "${GITHUB_WORKSPACE}/dist/npm/codex-npm-${{ steps.release_name.outputs.name }}.tgz"

      - name: Stage responses API proxy npm package
        env:
          GH_TOKEN: ${{ github.token }}
        run: |
          set -euo pipefail
          TMP_DIR="${RUNNER_TEMP}/npm-stage-responses"
          ./codex-cli/scripts/build_npm_package.py \
            --package codex-responses-api-proxy \
            --release-version "${{ steps.release_name.outputs.name }}" \
            --staging-dir "${TMP_DIR}" \
            --pack-output "${GITHUB_WORKSPACE}/dist/npm/codex-responses-api-proxy-npm-${{ steps.release_name.outputs.name }}.tgz"
            --tmp "${TMP_DIR}"
          mkdir -p dist/npm
          # Produce an npm-ready tarball using `npm pack` and store it in dist/npm.
          # We then rename it to a stable name used by our publishing script.
          (cd "$TMP_DIR" && npm pack --pack-destination "${GITHUB_WORKSPACE}/dist/npm")
          mv "${GITHUB_WORKSPACE}"/dist/npm/*.tgz \
            "${GITHUB_WORKSPACE}/dist/npm/codex-npm-${{ steps.release_name.outputs.name }}.tgz"

      - name: Create GitHub Release
        uses: softprops/action-gh-release@v2
@@ -259,69 +220,6 @@
          tag: ${{ github.ref_name }}
          config: .github/dotslash-config.json

  # Publish to npm using OIDC authentication.
  # July 31, 2025: https://github.blog/changelog/2025-07-31-npm-trusted-publishing-with-oidc-is-generally-available/
  # npm docs: https://docs.npmjs.com/trusted-publishers
  publish-npm:
    # Publish to npm for stable releases and alpha pre-releases with numeric suffixes.
    if: ${{ needs.release.outputs.should_publish_npm == 'true' }}
    name: publish-npm
    needs: release
    runs-on: ubuntu-latest
    permissions:
      id-token: write # Required for OIDC
      contents: read

    steps:
      - name: Setup Node.js
        uses: actions/setup-node@v5
        with:
          node-version: 22
          registry-url: "https://registry.npmjs.org"
          scope: "@openai"

      # Trusted publishing requires npm CLI version 11.5.1 or later.
      - name: Update npm
        run: npm install -g npm@latest

      - name: Download npm tarballs from release
        env:
          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        run: |
          set -euo pipefail
          version="${{ needs.release.outputs.version }}"
          tag="${{ needs.release.outputs.tag }}"
          mkdir -p dist/npm
          gh release download "$tag" \
            --repo "${GITHUB_REPOSITORY}" \
            --pattern "codex-npm-${version}.tgz" \
            --dir dist/npm
          gh release download "$tag" \
            --repo "${GITHUB_REPOSITORY}" \
            --pattern "codex-responses-api-proxy-npm-${version}.tgz" \
            --dir dist/npm

      # No NODE_AUTH_TOKEN needed because we use OIDC.
      - name: Publish to npm
        env:
          VERSION: ${{ needs.release.outputs.version }}
          NPM_TAG: ${{ needs.release.outputs.npm_tag }}
        run: |
          set -euo pipefail
          tag_args=()
          if [[ -n "${NPM_TAG}" ]]; then
            tag_args+=(--tag "${NPM_TAG}")
          fi

          tarballs=(
            "codex-npm-${VERSION}.tgz"
            "codex-responses-api-proxy-npm-${VERSION}.tgz"
          )

          for tarball in "${tarballs[@]}"; do
            npm publish "${GITHUB_WORKSPACE}/dist/npm/${tarball}" "${tag_args[@]}"
          done

  update-branch:
    name: Update latest-alpha-cli branch
    permissions:
@@ -4,7 +4,6 @@ In the codex-rs folder where the rust code lives:

- Crate names are prefixed with `codex-`. For example, the `core` folder's crate is named `codex-core`.
- When using `format!`, inline variables into `{}` whenever possible.
- Install any commands the repo relies on (for example `just`, `rg`, or `cargo-insta`) if they aren't already available before running instructions here.
- Never add or modify any code related to `CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR` or `CODEX_SANDBOX_ENV_VAR`.
- You operate in a sandbox where `CODEX_SANDBOX_NETWORK_DISABLED=1` will be set whenever you use the `shell` tool. Any existing code that uses `CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR` was authored with this fact in mind. It is often used to exit early from tests that the author knew you would not be able to run given your sandbox limitations (the sketch after this list illustrates the pattern).
- Similarly, when you spawn a process using Seatbelt (`/usr/bin/sandbox-exec`), `CODEX_SANDBOX=seatbelt` will be set on the child process. Integration tests that want to run Seatbelt themselves cannot be run under Seatbelt, so checks for `CODEX_SANDBOX=seatbelt` are also often used to exit early from tests, as appropriate.
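The early-exit convention described above can be sketched as follows (illustrative Python; the repository's actual tests are Rust, and the test name here is hypothetical):

```python
import os
import unittest


class NetworkDependentTest(unittest.TestCase):
    def test_fetch_release_metadata(self) -> None:
        # Mirror the CODEX_SANDBOX_NETWORK_DISABLED convention: bail out
        # early when the Codex sandbox has networking disabled.
        if os.environ.get("CODEX_SANDBOX_NETWORK_DISABLED") == "1":
            self.skipTest("network disabled in Codex sandbox")
        ...  # test body that needs outbound network access


if __name__ == "__main__":
    unittest.main()
```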
@@ -1,3 +1,4 @@
<h1 align="center">OpenAI Codex CLI</h1>

<p align="center"><code>npm i -g @openai/codex</code><br />or <code>brew install codex</code></p>

@@ -101,3 +102,4 @@ Codex CLI supports a rich set of configuration options, with preferences stored
## License

This repository is licensed under the [Apache-2.0 License](LICENSE).
codex-cli/.gitignore (vendored): 8 lines changed
@@ -1 +1,7 @@
/vendor/
# Added by ./scripts/install_native_deps.sh
/bin/codex-aarch64-apple-darwin
/bin/codex-aarch64-unknown-linux-musl
/bin/codex-linux-sandbox-arm64
/bin/codex-linux-sandbox-x64
/bin/codex-x86_64-apple-darwin
/bin/codex-x86_64-unknown-linux-musl
@@ -208,7 +208,7 @@ The hardening mechanism Codex uses depends on your OS:

| Requirement | Details |
| --------------------------- | --------------------------------------------------------------- |
| Operating systems | macOS 12+, Ubuntu 20.04+/Debian 10+, or Windows 11 **via WSL2** |
| Node.js | **16 or newer** (Node 20 LTS recommended) |
| Node.js | **22 or newer** (LTS recommended) |
| Git (optional, recommended) | 2.23+ for built-in PR helpers |
| RAM | 4 GB minimum (8 GB recommended) |
@@ -513,7 +513,7 @@ Codex runs model-generated commands in a sandbox. If a proposed command or file
<details>
<summary>Does it work on Windows?</summary>

Not directly. It requires [Windows Subsystem for Linux (WSL2)](https://learn.microsoft.com/en-us/windows/wsl/install) - Codex is regularly tested on macOS and Linux with Node 20+, and also supports Node 16.
Not directly. It requires [Windows Subsystem for Linux (WSL2)](https://learn.microsoft.com/en-us/windows/wsl/install) - Codex has been tested on macOS and Linux with Node 22.

</details>
@@ -1,8 +1,6 @@
#!/usr/bin/env node
// Unified entry point for the Codex CLI.

import { spawn } from "node:child_process";
import { existsSync } from "fs";
import path from "path";
import { fileURLToPath } from "url";

@@ -42,10 +40,10 @@ switch (platform) {
  case "win32":
    switch (arch) {
      case "x64":
        targetTriple = "x86_64-pc-windows-msvc";
        targetTriple = "x86_64-pc-windows-msvc.exe";
        break;
      case "arm64":
        targetTriple = "aarch64-pc-windows-msvc";
        targetTriple = "aarch64-pc-windows-msvc.exe";
        break;
      default:
        break;
@@ -59,16 +57,31 @@ if (!targetTriple) {
  throw new Error(`Unsupported platform: ${platform} (${arch})`);
}

const vendorRoot = path.join(__dirname, "..", "vendor");
const archRoot = path.join(vendorRoot, targetTriple);
const codexBinaryName = process.platform === "win32" ? "codex.exe" : "codex";
const binaryPath = path.join(archRoot, "codex", codexBinaryName);
const binaryPath = path.join(__dirname, "..", "bin", `codex-${targetTriple}`);

// Use an asynchronous spawn instead of spawnSync so that Node is able to
// respond to signals (e.g. Ctrl-C / SIGINT) while the native binary is
// executing. This allows us to forward those signals to the child process
// and guarantees that when either the child terminates or the parent
// receives a fatal signal, both processes exit in a predictable manner.
const { spawn } = await import("child_process");

async function tryImport(moduleName) {
  try {
    // eslint-disable-next-line node/no-unsupported-features/es-syntax
    return await import(moduleName);
  } catch (err) {
    return null;
  }
}

async function resolveRgDir() {
  const ripgrep = await tryImport("@vscode/ripgrep");
  if (!ripgrep?.rgPath) {
    return null;
  }
  return path.dirname(ripgrep.rgPath);
}

function getUpdatedPath(newDirs) {
  const pathSep = process.platform === "win32" ? ";" : ":";
@@ -81,9 +94,9 @@ function getUpdatedPath(newDirs) {
}

const additionalDirs = [];
const pathDir = path.join(archRoot, "path");
if (existsSync(pathDir)) {
  additionalDirs.push(pathDir);
const rgDir = await resolveRgDir();
if (rgDir) {
  additionalDirs.push(rgDir);
}
const updatedPath = getUpdatedPath(additionalDirs);
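In the new launcher, the ripgrep directory resolved from `@vscode/ripgrep` is prepended to `PATH` before the native binary is spawned. A minimal Python sketch of that PATH update (illustrative; the directory value is a placeholder):

```python
import os


def updated_path(new_dirs: list[str]) -> str:
    """Prepend new_dirs to the current PATH using the platform separator."""
    existing = os.environ.get("PATH", "")
    parts = [d for d in new_dirs if d]
    if existing:
        parts.append(existing)
    return os.pathsep.join(parts)


rg_dir = "/tmp/example/ripgrep/bin"  # placeholder for path.dirname(rgPath)
child_env = {**os.environ, "PATH": updated_path([rg_dir])}
# child_env would then be passed to the spawned codex process.
```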
@@ -1,79 +0,0 @@
#!/usr/bin/env dotslash

{
  "name": "rg",
  "platforms": {
    "macos-aarch64": {
      "size": 1787248,
      "hash": "blake3",
      "digest": "8d9942032585ea8ee805937634238d9aee7b210069f4703c88fbe568e26fb78a",
      "format": "tar.gz",
      "path": "ripgrep-14.1.1-aarch64-apple-darwin/rg",
      "providers": [
        {
          "url": "https://github.com/BurntSushi/ripgrep/releases/download/14.1.1/ripgrep-14.1.1-aarch64-apple-darwin.tar.gz"
        }
      ]
    },
    "linux-aarch64": {
      "size": 2047405,
      "hash": "blake3",
      "digest": "0b670b8fa0a3df2762af2fc82cc4932f684ca4c02dbd1260d4f3133fd4b2a515",
      "format": "tar.gz",
      "path": "ripgrep-14.1.1-aarch64-unknown-linux-gnu/rg",
      "providers": [
        {
          "url": "https://github.com/BurntSushi/ripgrep/releases/download/14.1.1/ripgrep-14.1.1-aarch64-unknown-linux-gnu.tar.gz"
        }
      ]
    },
    "macos-x86_64": {
      "size": 2082672,
      "hash": "blake3",
      "digest": "e9b862fc8da3127f92791f0ff6a799504154ca9d36c98bf3e60a81c6b1f7289e",
      "format": "tar.gz",
      "path": "ripgrep-14.1.1-x86_64-apple-darwin/rg",
      "providers": [
        {
          "url": "https://github.com/BurntSushi/ripgrep/releases/download/14.1.1/ripgrep-14.1.1-x86_64-apple-darwin.tar.gz"
        }
      ]
    },
    "linux-x86_64": {
      "size": 2566310,
      "hash": "blake3",
      "digest": "f73cca4e54d78c31f832c7f6e2c0b4db8b04fa3eaa747915727d570893dbee76",
      "format": "tar.gz",
      "path": "ripgrep-14.1.1-x86_64-unknown-linux-musl/rg",
      "providers": [
        {
          "url": "https://github.com/BurntSushi/ripgrep/releases/download/14.1.1/ripgrep-14.1.1-x86_64-unknown-linux-musl.tar.gz"
        }
      ]
    },
    "windows-x86_64": {
      "size": 2058893,
      "hash": "blake3",
      "digest": "a8ce1a6fed4f8093ee997e57f33254e94b2cd18e26358b09db599c89882eadbd",
      "format": "zip",
      "path": "ripgrep-14.1.1-x86_64-pc-windows-msvc/rg.exe",
      "providers": [
        {
          "url": "https://github.com/BurntSushi/ripgrep/releases/download/14.1.1/ripgrep-14.1.1-x86_64-pc-windows-msvc.zip"
        }
      ]
    },
    "windows-aarch64": {
      "size": 1667740,
      "hash": "blake3",
      "digest": "47b971a8c4fca1d23a4e7c19bd4d88465ebc395598458133139406d3bf85f3fa",
      "format": "zip",
      "path": "rg.exe",
      "providers": [
        {
          "url": "https://github.com/microsoft/ripgrep-prebuilt/releases/download/v13.0.0-13/ripgrep-v13.0.0-13-aarch64-pc-windows-msvc.zip"
        }
      ]
    }
  }
}
codex-cli/package-lock.json (generated): 103 lines changed
@@ -2,16 +2,117 @@
  "name": "@openai/codex",
  "version": "0.0.0-dev",
  "lockfileVersion": 3,
  "requires": true,
  "packages": {
    "": {
      "name": "@openai/codex",
      "version": "0.0.0-dev",
      "license": "Apache-2.0",
      "dependencies": {
        "@vscode/ripgrep": "^1.15.14"
      },
      "bin": {
        "codex": "bin/codex.js"
      },
      "engines": {
        "node": ">=16"
        "node": ">=20"
      }
    },
    "node_modules/@vscode/ripgrep": {
      "version": "1.15.14",
      "resolved": "https://registry.npmjs.org/@vscode/ripgrep/-/ripgrep-1.15.14.tgz",
      "integrity": "sha512-/G1UJPYlm+trBWQ6cMO3sv6b8D1+G16WaJH1/DSqw32JOVlzgZbLkDxRyzIpTpv30AcYGMkCf5tUqGlW6HbDWw==",
      "hasInstallScript": true,
      "license": "MIT",
      "dependencies": {
        "https-proxy-agent": "^7.0.2",
        "proxy-from-env": "^1.1.0",
        "yauzl": "^2.9.2"
      }
    },
    "node_modules/agent-base": {
      "version": "7.1.4",
      "resolved": "https://registry.npmjs.org/agent-base/-/agent-base-7.1.4.tgz",
      "integrity": "sha512-MnA+YT8fwfJPgBx3m60MNqakm30XOkyIoH1y6huTQvC0PwZG7ki8NacLBcrPbNoo8vEZy7Jpuk7+jMO+CUovTQ==",
      "license": "MIT",
      "engines": {
        "node": ">= 14"
      }
    },
    "node_modules/buffer-crc32": {
      "version": "0.2.13",
      "resolved": "https://registry.npmjs.org/buffer-crc32/-/buffer-crc32-0.2.13.tgz",
      "integrity": "sha512-VO9Ht/+p3SN7SKWqcrgEzjGbRSJYTx+Q1pTQC0wrWqHx0vpJraQ6GtHx8tvcg1rlK1byhU5gccxgOgj7B0TDkQ==",
      "license": "MIT",
      "engines": {
        "node": "*"
      }
    },
    "node_modules/debug": {
      "version": "4.4.1",
      "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.1.tgz",
      "integrity": "sha512-KcKCqiftBJcZr++7ykoDIEwSa3XWowTfNPo92BYxjXiyYEVrUQh2aLyhxBCwww+heortUFxEJYcRzosstTEBYQ==",
      "license": "MIT",
      "dependencies": {
        "ms": "^2.1.3"
      },
      "engines": {
        "node": ">=6.0"
      },
      "peerDependenciesMeta": {
        "supports-color": {
          "optional": true
        }
      }
    },
    "node_modules/fd-slicer": {
      "version": "1.1.0",
      "resolved": "https://registry.npmjs.org/fd-slicer/-/fd-slicer-1.1.0.tgz",
      "integrity": "sha512-cE1qsB/VwyQozZ+q1dGxR8LBYNZeofhEdUNGSMbQD3Gw2lAzX9Zb3uIU6Ebc/Fmyjo9AWWfnn0AUCHqtevs/8g==",
      "license": "MIT",
      "dependencies": {
        "pend": "~1.2.0"
      }
    },
    "node_modules/https-proxy-agent": {
      "version": "7.0.6",
      "resolved": "https://registry.npmjs.org/https-proxy-agent/-/https-proxy-agent-7.0.6.tgz",
      "integrity": "sha512-vK9P5/iUfdl95AI+JVyUuIcVtd4ofvtrOr3HNtM2yxC9bnMbEdp3x01OhQNnjb8IJYi38VlTE3mBXwcfvywuSw==",
      "license": "MIT",
      "dependencies": {
        "agent-base": "^7.1.2",
        "debug": "4"
      },
      "engines": {
        "node": ">= 14"
      }
    },
    "node_modules/ms": {
      "version": "2.1.3",
      "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz",
      "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==",
      "license": "MIT"
    },
    "node_modules/pend": {
      "version": "1.2.0",
      "resolved": "https://registry.npmjs.org/pend/-/pend-1.2.0.tgz",
      "integrity": "sha512-F3asv42UuXchdzt+xXqfW1OGlVBe+mxa2mqI0pg5yAHZPvFmY3Y6drSf/GQ1A86WgWEN9Kzh/WrgKa6iGcHXLg==",
      "license": "MIT"
    },
    "node_modules/proxy-from-env": {
      "version": "1.1.0",
      "resolved": "https://registry.npmjs.org/proxy-from-env/-/proxy-from-env-1.1.0.tgz",
      "integrity": "sha512-D+zkORCbA9f1tdWRK0RaCR3GPv50cMxcrz4X8k5LTSUD1Dkw47mKJEZQNunItRTkWwgtaUSo1RVFRIG9ZXiFYg==",
      "license": "MIT"
    },
    "node_modules/yauzl": {
      "version": "2.10.0",
      "resolved": "https://registry.npmjs.org/yauzl/-/yauzl-2.10.0.tgz",
      "integrity": "sha512-p4a9I6X6nu6IhoGmBqAcbJy1mlC4j27vEPZX9F4L4/vZT3Lyq1VkFHw/V/PUcB9Buo+DG3iHkT0x3Qya58zc3g==",
      "license": "MIT",
      "dependencies": {
        "buffer-crc32": "~0.2.3",
        "fd-slicer": "~1.1.0"
      }
    }
  }
@@ -7,15 +7,20 @@
  },
  "type": "module",
  "engines": {
    "node": ">=16"
    "node": ">=20"
  },
  "files": [
    "bin",
    "vendor"
    "dist"
  ],
  "repository": {
    "type": "git",
    "url": "git+https://github.com/openai/codex.git",
    "directory": "codex-cli"
    "url": "git+https://github.com/openai/codex.git"
  },
  "dependencies": {
    "@vscode/ripgrep": "^1.15.14"
  },
  "devDependencies": {
    "prettier": "^3.3.3"
  }
}
@@ -5,7 +5,5 @@ Run the following:
To build the 0.2.x or later version of the npm module, which runs the Rust version of the CLI, build it as follows:

```bash
./codex-cli/scripts/build_npm_package.py --release-version 0.6.0
./codex-cli/scripts/stage_rust_release.py --release-version 0.6.0
```

Note this will create `./codex-cli/vendor/` as a side effect.
@@ -1,307 +0,0 @@
#!/usr/bin/env python3
"""Stage and optionally package the @openai/codex npm module."""

import argparse
import json
import re
import shutil
import subprocess
import sys
import tempfile
from pathlib import Path

SCRIPT_DIR = Path(__file__).resolve().parent
CODEX_CLI_ROOT = SCRIPT_DIR.parent
REPO_ROOT = CODEX_CLI_ROOT.parent
RESPONSES_API_PROXY_NPM_ROOT = REPO_ROOT / "codex-rs" / "responses-api-proxy" / "npm"
GITHUB_REPO = "openai/codex"

# The docs are not clear on what the expected value/format of
# workflow/workflowName is:
# https://cli.github.com/manual/gh_run_list
WORKFLOW_NAME = ".github/workflows/rust-release.yml"


def parse_args() -> argparse.Namespace:
    parser = argparse.ArgumentParser(description="Build or stage the Codex CLI npm package.")
    parser.add_argument(
        "--package",
        choices=("codex", "codex-responses-api-proxy"),
        default="codex",
        help="Which npm package to stage (default: codex).",
    )
    parser.add_argument(
        "--version",
        help="Version number to write to package.json inside the staged package.",
    )
    parser.add_argument(
        "--release-version",
        help=(
            "Version to stage for npm release. When provided, the script also resolves the "
            "matching rust-release workflow unless --workflow-url is supplied."
        ),
    )
    parser.add_argument(
        "--workflow-url",
        help="Optional GitHub Actions workflow run URL used to download native binaries.",
    )
    parser.add_argument(
        "--staging-dir",
        type=Path,
        help=(
            "Directory to stage the package contents. Defaults to a new temporary directory "
            "if omitted. The directory must be empty when provided."
        ),
    )
    parser.add_argument(
        "--tmp",
        dest="staging_dir",
        type=Path,
        help=argparse.SUPPRESS,
    )
    parser.add_argument(
        "--pack-output",
        type=Path,
        help="Path where the generated npm tarball should be written.",
    )
    return parser.parse_args()


def main() -> int:
    args = parse_args()

    package = args.package
    version = args.version
    release_version = args.release_version
    if release_version:
        if version and version != release_version:
            raise RuntimeError("--version and --release-version must match when both are provided.")
        version = release_version

    if not version:
        raise RuntimeError("Must specify --version or --release-version.")

    staging_dir, created_temp = prepare_staging_dir(args.staging_dir)

    try:
        stage_sources(staging_dir, version, package)

        workflow_url = args.workflow_url
        resolved_head_sha: str | None = None
        if not workflow_url:
            if release_version:
                workflow = resolve_release_workflow(version)
                workflow_url = workflow["url"]
                resolved_head_sha = workflow.get("headSha")
            else:
                workflow_url = resolve_latest_alpha_workflow_url()
        elif release_version:
            try:
                workflow = resolve_release_workflow(version)
                resolved_head_sha = workflow.get("headSha")
            except Exception:
                resolved_head_sha = None

        if release_version and resolved_head_sha:
            print(f"should `git checkout {resolved_head_sha}`")

        if not workflow_url:
            raise RuntimeError("Unable to determine workflow URL for native binaries.")

        install_native_binaries(staging_dir, workflow_url, package)

        if release_version:
            staging_dir_str = str(staging_dir)
            if package == "codex":
                print(
                    f"Staged version {version} for release in {staging_dir_str}\n\n"
                    "Verify the CLI:\n"
                    f"  node {staging_dir_str}/bin/codex.js --version\n"
                    f"  node {staging_dir_str}/bin/codex.js --help\n\n"
                )
            else:
                print(
                    f"Staged version {version} for release in {staging_dir_str}\n\n"
                    "Verify the responses API proxy:\n"
                    f"  node {staging_dir_str}/bin/codex-responses-api-proxy.js --help\n\n"
                )
        else:
            print(f"Staged package in {staging_dir}")

        if args.pack_output is not None:
            output_path = run_npm_pack(staging_dir, args.pack_output)
            print(f"npm pack output written to {output_path}")
    finally:
        if created_temp:
            # Preserve the staging directory for further inspection.
            pass

    return 0


def prepare_staging_dir(staging_dir: Path | None) -> tuple[Path, bool]:
    if staging_dir is not None:
        staging_dir = staging_dir.resolve()
        staging_dir.mkdir(parents=True, exist_ok=True)
        if any(staging_dir.iterdir()):
            raise RuntimeError(f"Staging directory {staging_dir} is not empty.")
        return staging_dir, False

    temp_dir = Path(tempfile.mkdtemp(prefix="codex-npm-stage-"))
    return temp_dir, True


def stage_sources(staging_dir: Path, version: str, package: str) -> None:
    bin_dir = staging_dir / "bin"
    bin_dir.mkdir(parents=True, exist_ok=True)

    if package == "codex":
        shutil.copy2(CODEX_CLI_ROOT / "bin" / "codex.js", bin_dir / "codex.js")
        rg_manifest = CODEX_CLI_ROOT / "bin" / "rg"
        if rg_manifest.exists():
            shutil.copy2(rg_manifest, bin_dir / "rg")

        readme_src = REPO_ROOT / "README.md"
        if readme_src.exists():
            shutil.copy2(readme_src, staging_dir / "README.md")

        package_json_path = CODEX_CLI_ROOT / "package.json"
    elif package == "codex-responses-api-proxy":
        launcher_src = RESPONSES_API_PROXY_NPM_ROOT / "bin" / "codex-responses-api-proxy.js"
        shutil.copy2(launcher_src, bin_dir / "codex-responses-api-proxy.js")

        readme_src = RESPONSES_API_PROXY_NPM_ROOT / "README.md"
        if readme_src.exists():
            shutil.copy2(readme_src, staging_dir / "README.md")

        package_json_path = RESPONSES_API_PROXY_NPM_ROOT / "package.json"
    else:
        raise RuntimeError(f"Unknown package '{package}'.")

    with open(package_json_path, "r", encoding="utf-8") as fh:
        package_json = json.load(fh)
    package_json["version"] = version

    with open(staging_dir / "package.json", "w", encoding="utf-8") as out:
        json.dump(package_json, out, indent=2)
        out.write("\n")


def install_native_binaries(staging_dir: Path, workflow_url: str, package: str) -> None:
    package_components = {
        "codex": ["codex", "rg"],
        "codex-responses-api-proxy": ["codex-responses-api-proxy"],
    }

    components = package_components.get(package)
    if components is None:
        raise RuntimeError(f"Unknown package '{package}'.")

    cmd = ["./scripts/install_native_deps.py", "--workflow-url", workflow_url]
    for component in components:
        cmd.extend(["--component", component])
    cmd.append(str(staging_dir))
    subprocess.check_call(cmd, cwd=CODEX_CLI_ROOT)


def resolve_latest_alpha_workflow_url() -> str:
    version = determine_latest_alpha_version()
    workflow = resolve_release_workflow(version)
    return workflow["url"]


def determine_latest_alpha_version() -> str:
    releases = list_releases()
    best_key: tuple[int, int, int, int] | None = None
    best_version: str | None = None
    pattern = re.compile(r"^rust-v(\d+)\.(\d+)\.(\d+)-alpha\.(\d+)$")
    for release in releases:
        tag = release.get("tag_name", "")
        match = pattern.match(tag)
        if not match:
            continue
        key = tuple(int(match.group(i)) for i in range(1, 5))
        if best_key is None or key > best_key:
            best_key = key
            best_version = (
                f"{match.group(1)}.{match.group(2)}.{match.group(3)}-alpha.{match.group(4)}"
            )

    if best_version is None:
        raise RuntimeError("No alpha releases found when resolving workflow URL.")
    return best_version


def list_releases() -> list[dict]:
    stdout = subprocess.check_output(
        ["gh", "api", f"/repos/{GITHUB_REPO}/releases?per_page=100"],
        text=True,
    )
    try:
        releases = json.loads(stdout or "[]")
    except json.JSONDecodeError as exc:
        raise RuntimeError("Unable to parse releases JSON.") from exc
    if not isinstance(releases, list):
        raise RuntimeError("Unexpected response when listing releases.")
    return releases


def resolve_release_workflow(version: str) -> dict:
    stdout = subprocess.check_output(
        [
            "gh",
            "run",
            "list",
            "--branch",
            f"rust-v{version}",
            "--json",
            "workflowName,url,headSha",
            "--workflow",
            WORKFLOW_NAME,
            "--jq",
            "first(.[])",
        ],
        text=True,
    )
    workflow = json.loads(stdout or "[]")
    if not workflow:
        raise RuntimeError(f"Unable to find rust-release workflow for version {version}.")
    return workflow


def run_npm_pack(staging_dir: Path, output_path: Path) -> Path:
    output_path = output_path.resolve()
    output_path.parent.mkdir(parents=True, exist_ok=True)

    with tempfile.TemporaryDirectory(prefix="codex-npm-pack-") as pack_dir_str:
        pack_dir = Path(pack_dir_str)
        stdout = subprocess.check_output(
            ["npm", "pack", "--json", "--pack-destination", str(pack_dir)],
            cwd=staging_dir,
            text=True,
        )
        try:
            pack_output = json.loads(stdout)
        except json.JSONDecodeError as exc:
            raise RuntimeError("Failed to parse npm pack output.") from exc

        if not pack_output:
            raise RuntimeError("npm pack did not produce an output tarball.")

        tarball_name = pack_output[0].get("filename") or pack_output[0].get("name")
        if not tarball_name:
            raise RuntimeError("Unable to determine npm pack output filename.")

        tarball_path = pack_dir / tarball_name
        if not tarball_path.exists():
            raise RuntimeError(f"Expected npm pack output not found: {tarball_path}")

        shutil.move(str(tarball_path), output_path)

    return output_path


if __name__ == "__main__":
    sys.exit(main())
@@ -1,383 +0,0 @@
#!/usr/bin/env python3
"""Install Codex native binaries (Rust CLI plus ripgrep helpers)."""

import argparse
import json
import os
import shutil
import subprocess
import tarfile
import tempfile
import zipfile
from dataclasses import dataclass
from concurrent.futures import ThreadPoolExecutor, as_completed
from pathlib import Path
from typing import Iterable, Sequence
from urllib.parse import urlparse
from urllib.request import urlopen

SCRIPT_DIR = Path(__file__).resolve().parent
CODEX_CLI_ROOT = SCRIPT_DIR.parent
DEFAULT_WORKFLOW_URL = "https://github.com/openai/codex/actions/runs/17952349351"  # rust-v0.40.0
VENDOR_DIR_NAME = "vendor"
RG_MANIFEST = CODEX_CLI_ROOT / "bin" / "rg"
BINARY_TARGETS = (
    "x86_64-unknown-linux-musl",
    "aarch64-unknown-linux-musl",
    "x86_64-apple-darwin",
    "aarch64-apple-darwin",
    "x86_64-pc-windows-msvc",
    "aarch64-pc-windows-msvc",
)


@dataclass(frozen=True)
class BinaryComponent:
    artifact_prefix: str  # matches the artifact filename prefix (e.g. codex-<target>.zst)
    dest_dir: str  # directory under vendor/<target>/ where the binary is installed
    binary_basename: str  # executable name inside dest_dir (before optional .exe)


BINARY_COMPONENTS = {
    "codex": BinaryComponent(
        artifact_prefix="codex",
        dest_dir="codex",
        binary_basename="codex",
    ),
    "codex-responses-api-proxy": BinaryComponent(
        artifact_prefix="codex-responses-api-proxy",
        dest_dir="codex-responses-api-proxy",
        binary_basename="codex-responses-api-proxy",
    ),
}

RG_TARGET_PLATFORM_PAIRS: list[tuple[str, str]] = [
    ("x86_64-unknown-linux-musl", "linux-x86_64"),
    ("aarch64-unknown-linux-musl", "linux-aarch64"),
    ("x86_64-apple-darwin", "macos-x86_64"),
    ("aarch64-apple-darwin", "macos-aarch64"),
    ("x86_64-pc-windows-msvc", "windows-x86_64"),
    ("aarch64-pc-windows-msvc", "windows-aarch64"),
]
RG_TARGET_TO_PLATFORM = {target: platform for target, platform in RG_TARGET_PLATFORM_PAIRS}
DEFAULT_RG_TARGETS = [target for target, _ in RG_TARGET_PLATFORM_PAIRS]


def parse_args() -> argparse.Namespace:
    parser = argparse.ArgumentParser(description="Install native Codex binaries.")
    parser.add_argument(
        "--workflow-url",
        help=(
            "GitHub Actions workflow URL that produced the artifacts. Defaults to a "
            "known good run when omitted."
        ),
    )
    parser.add_argument(
        "--component",
        dest="components",
        action="append",
        choices=tuple(list(BINARY_COMPONENTS) + ["rg"]),
        help=(
            "Limit installation to the specified components."
            " May be repeated. Defaults to 'codex' and 'rg'."
        ),
    )
    parser.add_argument(
        "root",
        nargs="?",
        type=Path,
        help=(
            "Directory containing package.json for the staged package. If omitted, the "
            "repository checkout is used."
        ),
    )
    return parser.parse_args()


def main() -> int:
    args = parse_args()

    codex_cli_root = (args.root or CODEX_CLI_ROOT).resolve()
    vendor_dir = codex_cli_root / VENDOR_DIR_NAME
    vendor_dir.mkdir(parents=True, exist_ok=True)

    components = args.components or ["codex", "rg"]

    workflow_url = (args.workflow_url or DEFAULT_WORKFLOW_URL).strip()
    if not workflow_url:
        workflow_url = DEFAULT_WORKFLOW_URL

    workflow_id = workflow_url.rstrip("/").split("/")[-1]
    print(f"Downloading native artifacts from workflow {workflow_id}...")

    with tempfile.TemporaryDirectory(prefix="codex-native-artifacts-") as artifacts_dir_str:
        artifacts_dir = Path(artifacts_dir_str)
        _download_artifacts(workflow_id, artifacts_dir)
        install_binary_components(
            artifacts_dir,
            vendor_dir,
            BINARY_TARGETS,
            [name for name in components if name in BINARY_COMPONENTS],
        )

    if "rg" in components:
        print("Fetching ripgrep binaries...")
        fetch_rg(vendor_dir, DEFAULT_RG_TARGETS, manifest_path=RG_MANIFEST)

    print(f"Installed native dependencies into {vendor_dir}")
    return 0


def fetch_rg(
    vendor_dir: Path,
    targets: Sequence[str] | None = None,
    *,
    manifest_path: Path,
) -> list[Path]:
    """Download ripgrep binaries described by the DotSlash manifest."""

    if targets is None:
        targets = DEFAULT_RG_TARGETS

    if not manifest_path.exists():
        raise FileNotFoundError(f"DotSlash manifest not found: {manifest_path}")

    manifest = _load_manifest(manifest_path)
    platforms = manifest.get("platforms", {})

    vendor_dir.mkdir(parents=True, exist_ok=True)

    targets = list(targets)
    if not targets:
        return []

    task_configs: list[tuple[str, str, dict]] = []
    for target in targets:
        platform_key = RG_TARGET_TO_PLATFORM.get(target)
        if platform_key is None:
            raise ValueError(f"Unsupported ripgrep target '{target}'.")

        platform_info = platforms.get(platform_key)
        if platform_info is None:
            raise RuntimeError(f"Platform '{platform_key}' not found in manifest {manifest_path}.")

        task_configs.append((target, platform_key, platform_info))

    results: dict[str, Path] = {}
    max_workers = min(len(task_configs), max(1, (os.cpu_count() or 1)))

    print("Installing ripgrep binaries for targets: " + ", ".join(targets))

    with ThreadPoolExecutor(max_workers=max_workers) as executor:
        future_map = {
            executor.submit(
                _fetch_single_rg,
                vendor_dir,
                target,
                platform_key,
                platform_info,
                manifest_path,
            ): target
            for target, platform_key, platform_info in task_configs
        }

        for future in as_completed(future_map):
            target = future_map[future]
            results[target] = future.result()
            print(f"  installed ripgrep for {target}")

    return [results[target] for target in targets]


def _download_artifacts(workflow_id: str, dest_dir: Path) -> None:
    cmd = [
        "gh",
        "run",
        "download",
        "--dir",
        str(dest_dir),
        "--repo",
        "openai/codex",
        workflow_id,
    ]
    subprocess.check_call(cmd)


def install_binary_components(
    artifacts_dir: Path,
    vendor_dir: Path,
    targets: Iterable[str],
    component_names: Sequence[str],
) -> None:
    selected_components = [BINARY_COMPONENTS[name] for name in component_names if name in BINARY_COMPONENTS]
    if not selected_components:
        return

    targets = list(targets)
    if not targets:
        return

    for component in selected_components:
        print(
            f"Installing {component.binary_basename} binaries for targets: "
            + ", ".join(targets)
        )
        max_workers = min(len(targets), max(1, (os.cpu_count() or 1)))
        with ThreadPoolExecutor(max_workers=max_workers) as executor:
            futures = {
                executor.submit(
                    _install_single_binary,
                    artifacts_dir,
                    vendor_dir,
                    target,
                    component,
                ): target
                for target in targets
            }
            for future in as_completed(futures):
                installed_path = future.result()
                print(f"  installed {installed_path}")


def _install_single_binary(
    artifacts_dir: Path,
    vendor_dir: Path,
    target: str,
    component: BinaryComponent,
) -> Path:
    artifact_subdir = artifacts_dir / target
    archive_name = _archive_name_for_target(component.artifact_prefix, target)
    archive_path = artifact_subdir / archive_name
    if not archive_path.exists():
        raise FileNotFoundError(f"Expected artifact not found: {archive_path}")

    dest_dir = vendor_dir / target / component.dest_dir
    dest_dir.mkdir(parents=True, exist_ok=True)

    binary_name = (
        f"{component.binary_basename}.exe" if "windows" in target else component.binary_basename
    )
    dest = dest_dir / binary_name
    dest.unlink(missing_ok=True)
    extract_archive(archive_path, "zst", None, dest)
    if "windows" not in target:
        dest.chmod(0o755)
    return dest


def _archive_name_for_target(artifact_prefix: str, target: str) -> str:
    if "windows" in target:
        return f"{artifact_prefix}-{target}.exe.zst"
    return f"{artifact_prefix}-{target}.zst"


def _fetch_single_rg(
    vendor_dir: Path,
    target: str,
    platform_key: str,
    platform_info: dict,
    manifest_path: Path,
) -> Path:
    providers = platform_info.get("providers", [])
    if not providers:
        raise RuntimeError(f"No providers listed for platform '{platform_key}' in {manifest_path}.")

    url = providers[0]["url"]
    archive_format = platform_info.get("format", "zst")
    archive_member = platform_info.get("path")

    dest_dir = vendor_dir / target / "path"
    dest_dir.mkdir(parents=True, exist_ok=True)

    is_windows = platform_key.startswith("win")
    binary_name = "rg.exe" if is_windows else "rg"
    dest = dest_dir / binary_name

    with tempfile.TemporaryDirectory() as tmp_dir_str:
        tmp_dir = Path(tmp_dir_str)
        archive_filename = os.path.basename(urlparse(url).path)
        download_path = tmp_dir / archive_filename
        _download_file(url, download_path)

        dest.unlink(missing_ok=True)
        extract_archive(download_path, archive_format, archive_member, dest)

    if not is_windows:
        dest.chmod(0o755)

    return dest


def _download_file(url: str, dest: Path) -> None:
    dest.parent.mkdir(parents=True, exist_ok=True)
    with urlopen(url) as response, open(dest, "wb") as out:
        shutil.copyfileobj(response, out)


def extract_archive(
    archive_path: Path,
    archive_format: str,
    archive_member: str | None,
    dest: Path,
) -> None:
    dest.parent.mkdir(parents=True, exist_ok=True)

    if archive_format == "zst":
        output_path = archive_path.parent / dest.name
        subprocess.check_call(
            ["zstd", "-f", "-d", str(archive_path), "-o", str(output_path)]
        )
        shutil.move(str(output_path), dest)
        return

    if archive_format == "tar.gz":
        if not archive_member:
            raise RuntimeError("Missing 'path' for tar.gz archive in DotSlash manifest.")
        with tarfile.open(archive_path, "r:gz") as tar:
            try:
                member = tar.getmember(archive_member)
            except KeyError as exc:
                raise RuntimeError(
                    f"Entry '{archive_member}' not found in archive {archive_path}."
                ) from exc
            tar.extract(member, path=archive_path.parent, filter="data")
        extracted = archive_path.parent / archive_member
        shutil.move(str(extracted), dest)
        return

    if archive_format == "zip":
        if not archive_member:
            raise RuntimeError("Missing 'path' for zip archive in DotSlash manifest.")
        with zipfile.ZipFile(archive_path) as archive:
            try:
                with archive.open(archive_member) as src, open(dest, "wb") as out:
                    shutil.copyfileobj(src, out)
            except KeyError as exc:
                raise RuntimeError(
                    f"Entry '{archive_member}' not found in archive {archive_path}."
                ) from exc
        return

    raise RuntimeError(f"Unsupported archive format '{archive_format}'.")


def _load_manifest(manifest_path: Path) -> dict:
    cmd = ["dotslash", "--", "parse", str(manifest_path)]
    stdout = subprocess.check_output(cmd, text=True)
    try:
        manifest = json.loads(stdout)
    except json.JSONDecodeError as exc:
        raise RuntimeError(f"Invalid DotSlash manifest output from {manifest_path}.") from exc

    if not isinstance(manifest, dict):
        raise RuntimeError(
            f"Unexpected DotSlash manifest structure for {manifest_path}: {type(manifest)!r}"
        )

    return manifest


if __name__ == "__main__":
    import sys

    sys.exit(main())
codex-cli/scripts/install_native_deps.sh (new executable file): 94 lines
@@ -0,0 +1,94 @@
#!/usr/bin/env bash

# Install native runtime dependencies for codex-cli.
#
# Usage
#   install_native_deps.sh [--workflow-url URL] [CODEX_CLI_ROOT]
#
# The optional CODEX_CLI_ROOT is the path that contains package.json. Omitting
# it installs the binaries into the repository's own bin/ folder to support
# local development.

set -euo pipefail

# ------------------
# Parse arguments
# ------------------

CODEX_CLI_ROOT=""

# Until we start publishing stable GitHub releases, we have to grab the binaries
# from the GitHub Action that created them. Update the URL below to point to the
# appropriate workflow run:
WORKFLOW_URL="https://github.com/openai/codex/actions/runs/17417194663" # rust-v0.28.0

while [[ $# -gt 0 ]]; do
  case "$1" in
    --workflow-url)
      shift || { echo "--workflow-url requires an argument"; exit 1; }
      if [ -n "$1" ]; then
        WORKFLOW_URL="$1"
      fi
      ;;
    *)
      if [[ -z "$CODEX_CLI_ROOT" ]]; then
        CODEX_CLI_ROOT="$1"
      else
        echo "Unexpected argument: $1" >&2
        exit 1
      fi
      ;;
  esac
  shift
done

# ----------------------------------------------------------------------------
# Determine where the binaries should be installed.
# ----------------------------------------------------------------------------

if [ -n "$CODEX_CLI_ROOT" ]; then
  # The caller supplied a release root directory.
  BIN_DIR="$CODEX_CLI_ROOT/bin"
else
  # No argument; fall back to the repo's own bin directory.
  # Resolve the path of this script, then walk up to the repo root.
  SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
  CODEX_CLI_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
  BIN_DIR="$CODEX_CLI_ROOT/bin"
fi

# Make sure the destination directory exists.
mkdir -p "$BIN_DIR"

# ----------------------------------------------------------------------------
# Download and decompress the artifacts from the GitHub Actions workflow.
# ----------------------------------------------------------------------------

WORKFLOW_ID="${WORKFLOW_URL##*/}"

ARTIFACTS_DIR="$(mktemp -d)"
trap 'rm -rf "$ARTIFACTS_DIR"' EXIT

# NB: The GitHub CLI `gh` must be installed and authenticated.
gh run download --dir "$ARTIFACTS_DIR" --repo openai/codex "$WORKFLOW_ID"

# x64 Linux
zstd -d "$ARTIFACTS_DIR/x86_64-unknown-linux-musl/codex-x86_64-unknown-linux-musl.zst" \
  -o "$BIN_DIR/codex-x86_64-unknown-linux-musl"
# ARM64 Linux
zstd -d "$ARTIFACTS_DIR/aarch64-unknown-linux-musl/codex-aarch64-unknown-linux-musl.zst" \
  -o "$BIN_DIR/codex-aarch64-unknown-linux-musl"
# x64 macOS
zstd -d "$ARTIFACTS_DIR/x86_64-apple-darwin/codex-x86_64-apple-darwin.zst" \
  -o "$BIN_DIR/codex-x86_64-apple-darwin"
# ARM64 macOS
zstd -d "$ARTIFACTS_DIR/aarch64-apple-darwin/codex-aarch64-apple-darwin.zst" \
  -o "$BIN_DIR/codex-aarch64-apple-darwin"
# x64 Windows
zstd -d "$ARTIFACTS_DIR/x86_64-pc-windows-msvc/codex-x86_64-pc-windows-msvc.exe.zst" \
  -o "$BIN_DIR/codex-x86_64-pc-windows-msvc.exe"
# ARM64 Windows
zstd -d "$ARTIFACTS_DIR/aarch64-pc-windows-msvc/codex-aarch64-pc-windows-msvc.exe.zst" \
  -o "$BIN_DIR/codex-aarch64-pc-windows-msvc.exe"

echo "Installed native dependencies into $BIN_DIR"
codex-cli/scripts/stage_release.sh (new executable file): 120 lines
@@ -0,0 +1,120 @@
#!/usr/bin/env bash
# -----------------------------------------------------------------------------
# stage_release.sh
# -----------------------------------------------------------------------------
# Stages an npm release for @openai/codex.
#
# Usage:
#
#   --tmp <dir> : Use <dir> instead of a freshly created temp directory.
#   -h|--help   : Print usage.
#
# -----------------------------------------------------------------------------

set -euo pipefail

# Helper - usage / flag parsing

usage() {
  cat <<EOF
Usage: $(basename "$0") [--tmp DIR] [--version VERSION]

Options
  --tmp DIR   Use DIR to stage the release (defaults to a fresh mktemp dir)
  --version   Specify the version to release (defaults to a timestamp-based version)
  -h, --help  Show this help

Legacy positional argument: the first non-flag argument is still interpreted
as the temporary directory (for backwards compatibility) but is deprecated.
EOF
  exit "${1:-0}"
}

TMPDIR=""
# Default to a timestamp-based version (keep same scheme as before)
VERSION="$(printf '0.1.%d' "$(date +%y%m%d%H%M)")"
WORKFLOW_URL=""

# Manual flag parser - Bash getopts does not handle GNU long options well.
while [[ $# -gt 0 ]]; do
  case "$1" in
    --tmp)
      shift || { echo "--tmp requires an argument"; usage 1; }
      TMPDIR="$1"
      ;;
    --tmp=*)
      TMPDIR="${1#*=}"
      ;;
    --version)
      shift || { echo "--version requires an argument"; usage 1; }
      VERSION="$1"
      ;;
    --workflow-url)
      shift || { echo "--workflow-url requires an argument"; exit 1; }
      WORKFLOW_URL="$1"
      ;;
    -h|--help)
      usage 0
      ;;
    --*)
      echo "Unknown option: $1" >&2
      usage 1
      ;;
    *)
      echo "Unexpected extra argument: $1" >&2
      usage 1
      ;;
  esac
  shift
done

# If no directory was specified, create a fresh temporary one.
if [[ -z "$TMPDIR" ]]; then
  TMPDIR="$(mktemp -d)"
fi

# Ensure the directory exists, then resolve to an absolute path.
mkdir -p "$TMPDIR"
TMPDIR="$(cd "$TMPDIR" && pwd)"

# Main build logic

echo "Staging release in $TMPDIR"

# The script lives in codex-cli/scripts/ - change into codex-cli root so that
# relative paths keep working.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
CODEX_CLI_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"

pushd "$CODEX_CLI_ROOT" >/dev/null

# 1. Build the JS artifacts ---------------------------------------------------

# Paths inside the staged package
mkdir -p "$TMPDIR/bin"

cp -r bin/codex.js "$TMPDIR/bin/codex.js"
cp ../README.md "$TMPDIR" || true # README is one level up - ignore if missing

# Modify package.json - bump version and optionally add the native directory to
# the files array so that the binaries are published to npm.

jq --arg version "$VERSION" \
  '.version = $version' \
  package.json > "$TMPDIR/package.json"

# 2. Native runtime deps (sandbox plus optional Rust binaries)

./scripts/install_native_deps.sh --workflow-url "$WORKFLOW_URL" "$TMPDIR"

popd >/dev/null

echo "Staged version $VERSION for release in $TMPDIR"

echo "Verify the CLI:"
echo "  node ${TMPDIR}/bin/codex.js --version"
echo "  node ${TMPDIR}/bin/codex.js --help"

# Print final hint for convenience
echo "Next: cd \"$TMPDIR\" && npm publish"
codex-cli/scripts/stage_rust_release.py (new executable file): 70 lines
@@ -0,0 +1,70 @@
#!/usr/bin/env python3

import argparse
import json
import subprocess
import sys
from pathlib import Path


def main() -> int:
    parser = argparse.ArgumentParser(
        description="""Stage a release for the npm module.

Run this after the GitHub Release has been created and use
`--release-version` to specify the version to release.

Optionally pass `--tmp` to control the temporary staging directory that will be
forwarded to stage_release.sh.
"""
    )
    parser.add_argument(
        "--release-version", required=True, help="Version to release, e.g., 0.3.0"
    )
    parser.add_argument(
        "--tmp",
        help="Optional path to stage the npm package; forwarded to stage_release.sh",
    )
    args = parser.parse_args()
    version = args.release_version

    # Find the rust-release workflow run for the release branch so we can
    # stage against the exact commit and artifacts it produced. check=True
    # raises CalledProcessError if the gh invocation fails.
    gh_run = subprocess.run(
        [
            "gh",
            "run",
            "list",
            "--branch",
            f"rust-v{version}",
            "--json",
            "workflowName,url,headSha",
            "--jq",
            'first(.[] | select(.workflowName == "rust-release"))',
        ],
        stdout=subprocess.PIPE,
        check=True,
    )
    workflow = json.loads(gh_run.stdout)
    sha = workflow["headSha"]

    print(f"should `git checkout {sha}`")

    current_dir = Path(__file__).parent.resolve()
    cmd = [
        str(current_dir / "stage_release.sh"),
        "--version",
        version,
        "--workflow-url",
        workflow["url"],
    ]
    if args.tmp:
        cmd.extend(["--tmp", args.tmp])

    stage_release = subprocess.run(cmd)
    stage_release.check_returncode()

    return 0


if __name__ == "__main__":
    sys.exit(main())
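The script above delegates the workflow lookup to the GitHub CLI. A rough hand-runnable equivalent of that lookup, assuming `gh` is already authenticated and `0.3.0` stands in for the real release version:

```shell
# The same query the Python script issues: find the rust-release run for
# the release branch and extract its workflow name, URL, and head SHA.
gh run list \
  --branch "rust-v0.3.0" \
  --json workflowName,url,headSha \
  --jq 'first(.[] | select(.workflowName == "rust-release"))'

# Then stage the npm package against that run:
./codex-cli/scripts/stage_rust_release.py --release-version 0.3.0
```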
695
codex-rs/Cargo.lock
generated
File diff suppressed because it is too large
@@ -9,20 +9,15 @@ members = [
    "exec",
    "execpolicy",
    "file-search",
    "git-tooling",
    "linux-sandbox",
    "login",
    "mcp-client",
    "mcp-server",
    "mcp-types",
    "ollama",
    "process-hardening",
    "protocol",
    "protocol-ts",
    "rmcp-client",
    "responses-api-proxy",
    "tui",
    "utils/readiness",
]
resolver = "2"
@@ -34,175 +29,15 @@ version = "0.0.0"
# edition.
edition = "2024"

[workspace.dependencies]
# Internal
codex-ansi-escape = { path = "ansi-escape" }
codex-apply-patch = { path = "apply-patch" }
codex-arg0 = { path = "arg0" }
codex-chatgpt = { path = "chatgpt" }
codex-common = { path = "common" }
codex-core = { path = "core" }
codex-exec = { path = "exec" }
codex-file-search = { path = "file-search" }
codex-git-tooling = { path = "git-tooling" }
codex-linux-sandbox = { path = "linux-sandbox" }
codex-login = { path = "login" }
codex-mcp-client = { path = "mcp-client" }
codex-mcp-server = { path = "mcp-server" }
codex-ollama = { path = "ollama" }
codex-process-hardening = { path = "process-hardening" }
codex-protocol = { path = "protocol" }
codex-protocol-ts = { path = "protocol-ts" }
codex-rmcp-client = { path = "rmcp-client" }
codex-tui = { path = "tui" }
codex-utils-readiness = { path = "utils/readiness" }
core_test_support = { path = "core/tests/common" }
mcp-types = { path = "mcp-types" }
mcp_test_support = { path = "mcp-server/tests/common" }

# External
allocative = "0.3.3"
ansi-to-tui = "7.0.0"
anyhow = "1"
arboard = "3"
askama = "0.12"
assert_cmd = "2"
async-channel = "2.3.1"
async-stream = "0.3.6"
async-trait = "0.1.89"
base64 = "0.22.1"
bytes = "1.10.1"
chrono = "0.4.42"
clap = "4"
clap_complete = "4"
color-eyre = "0.6.3"
crossterm = "0.28.1"
ctor = "0.5.0"
derive_more = "2"
diffy = "0.4.2"
dirs = "6"
dotenvy = "0.15.7"
env-flags = "0.1.1"
env_logger = "0.11.5"
escargot = "0.5"
eventsource-stream = "0.2.3"
futures = "0.3"
icu_decimal = "2.0.0"
icu_locale_core = "2.0.0"
ignore = "0.4.23"
image = { version = "^0.25.8", default-features = false }
indexmap = "2.6.0"
insta = "1.43.2"
itertools = "0.14.0"
landlock = "0.4.1"
lazy_static = "1"
libc = "0.2.175"
log = "0.4"
maplit = "1.0.2"
mime_guess = "2.0.5"
multimap = "0.10.0"
nucleo-matcher = "0.3.1"
openssl-sys = "*"
os_info = "3.12.0"
owo-colors = "4.2.0"
path-absolutize = "3.1.1"
path-clean = "1.0.1"
pathdiff = "0.2"
portable-pty = "0.9.0"
predicates = "3"
pretty_assertions = "1.4.1"
pulldown-cmark = "0.10"
rand = "0.9"
ratatui = "0.29.0"
regex-lite = "0.1.7"
reqwest = "0.12"
schemars = "0.8.22"
seccompiler = "0.5.0"
serde = "1"
serde_json = "1"
serde_with = "3.14"
sha1 = "0.10.6"
sha2 = "0.10"
shlex = "1.3.0"
similar = "2.7.0"
starlark = "0.13.0"
strum = "0.27.2"
strum_macros = "0.27.2"
supports-color = "3.0.2"
sys-locale = "0.3.2"
tempfile = "3.23.0"
textwrap = "0.16.2"
thiserror = "2.0.16"
time = "0.3"
tiny_http = "0.12"
tokio = "1"
tokio-stream = "0.1.17"
tokio-test = "0.4"
tokio-util = "0.7.16"
toml = "0.9.5"
toml_edit = "0.23.4"
tracing = "0.1.41"
tracing-appender = "0.2.3"
tracing-subscriber = "0.3.20"
tree-sitter = "0.25.9"
tree-sitter-bash = "0.25.0"
ts-rs = "11"
unicode-segmentation = "1.12.0"
unicode-width = "0.2"
url = "2"
urlencoding = "2.1"
uuid = "1"
vt100 = "0.16.2"
walkdir = "2.5.0"
webbrowser = "1.0"
which = "6"
wildmatch = "2.5.0"
wiremock = "0.6"
zeroize = "1.8.1"

[workspace.lints]
rust = {}

[workspace.lints.clippy]
expect_used = "deny"
identity_op = "deny"
manual_clamp = "deny"
manual_filter = "deny"
manual_find = "deny"
manual_flatten = "deny"
manual_map = "deny"
manual_memcpy = "deny"
manual_non_exhaustive = "deny"
manual_ok_or = "deny"
manual_range_contains = "deny"
manual_retain = "deny"
manual_strip = "deny"
manual_try_fold = "deny"
manual_unwrap_or = "deny"
needless_borrow = "deny"
needless_borrowed_reference = "deny"
needless_collect = "deny"
needless_late_init = "deny"
needless_option_as_deref = "deny"
needless_question_mark = "deny"
needless_update = "deny"
redundant_clone = "deny"
redundant_closure = "deny"
redundant_closure_for_method_calls = "deny"
redundant_static_lifetimes = "deny"
trivially_copy_pass_by_ref = "deny"
uninlined_format_args = "deny"
unnecessary_filter_map = "deny"
unnecessary_lazy_evaluations = "deny"
unnecessary_sort_by = "deny"
unnecessary_to_owned = "deny"
unwrap_used = "deny"

# cargo-shear cannot see the platform-specific openssl-sys usage, so we
# silence the false positive here instead of deleting a real dependency.
[workspace.metadata.cargo-shear]
ignored = ["openssl-sys", "codex-utils-readiness"]

[profile.release]
lto = "fat"
# Because we bundle some of these executables with the TypeScript CLI, we
@@ -4,18 +4,18 @@ We provide Codex CLI as a standalone, native executable to ensure a zero-depende

## Installing Codex

Today, the easiest way to install Codex is via `npm`:
Today, the easiest way to install Codex is via `npm`, though we plan to publish Codex to other package managers soon.

```shell
npm i -g @openai/codex
npm i -g @openai/codex@native
codex
```

You can also install via Homebrew (`brew install codex`) or download a platform-specific release directly from our [GitHub Releases](https://github.com/openai/codex/releases).
You can also download a platform-specific release directly from our [GitHub Releases](https://github.com/openai/codex/releases).

## What's new in the Rust CLI

The Rust implementation is now the maintained Codex CLI and serves as the default experience. It includes a number of features that the legacy TypeScript CLI never supported.
While we are [working to close the gap between the TypeScript and Rust implementations of Codex CLI](https://github.com/openai/codex/issues/1262), note that the Rust CLI has a number of features that the TypeScript CLI does not!

### Config
@@ -8,9 +8,9 @@ name = "codex_ansi_escape"
path = "src/lib.rs"

[dependencies]
ansi-to-tui = { workspace = true }
ratatui = { workspace = true, features = [
ansi-to-tui = "7.0.0"
ratatui = { version = "0.29.0", features = [
    "unstable-rendered-line-info",
    "unstable-widget-ref",
] }
tracing = { workspace = true, features = ["log"] }
tracing = { version = "0.1.41", features = ["log"] }
@@ -15,13 +15,14 @@ path = "src/main.rs"
workspace = true

[dependencies]
anyhow = { workspace = true }
similar = { workspace = true }
thiserror = { workspace = true }
tree-sitter = { workspace = true }
tree-sitter-bash = { workspace = true }
anyhow = "1"
similar = "2.7.0"
thiserror = "2.0.16"
tree-sitter = "0.25.9"
tree-sitter-bash = "0.25.0"
once_cell = "1"

[dev-dependencies]
assert_cmd = { workspace = true }
pretty_assertions = { workspace = true }
tempfile = { workspace = true }
assert_cmd = "2"
pretty_assertions = "1.4.1"
tempfile = "3.13.0"
@@ -6,10 +6,10 @@ use std::collections::HashMap;
use std::path::Path;
use std::path::PathBuf;
use std::str::Utf8Error;
use std::sync::LazyLock;

use anyhow::Context;
use anyhow::Result;
use once_cell::sync::Lazy;
pub use parser::Hunk;
pub use parser::ParseError;
use parser::ParseError::*;
@@ -351,7 +351,7 @@ fn extract_apply_patch_from_bash(
// also run an arbitrary query against the AST. This is useful for understanding
// how tree-sitter parses the script and whether the query syntax is correct. Be sure
// to test both positive and negative cases.
static APPLY_PATCH_QUERY: LazyLock<Query> = LazyLock::new(|| {
static APPLY_PATCH_QUERY: Lazy<Query> = Lazy::new(|| {
    let language = BASH.into();
    #[expect(clippy::expect_used)]
    Query::new(
@@ -648,18 +648,21 @@ fn derive_new_contents_from_chunks(
        }
    };

    let mut original_lines: Vec<String> = original_contents.split('\n').map(String::from).collect();
    let mut original_lines: Vec<String> = original_contents
        .split('\n')
        .map(|s| s.to_string())
        .collect();

    // Drop the trailing empty element that results from the final newline so
    // that line counts match the behaviour of standard `diff`.
    if original_lines.last().is_some_and(String::is_empty) {
    if original_lines.last().is_some_and(|s| s.is_empty()) {
        original_lines.pop();
    }

    let replacements = compute_replacements(&original_lines, path, chunks)?;
    let new_lines = apply_replacements(original_lines, &replacements);
    let mut new_lines = new_lines;
    if !new_lines.last().is_some_and(String::is_empty) {
    if !new_lines.last().is_some_and(|s| s.is_empty()) {
        new_lines.push(String::new());
    }
    let new_contents = new_lines.join("\n");
@@ -703,7 +706,7 @@ fn compute_replacements(
        if chunk.old_lines.is_empty() {
            // Pure addition (no old lines). We'll add them at the end or just
            // before the final empty line if one exists.
            let insertion_idx = if original_lines.last().is_some_and(String::is_empty) {
            let insertion_idx = if original_lines.last().is_some_and(|s| s.is_empty()) {
                original_lines.len() - 1
            } else {
                original_lines.len()
@@ -729,11 +732,11 @@ fn compute_replacements(

        let mut new_slice: &[String] = &chunk.new_lines;

        if found.is_none() && pattern.last().is_some_and(String::is_empty) {
        if found.is_none() && pattern.last().is_some_and(|s| s.is_empty()) {
            // Retry without the trailing empty line which represents the final
            // newline in the file.
            pattern = &pattern[..pattern.len() - 1];
            if new_slice.last().is_some_and(String::is_empty) {
            if new_slice.last().is_some_and(|s| s.is_empty()) {
                new_slice = &new_slice[..new_slice.len() - 1];
            }
@@ -845,7 +848,6 @@ mod tests {
    use super::*;
    use pretty_assertions::assert_eq;
    use std::fs;
    use std::string::ToString;
    use tempfile::tempdir;

    /// Helper to construct a patch with the given body.
@@ -854,7 +856,7 @@ mod tests {
    }

    fn strs_to_strings(strs: &[&str]) -> Vec<String> {
        strs.iter().map(ToString::to_string).collect()
        strs.iter().map(|s| s.to_string()).collect()
    }

    // Test helpers to reduce repetition when building bash -lc heredoc scripts
@@ -112,10 +112,9 @@ pub(crate) fn seek_sequence(
#[cfg(test)]
mod tests {
    use super::seek_sequence;
    use std::string::ToString;

    fn to_vec(strings: &[&str]) -> Vec<String> {
        strings.iter().map(ToString::to_string).collect()
        strings.iter().map(|s| s.to_string()).collect()
    }

    #[test]
@@ -11,10 +11,10 @@ path = "src/lib.rs"
workspace = true

[dependencies]
anyhow = { workspace = true }
codex-apply-patch = { workspace = true }
codex-core = { workspace = true }
codex-linux-sandbox = { workspace = true }
dotenvy = { workspace = true }
tempfile = { workspace = true }
tokio = { workspace = true, features = ["rt-multi-thread"] }
anyhow = "1"
codex-apply-patch = { path = "../apply-patch" }
codex-core = { path = "../core" }
codex-linux-sandbox = { path = "../linux-sandbox" }
dotenvy = "0.15.7"
tempfile = "3"
tokio = { version = "1", features = ["rt-multi-thread"] }
@@ -54,7 +54,7 @@ where

    let argv1 = args.next().unwrap_or_default();
    if argv1 == CODEX_APPLY_PATCH_ARG1 {
        let patch_arg = args.next().and_then(|s| s.to_str().map(str::to_owned));
        let patch_arg = args.next().and_then(|s| s.to_str().map(|s| s.to_owned()));
        let exit_code = match patch_arg {
            Some(patch_arg) => {
                let mut stdout = std::io::stdout();
@@ -7,13 +7,13 @@ version = { workspace = true }
workspace = true

[dependencies]
anyhow = { workspace = true }
clap = { workspace = true, features = ["derive"] }
codex-common = { workspace = true, features = ["cli"] }
codex-core = { workspace = true }
serde = { workspace = true, features = ["derive"] }
serde_json = { workspace = true }
tokio = { workspace = true, features = ["full"] }
anyhow = "1"
clap = { version = "4", features = ["derive"] }
codex-common = { path = "../common", features = ["cli"] }
codex-core = { path = "../core" }
serde = { version = "1", features = ["derive"] }
serde_json = "1"
tokio = { version = "1", features = ["full"] }

[dev-dependencies]
tempfile = { workspace = true }
tempfile = "3"
@@ -15,36 +15,26 @@ path = "src/lib.rs"
workspace = true

[dependencies]
anyhow = { workspace = true }
clap = { workspace = true, features = ["derive"] }
clap_complete = { workspace = true }
codex-arg0 = { workspace = true }
codex-chatgpt = { workspace = true }
codex-common = { workspace = true, features = ["cli"] }
codex-core = { workspace = true }
codex-exec = { workspace = true }
codex-login = { workspace = true }
codex-mcp-server = { workspace = true }
codex-process-hardening = { workspace = true }
codex-protocol = { workspace = true }
codex-protocol-ts = { workspace = true }
codex-tui = { workspace = true }
ctor = { workspace = true }
owo-colors = { workspace = true }
serde_json = { workspace = true }
supports-color = { workspace = true }
tokio = { workspace = true, features = [
anyhow = "1"
clap = { version = "4", features = ["derive"] }
clap_complete = "4"
codex-arg0 = { path = "../arg0" }
codex-chatgpt = { path = "../chatgpt" }
codex-common = { path = "../common", features = ["cli"] }
codex-core = { path = "../core" }
codex-exec = { path = "../exec" }
codex-login = { path = "../login" }
codex-mcp-server = { path = "../mcp-server" }
codex-protocol = { path = "../protocol" }
codex-tui = { path = "../tui" }
serde_json = "1"
tokio = { version = "1", features = [
    "io-std",
    "macros",
    "process",
    "rt-multi-thread",
    "signal",
] }
tracing = { workspace = true }
tracing-subscriber = { workspace = true }

[dev-dependencies]
assert_cmd = { workspace = true }
predicates = { workspace = true }
pretty_assertions = { workspace = true }
tempfile = { workspace = true }
tracing = "0.1.41"
tracing-subscriber = "0.3.19"
codex-protocol-ts = { path = "../protocol-ts" }
@@ -64,6 +64,7 @@ async fn run_command_under_sandbox(
    sandbox_type: SandboxType,
) -> anyhow::Result<()> {
    let sandbox_mode = create_sandbox_mode(full_auto);
    let cwd = std::env::current_dir()?;
    let config = Config::load_with_cli_overrides(
        config_overrides
            .parse_overrides()
@@ -74,29 +75,13 @@ async fn run_command_under_sandbox(
            ..Default::default()
        },
    )?;

    // In practice, this should be `std::env::current_dir()` because this CLI
    // does not support `--cwd`, but let's use the config value for consistency.
    let cwd = config.cwd.clone();
    // For now, we always use the same cwd for both the command and the
    // sandbox policy. In the future, we could add a CLI option to set them
    // separately.
    let sandbox_policy_cwd = cwd.clone();

    let stdio_policy = StdioPolicy::Inherit;
    let env = create_env(&config.shell_environment_policy);

    let mut child = match sandbox_type {
        SandboxType::Seatbelt => {
            spawn_command_under_seatbelt(
                command,
                cwd,
                &config.sandbox_policy,
                sandbox_policy_cwd.as_path(),
                stdio_policy,
                env,
            )
            .await?
            spawn_command_under_seatbelt(command, &config.sandbox_policy, cwd, stdio_policy, env)
                .await?
        }
        SandboxType::Landlock => {
            #[expect(clippy::expect_used)]
@@ -106,9 +91,8 @@ async fn run_command_under_sandbox(
            spawn_command_under_linux_sandbox(
                codex_linux_sandbox_exe,
                command,
                cwd,
                &config.sandbox_policy,
                sandbox_policy_cwd.as_path(),
                cwd,
                stdio_policy,
                env,
            )
@@ -14,15 +14,9 @@ use codex_cli::login::run_logout;
use codex_cli::proto;
use codex_common::CliConfigOverrides;
use codex_exec::Cli as ExecCli;
use codex_tui::AppExitInfo;
use codex_tui::Cli as TuiCli;
use owo_colors::OwoColorize;
use std::path::PathBuf;
use supports_color::Stream;

mod mcp_cmd;

use crate::mcp_cmd::McpCli;
use crate::proto::ProtoCli;

/// Codex CLI
@@ -62,8 +56,8 @@ enum Subcommand {
    /// Remove stored authentication credentials.
    Logout(LogoutCommand),

    /// [experimental] Run Codex as an MCP server and manage MCP servers.
    Mcp(McpCli),
    /// Experimental: run Codex as an MCP server.
    Mcp,

    /// Run the Protocol stream via stdin/stdout
    #[clap(visible_alias = "p")]
@@ -104,9 +98,6 @@ struct ResumeCommand {
    /// Continue the most recent session without showing the picker.
    #[arg(long = "last", default_value_t = false, conflicts_with = "session_id")]
    last: bool,

    #[clap(flatten)]
    config_overrides: TuiCli,
}

#[derive(Debug, Parser)]
@@ -159,62 +150,6 @@ struct GenerateTsCommand {
    prettier: Option<PathBuf>,
}

fn format_exit_messages(exit_info: AppExitInfo, color_enabled: bool) -> Vec<String> {
    let AppExitInfo {
        token_usage,
        conversation_id,
    } = exit_info;

    if token_usage.is_zero() {
        return Vec::new();
    }

    let mut lines = vec![format!(
        "{}",
        codex_core::protocol::FinalOutput::from(token_usage)
    )];

    if let Some(session_id) = conversation_id {
        let resume_cmd = format!("codex resume {session_id}");
        let command = if color_enabled {
            resume_cmd.cyan().to_string()
        } else {
            resume_cmd
        };
        lines.push(format!("To continue this session, run {command}"));
    }

    lines
}

fn print_exit_messages(exit_info: AppExitInfo) {
    let color_enabled = supports_color::on(Stream::Stdout).is_some();
    for line in format_exit_messages(exit_info, color_enabled) {
        println!("{line}");
    }
}

pub(crate) const CODEX_SECURE_MODE_ENV_VAR: &str = "CODEX_SECURE_MODE";

/// As early as possible in the process lifecycle, apply hardening measures
/// if the CODEX_SECURE_MODE environment variable is set to "1".
#[ctor::ctor]
fn pre_main_hardening() {
    let secure_mode = match std::env::var(CODEX_SECURE_MODE_ENV_VAR) {
        Ok(value) => value,
        Err(_) => return,
    };

    if secure_mode == "1" {
        codex_process_hardening::pre_main_hardening();
    }

    // Always clear this env var so child processes don't inherit it.
    unsafe {
        std::env::remove_var(CODEX_SECURE_MODE_ENV_VAR);
    }
}

fn main() -> anyhow::Result<()> {
    arg0_dispatch_or_else(|codex_linux_sandbox_exe| async move {
        cli_main(codex_linux_sandbox_exe).await?;
@@ -235,8 +170,10 @@ async fn cli_main(codex_linux_sandbox_exe: Option<PathBuf>) -> anyhow::Result<()
                &mut interactive.config_overrides,
                root_config_overrides.clone(),
            );
            let exit_info = codex_tui::run_main(interactive, codex_linux_sandbox_exe).await?;
            print_exit_messages(exit_info);
            let usage = codex_tui::run_main(interactive, codex_linux_sandbox_exe).await?;
            if !usage.is_zero() {
                println!("{}", codex_core::protocol::FinalOutput::from(usage));
            }
        }
        Some(Subcommand::Exec(mut exec_cli)) => {
            prepend_config_flags(
@@ -245,22 +182,22 @@ async fn cli_main(codex_linux_sandbox_exe: Option<PathBuf>) -> anyhow::Result<()
            );
            codex_exec::run_main(exec_cli, codex_linux_sandbox_exe).await?;
        }
        Some(Subcommand::Mcp(mut mcp_cli)) => {
            // Propagate any root-level config overrides (e.g. `-c key=value`).
            prepend_config_flags(&mut mcp_cli.config_overrides, root_config_overrides.clone());
            mcp_cli.run(codex_linux_sandbox_exe).await?;
        Some(Subcommand::Mcp) => {
            codex_mcp_server::run_main(codex_linux_sandbox_exe, root_config_overrides.clone())
                .await?;
        }
        Some(Subcommand::Resume(ResumeCommand {
            session_id,
            last,
            config_overrides,
        })) => {
            interactive = finalize_resume_interactive(
                interactive,
        Some(Subcommand::Resume(ResumeCommand { session_id, last })) => {
            // Start with the parsed interactive CLI so resume shares the same
            // configuration surface area as `codex` without additional flags.
            let resume_session_id = session_id;
            interactive.resume_picker = resume_session_id.is_none() && !last;
            interactive.resume_last = last;
            interactive.resume_session_id = resume_session_id;

            // Propagate any root-level config overrides (e.g. `-c key=value`).
            prepend_config_flags(
                &mut interactive.config_overrides,
                root_config_overrides.clone(),
                session_id,
                last,
                config_overrides,
            );
            codex_tui::run_main(interactive, codex_linux_sandbox_exe).await?;
        }
@@ -349,256 +286,8 @@ fn prepend_config_flags(
        .splice(0..0, cli_config_overrides.raw_overrides);
}

/// Build the final `TuiCli` for a `codex resume` invocation.
fn finalize_resume_interactive(
    mut interactive: TuiCli,
    root_config_overrides: CliConfigOverrides,
    session_id: Option<String>,
    last: bool,
    resume_cli: TuiCli,
) -> TuiCli {
    // Start with the parsed interactive CLI so resume shares the same
    // configuration surface area as `codex` without additional flags.
    let resume_session_id = session_id;
    interactive.resume_picker = resume_session_id.is_none() && !last;
    interactive.resume_last = last;
    interactive.resume_session_id = resume_session_id;

    // Merge resume-scoped flags and overrides with highest precedence.
    merge_resume_cli_flags(&mut interactive, resume_cli);

    // Propagate any root-level config overrides (e.g. `-c key=value`).
    prepend_config_flags(&mut interactive.config_overrides, root_config_overrides);

    interactive
}

/// Merge flags provided to `codex resume` so they take precedence over any
/// root-level flags. Only overrides fields explicitly set on the resume-scoped
/// CLI. Also appends `-c key=value` overrides with highest precedence.
fn merge_resume_cli_flags(interactive: &mut TuiCli, resume_cli: TuiCli) {
    if let Some(model) = resume_cli.model {
        interactive.model = Some(model);
    }
    if resume_cli.oss {
        interactive.oss = true;
    }
    if let Some(profile) = resume_cli.config_profile {
        interactive.config_profile = Some(profile);
    }
    if let Some(sandbox) = resume_cli.sandbox_mode {
        interactive.sandbox_mode = Some(sandbox);
    }
    if let Some(approval) = resume_cli.approval_policy {
        interactive.approval_policy = Some(approval);
    }
    if resume_cli.full_auto {
        interactive.full_auto = true;
    }
    if resume_cli.dangerously_bypass_approvals_and_sandbox {
        interactive.dangerously_bypass_approvals_and_sandbox = true;
    }
    if let Some(cwd) = resume_cli.cwd {
        interactive.cwd = Some(cwd);
    }
    if resume_cli.web_search {
        interactive.web_search = true;
    }
    if !resume_cli.images.is_empty() {
        interactive.images = resume_cli.images;
    }
    if let Some(prompt) = resume_cli.prompt {
        interactive.prompt = Some(prompt);
    }

    interactive
        .config_overrides
        .raw_overrides
        .extend(resume_cli.config_overrides.raw_overrides);
}

fn print_completion(cmd: CompletionCommand) {
    let mut app = MultitoolCli::command();
    let name = "codex";
    generate(cmd.shell, &mut app, name, &mut std::io::stdout());
}

#[cfg(test)]
mod tests {
    use super::*;
    use codex_core::protocol::TokenUsage;
    use codex_protocol::mcp_protocol::ConversationId;

    fn finalize_from_args(args: &[&str]) -> TuiCli {
        let cli = MultitoolCli::try_parse_from(args).expect("parse");
        let MultitoolCli {
            interactive,
            config_overrides: root_overrides,
            subcommand,
        } = cli;

        let Subcommand::Resume(ResumeCommand {
            session_id,
            last,
            config_overrides: resume_cli,
        }) = subcommand.expect("resume present")
        else {
            unreachable!()
        };

        finalize_resume_interactive(interactive, root_overrides, session_id, last, resume_cli)
    }

    fn sample_exit_info(conversation: Option<&str>) -> AppExitInfo {
        let token_usage = TokenUsage {
            output_tokens: 2,
            total_tokens: 2,
            ..Default::default()
        };
        AppExitInfo {
            token_usage,
            conversation_id: conversation
                .map(ConversationId::from_string)
                .map(Result::unwrap),
        }
    }

    #[test]
    fn format_exit_messages_skips_zero_usage() {
        let exit_info = AppExitInfo {
            token_usage: TokenUsage::default(),
            conversation_id: None,
        };
        let lines = format_exit_messages(exit_info, false);
        assert!(lines.is_empty());
    }

    #[test]
    fn format_exit_messages_includes_resume_hint_without_color() {
        let exit_info = sample_exit_info(Some("123e4567-e89b-12d3-a456-426614174000"));
        let lines = format_exit_messages(exit_info, false);
        assert_eq!(
            lines,
            vec![
                "Token usage: total=2 input=0 output=2".to_string(),
                "To continue this session, run codex resume 123e4567-e89b-12d3-a456-426614174000"
                    .to_string(),
            ]
        );
    }

    #[test]
    fn format_exit_messages_applies_color_when_enabled() {
        let exit_info = sample_exit_info(Some("123e4567-e89b-12d3-a456-426614174000"));
        let lines = format_exit_messages(exit_info, true);
        assert_eq!(lines.len(), 2);
        assert!(lines[1].contains("\u{1b}[36m"));
    }

    #[test]
    fn resume_model_flag_applies_when_no_root_flags() {
        let interactive = finalize_from_args(["codex", "resume", "-m", "gpt-5-test"].as_ref());

        assert_eq!(interactive.model.as_deref(), Some("gpt-5-test"));
        assert!(interactive.resume_picker);
        assert!(!interactive.resume_last);
        assert_eq!(interactive.resume_session_id, None);
    }

    #[test]
    fn resume_picker_logic_none_and_not_last() {
        let interactive = finalize_from_args(["codex", "resume"].as_ref());
        assert!(interactive.resume_picker);
        assert!(!interactive.resume_last);
        assert_eq!(interactive.resume_session_id, None);
    }

    #[test]
    fn resume_picker_logic_last() {
        let interactive = finalize_from_args(["codex", "resume", "--last"].as_ref());
        assert!(!interactive.resume_picker);
        assert!(interactive.resume_last);
        assert_eq!(interactive.resume_session_id, None);
    }

    #[test]
    fn resume_picker_logic_with_session_id() {
        let interactive = finalize_from_args(["codex", "resume", "1234"].as_ref());
        assert!(!interactive.resume_picker);
        assert!(!interactive.resume_last);
        assert_eq!(interactive.resume_session_id.as_deref(), Some("1234"));
    }

    #[test]
    fn resume_merges_option_flags_and_full_auto() {
        let interactive = finalize_from_args(
            [
                "codex",
                "resume",
                "sid",
                "--oss",
                "--full-auto",
                "--search",
                "--sandbox",
                "workspace-write",
                "--ask-for-approval",
                "on-request",
                "-m",
                "gpt-5-test",
                "-p",
                "my-profile",
                "-C",
                "/tmp",
                "-i",
                "/tmp/a.png,/tmp/b.png",
            ]
            .as_ref(),
        );

        assert_eq!(interactive.model.as_deref(), Some("gpt-5-test"));
        assert!(interactive.oss);
        assert_eq!(interactive.config_profile.as_deref(), Some("my-profile"));
        assert!(matches!(
            interactive.sandbox_mode,
            Some(codex_common::SandboxModeCliArg::WorkspaceWrite)
        ));
        assert!(matches!(
            interactive.approval_policy,
            Some(codex_common::ApprovalModeCliArg::OnRequest)
        ));
        assert!(interactive.full_auto);
        assert_eq!(
            interactive.cwd.as_deref(),
            Some(std::path::Path::new("/tmp"))
        );
        assert!(interactive.web_search);
        let has_a = interactive
            .images
            .iter()
            .any(|p| p == std::path::Path::new("/tmp/a.png"));
        let has_b = interactive
            .images
            .iter()
            .any(|p| p == std::path::Path::new("/tmp/b.png"));
        assert!(has_a && has_b);
        assert!(!interactive.resume_picker);
        assert!(!interactive.resume_last);
        assert_eq!(interactive.resume_session_id.as_deref(), Some("sid"));
    }

    #[test]
    fn resume_merges_dangerously_bypass_flag() {
        let interactive = finalize_from_args(
            [
                "codex",
                "resume",
                "--dangerously-bypass-approvals-and-sandbox",
            ]
            .as_ref(),
        );
        assert!(interactive.dangerously_bypass_approvals_and_sandbox);
        assert!(interactive.resume_picker);
        assert!(!interactive.resume_last);
        assert_eq!(interactive.resume_session_id, None);
    }
}
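To make the precedence rules above concrete, here is a hypothetical invocation (the flag values are illustrative, and `key=value` is the same placeholder the code comments use): resume-scoped flags win over root-level ones, and resume-scoped `-c` overrides are appended last.

```shell
# Root-level -m gpt-5 is parsed into `interactive`, but the resume-scoped
# -m gpt-5-test is applied afterwards by merge_resume_cli_flags, so it wins.
codex -m gpt-5 resume 1234 -m gpt-5-test -c key=value
# Effective result: model=gpt-5-test, resume_session_id=1234, picker disabled.
```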
@@ -1,461 +0,0 @@
use std::collections::HashMap;
use std::path::PathBuf;

use anyhow::Context;
use anyhow::Result;
use anyhow::anyhow;
use anyhow::bail;
use codex_common::CliConfigOverrides;
use codex_core::config::Config;
use codex_core::config::ConfigOverrides;
use codex_core::config::find_codex_home;
use codex_core::config::load_global_mcp_servers;
use codex_core::config::write_global_mcp_servers;
use codex_core::config_types::McpServerConfig;
use codex_core::config_types::McpServerTransportConfig;

/// [experimental] Launch Codex as an MCP server or manage configured MCP servers.
///
/// Subcommands:
/// - `serve` — run the MCP server on stdio
/// - `list` — list configured servers (with `--json`)
/// - `get` — show a single server (with `--json`)
/// - `add` — add a server launcher entry to `~/.codex/config.toml`
/// - `remove` — delete a server entry
#[derive(Debug, clap::Parser)]
pub struct McpCli {
    #[clap(flatten)]
    pub config_overrides: CliConfigOverrides,

    #[command(subcommand)]
    pub cmd: Option<McpSubcommand>,
}

#[derive(Debug, clap::Subcommand)]
pub enum McpSubcommand {
    /// [experimental] Run the Codex MCP server (stdio transport).
    Serve,

    /// [experimental] List configured MCP servers.
    List(ListArgs),

    /// [experimental] Show details for a configured MCP server.
    Get(GetArgs),

    /// [experimental] Add a global MCP server entry.
    Add(AddArgs),

    /// [experimental] Remove a global MCP server entry.
    Remove(RemoveArgs),
}

#[derive(Debug, clap::Parser)]
pub struct ListArgs {
    /// Output the configured servers as JSON.
    #[arg(long)]
    pub json: bool,
}

#[derive(Debug, clap::Parser)]
pub struct GetArgs {
    /// Name of the MCP server to display.
    pub name: String,

    /// Output the server configuration as JSON.
    #[arg(long)]
    pub json: bool,
}

#[derive(Debug, clap::Parser)]
pub struct AddArgs {
    /// Name for the MCP server configuration.
    pub name: String,

    /// Environment variables to set when launching the server.
    #[arg(long, value_parser = parse_env_pair, value_name = "KEY=VALUE")]
    pub env: Vec<(String, String)>,

    /// Command to launch the MCP server.
    #[arg(trailing_var_arg = true, num_args = 1..)]
    pub command: Vec<String>,
}

#[derive(Debug, clap::Parser)]
pub struct RemoveArgs {
    /// Name of the MCP server configuration to remove.
    pub name: String,
}

impl McpCli {
    pub async fn run(self, codex_linux_sandbox_exe: Option<PathBuf>) -> Result<()> {
        let McpCli {
            config_overrides,
            cmd,
        } = self;
        let subcommand = cmd.unwrap_or(McpSubcommand::Serve);

        match subcommand {
            McpSubcommand::Serve => {
                codex_mcp_server::run_main(codex_linux_sandbox_exe, config_overrides).await?;
            }
            McpSubcommand::List(args) => {
                run_list(&config_overrides, args)?;
            }
            McpSubcommand::Get(args) => {
                run_get(&config_overrides, args)?;
            }
            McpSubcommand::Add(args) => {
                run_add(&config_overrides, args)?;
            }
            McpSubcommand::Remove(args) => {
                run_remove(&config_overrides, args)?;
            }
        }

        Ok(())
    }
}

fn run_add(config_overrides: &CliConfigOverrides, add_args: AddArgs) -> Result<()> {
    // Validate any provided overrides even though they are not currently applied.
    config_overrides.parse_overrides().map_err(|e| anyhow!(e))?;

    let AddArgs { name, env, command } = add_args;

    validate_server_name(&name)?;

    let mut command_parts = command.into_iter();
    let command_bin = command_parts
        .next()
        .ok_or_else(|| anyhow!("command is required"))?;
    let command_args: Vec<String> = command_parts.collect();

    let env_map = if env.is_empty() {
        None
    } else {
        let mut map = HashMap::new();
        for (key, value) in env {
            map.insert(key, value);
        }
        Some(map)
    };

    let codex_home = find_codex_home().context("failed to resolve CODEX_HOME")?;
    let mut servers = load_global_mcp_servers(&codex_home)
        .with_context(|| format!("failed to load MCP servers from {}", codex_home.display()))?;

    let new_entry = McpServerConfig {
        transport: McpServerTransportConfig::Stdio {
            command: command_bin,
            args: command_args,
            env: env_map,
        },
        startup_timeout_sec: None,
        tool_timeout_sec: None,
    };

    servers.insert(name.clone(), new_entry);

    write_global_mcp_servers(&codex_home, &servers)
        .with_context(|| format!("failed to write MCP servers to {}", codex_home.display()))?;

    println!("Added global MCP server '{name}'.");

    Ok(())
}

fn run_remove(config_overrides: &CliConfigOverrides, remove_args: RemoveArgs) -> Result<()> {
    config_overrides.parse_overrides().map_err(|e| anyhow!(e))?;

    let RemoveArgs { name } = remove_args;

    validate_server_name(&name)?;

    let codex_home = find_codex_home().context("failed to resolve CODEX_HOME")?;
    let mut servers = load_global_mcp_servers(&codex_home)
        .with_context(|| format!("failed to load MCP servers from {}", codex_home.display()))?;

    let removed = servers.remove(&name).is_some();

    if removed {
        write_global_mcp_servers(&codex_home, &servers)
            .with_context(|| format!("failed to write MCP servers to {}", codex_home.display()))?;
    }

    if removed {
        println!("Removed global MCP server '{name}'.");
    } else {
        println!("No MCP server named '{name}' found.");
    }

    Ok(())
}

fn run_list(config_overrides: &CliConfigOverrides, list_args: ListArgs) -> Result<()> {
    let overrides = config_overrides.parse_overrides().map_err(|e| anyhow!(e))?;
    let config = Config::load_with_cli_overrides(overrides, ConfigOverrides::default())
        .context("failed to load configuration")?;

    let mut entries: Vec<_> = config.mcp_servers.iter().collect();
    entries.sort_by(|(a, _), (b, _)| a.cmp(b));

    if list_args.json {
        let json_entries: Vec<_> = entries
            .into_iter()
            .map(|(name, cfg)| {
                let transport = match &cfg.transport {
                    McpServerTransportConfig::Stdio { command, args, env } => serde_json::json!({
                        "type": "stdio",
                        "command": command,
                        "args": args,
                        "env": env,
                    }),
                    McpServerTransportConfig::StreamableHttp { url, bearer_token } => {
                        serde_json::json!({
                            "type": "streamable_http",
                            "url": url,
                            "bearer_token": bearer_token,
                        })
                    }
                };

                serde_json::json!({
                    "name": name,
                    "transport": transport,
                    "startup_timeout_sec": cfg
                        .startup_timeout_sec
                        .map(|timeout| timeout.as_secs_f64()),
                    "tool_timeout_sec": cfg
                        .tool_timeout_sec
                        .map(|timeout| timeout.as_secs_f64()),
                })
            })
            .collect();
        let output = serde_json::to_string_pretty(&json_entries)?;
        println!("{output}");
        return Ok(());
    }

    if entries.is_empty() {
        println!("No MCP servers configured yet. Try `codex mcp add my-tool -- my-command`.");
        return Ok(());
    }

    let mut stdio_rows: Vec<[String; 4]> = Vec::new();
    let mut http_rows: Vec<[String; 3]> = Vec::new();

    for (name, cfg) in entries {
        match &cfg.transport {
            McpServerTransportConfig::Stdio { command, args, env } => {
                let args_display = if args.is_empty() {
                    "-".to_string()
                } else {
                    args.join(" ")
                };
                let env_display = match env.as_ref() {
                    None => "-".to_string(),
                    Some(map) if map.is_empty() => "-".to_string(),
                    Some(map) => {
                        let mut pairs: Vec<_> = map.iter().collect();
                        pairs.sort_by(|(a, _), (b, _)| a.cmp(b));
                        pairs
                            .into_iter()
                            .map(|(k, v)| format!("{k}={v}"))
                            .collect::<Vec<_>>()
                            .join(", ")
                    }
                };
                stdio_rows.push([name.clone(), command.clone(), args_display, env_display]);
            }
            McpServerTransportConfig::StreamableHttp { url, bearer_token } => {
                let has_bearer = if bearer_token.is_some() {
                    "True"
                } else {
                    "False"
                };
                http_rows.push([name.clone(), url.clone(), has_bearer.into()]);
            }
        }
    }

    if !stdio_rows.is_empty() {
        let mut widths = ["Name".len(), "Command".len(), "Args".len(), "Env".len()];
        for row in &stdio_rows {
            for (i, cell) in row.iter().enumerate() {
                widths[i] = widths[i].max(cell.len());
            }
        }

        println!(
            "{:<name_w$} {:<cmd_w$} {:<args_w$} {:<env_w$}",
            "Name",
            "Command",
            "Args",
            "Env",
            name_w = widths[0],
            cmd_w = widths[1],
            args_w = widths[2],
            env_w = widths[3],
        );

        for row in &stdio_rows {
            println!(
                "{:<name_w$} {:<cmd_w$} {:<args_w$} {:<env_w$}",
                row[0],
                row[1],
                row[2],
                row[3],
                name_w = widths[0],
                cmd_w = widths[1],
                args_w = widths[2],
                env_w = widths[3],
            );
        }
    }

    if !stdio_rows.is_empty() && !http_rows.is_empty() {
        println!();
    }

    if !http_rows.is_empty() {
        let mut widths = ["Name".len(), "Url".len(), "Has Bearer Token".len()];
        for row in &http_rows {
            for (i, cell) in row.iter().enumerate() {
                widths[i] = widths[i].max(cell.len());
            }
        }

        println!(
            "{:<name_w$} {:<url_w$} {:<token_w$}",
            "Name",
            "Url",
            "Has Bearer Token",
            name_w = widths[0],
            url_w = widths[1],
            token_w = widths[2],
        );

        for row in &http_rows {
            println!(
                "{:<name_w$} {:<url_w$} {:<token_w$}",
                row[0],
                row[1],
                row[2],
                name_w = widths[0],
                url_w = widths[1],
                token_w = widths[2],
            );
        }
    }

    Ok(())
}

fn run_get(config_overrides: &CliConfigOverrides, get_args: GetArgs) -> Result<()> {
    let overrides = config_overrides.parse_overrides().map_err(|e| anyhow!(e))?;
    let config = Config::load_with_cli_overrides(overrides, ConfigOverrides::default())
        .context("failed to load configuration")?;

    let Some(server) = config.mcp_servers.get(&get_args.name) else {
        bail!("No MCP server named '{name}' found.", name = get_args.name);
    };

    if get_args.json {
        let transport = match &server.transport {
            McpServerTransportConfig::Stdio { command, args, env } => serde_json::json!({
                "type": "stdio",
                "command": command,
                "args": args,
                "env": env,
            }),
            McpServerTransportConfig::StreamableHttp { url, bearer_token } => serde_json::json!({
                "type": "streamable_http",
                "url": url,
                "bearer_token": bearer_token,
            }),
        };
        let output = serde_json::to_string_pretty(&serde_json::json!({
            "name": get_args.name,
            "transport": transport,
            "startup_timeout_sec": server
                .startup_timeout_sec
                .map(|timeout| timeout.as_secs_f64()),
            "tool_timeout_sec": server
                .tool_timeout_sec
                .map(|timeout| timeout.as_secs_f64()),
        }))?;
        println!("{output}");
        return Ok(());
    }

    println!("{}", get_args.name);
    match &server.transport {
        McpServerTransportConfig::Stdio { command, args, env } => {
            println!(" transport: stdio");
            println!(" command: {command}");
            let args_display = if args.is_empty() {
                "-".to_string()
            } else {
                args.join(" ")
            };
            println!(" args: {args_display}");
            let env_display = match env.as_ref() {
                None => "-".to_string(),
                Some(map) if map.is_empty() => "-".to_string(),
                Some(map) => {
                    let mut pairs: Vec<_> = map.iter().collect();
                    pairs.sort_by(|(a, _), (b, _)| a.cmp(b));
                    pairs
                        .into_iter()
                        .map(|(k, v)| format!("{k}={v}"))
                        .collect::<Vec<_>>()
                        .join(", ")
                }
            };
            println!(" env: {env_display}");
        }
        McpServerTransportConfig::StreamableHttp { url, bearer_token } => {
            println!(" transport: streamable_http");
            println!(" url: {url}");
            let bearer = bearer_token.as_deref().unwrap_or("-");
            println!(" bearer_token: {bearer}");
        }
    }
    if let Some(timeout) = server.startup_timeout_sec {
        println!(" startup_timeout_sec: {}", timeout.as_secs_f64());
    }
    if let Some(timeout) = server.tool_timeout_sec {
        println!(" tool_timeout_sec: {}", timeout.as_secs_f64());
    }
    println!(" remove: codex mcp remove {}", get_args.name);

    Ok(())
}

fn parse_env_pair(raw: &str) -> Result<(String, String), String> {
    let mut parts = raw.splitn(2, '=');
    let key = parts
        .next()
        .map(str::trim)
        .filter(|s| !s.is_empty())
        .ok_or_else(|| "environment entries must be in KEY=VALUE form".to_string())?;
    let value = parts
        .next()
        .map(str::to_string)
        .ok_or_else(|| "environment entries must be in KEY=VALUE form".to_string())?;

    Ok((key.to_string(), value))
}

fn validate_server_name(name: &str) -> Result<()> {
    let is_valid = !name.is_empty()
        && name
            .chars()
            .all(|c| c.is_ascii_alphanumeric() || c == '-' || c == '_');

    if is_valid {
        Ok(())
    } else {
        bail!("invalid server name '{name}' (use letters, numbers, '-', '_')");
    }
}
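In shell terms, the subcommand surface removed here looked like the following; these invocations are assembled from the argument lists in the tests that follow, so the names and values (`docs`, `docs-server`, `TOKEN=secret`) come from those tests rather than real servers:

```shell
codex mcp add docs --env TOKEN=secret -- docs-server --port 4000
codex mcp list --json
codex mcp get docs
codex mcp remove docs
```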
@@ -1,95 +0,0 @@
use std::path::Path;

use anyhow::Result;
use codex_core::config::load_global_mcp_servers;
use codex_core::config_types::McpServerTransportConfig;
use predicates::str::contains;
use pretty_assertions::assert_eq;
use tempfile::TempDir;

fn codex_command(codex_home: &Path) -> Result<assert_cmd::Command> {
    let mut cmd = assert_cmd::Command::cargo_bin("codex")?;
    cmd.env("CODEX_HOME", codex_home);
    Ok(cmd)
}

#[test]
fn add_and_remove_server_updates_global_config() -> Result<()> {
    let codex_home = TempDir::new()?;

    let mut add_cmd = codex_command(codex_home.path())?;
    add_cmd
        .args(["mcp", "add", "docs", "--", "echo", "hello"])
        .assert()
        .success()
        .stdout(contains("Added global MCP server 'docs'."));

    let servers = load_global_mcp_servers(codex_home.path())?;
    assert_eq!(servers.len(), 1);
    let docs = servers.get("docs").expect("server should exist");
    match &docs.transport {
        McpServerTransportConfig::Stdio { command, args, env } => {
            assert_eq!(command, "echo");
            assert_eq!(args, &vec!["hello".to_string()]);
            assert!(env.is_none());
        }
        other => panic!("unexpected transport: {other:?}"),
    }

    let mut remove_cmd = codex_command(codex_home.path())?;
    remove_cmd
        .args(["mcp", "remove", "docs"])
        .assert()
        .success()
        .stdout(contains("Removed global MCP server 'docs'."));

    let servers = load_global_mcp_servers(codex_home.path())?;
    assert!(servers.is_empty());

    let mut remove_again_cmd = codex_command(codex_home.path())?;
    remove_again_cmd
        .args(["mcp", "remove", "docs"])
        .assert()
        .success()
        .stdout(contains("No MCP server named 'docs' found."));

    let servers = load_global_mcp_servers(codex_home.path())?;
    assert!(servers.is_empty());

    Ok(())
}

#[test]
fn add_with_env_preserves_key_order_and_values() -> Result<()> {
    let codex_home = TempDir::new()?;

    let mut add_cmd = codex_command(codex_home.path())?;
    add_cmd
        .args([
            "mcp",
            "add",
            "envy",
            "--env",
            "FOO=bar",
            "--env",
            "ALPHA=beta",
            "--",
            "python",
            "server.py",
        ])
        .assert()
        .success();

    let servers = load_global_mcp_servers(codex_home.path())?;
    let envy = servers.get("envy").expect("server should exist");
    let env = match &envy.transport {
        McpServerTransportConfig::Stdio { env: Some(env), .. } => env,
        other => panic!("unexpected transport: {other:?}"),
    };

    assert_eq!(env.len(), 2);
    assert_eq!(env.get("FOO"), Some(&"bar".to_string()));
    assert_eq!(env.get("ALPHA"), Some(&"beta".to_string()));

    Ok(())
}
@@ -1,104 +0,0 @@
use std::path::Path;

use anyhow::Result;
use predicates::str::contains;
use pretty_assertions::assert_eq;
use serde_json::Value as JsonValue;
use serde_json::json;
use tempfile::TempDir;

fn codex_command(codex_home: &Path) -> Result<assert_cmd::Command> {
    let mut cmd = assert_cmd::Command::cargo_bin("codex")?;
    cmd.env("CODEX_HOME", codex_home);
    Ok(cmd)
}

#[test]
fn list_shows_empty_state() -> Result<()> {
    let codex_home = TempDir::new()?;

    let mut cmd = codex_command(codex_home.path())?;
    let output = cmd.args(["mcp", "list"]).output()?;
    assert!(output.status.success());
    let stdout = String::from_utf8(output.stdout)?;
    assert!(stdout.contains("No MCP servers configured yet."));

    Ok(())
}

#[test]
fn list_and_get_render_expected_output() -> Result<()> {
    let codex_home = TempDir::new()?;

    let mut add = codex_command(codex_home.path())?;
    add.args([
        "mcp",
        "add",
        "docs",
        "--env",
        "TOKEN=secret",
        "--",
        "docs-server",
        "--port",
        "4000",
    ])
    .assert()
    .success();

    let mut list_cmd = codex_command(codex_home.path())?;
    let list_output = list_cmd.args(["mcp", "list"]).output()?;
    assert!(list_output.status.success());
    let stdout = String::from_utf8(list_output.stdout)?;
    assert!(stdout.contains("Name"));
    assert!(stdout.contains("docs"));
    assert!(stdout.contains("docs-server"));
    assert!(stdout.contains("TOKEN=secret"));

    let mut list_json_cmd = codex_command(codex_home.path())?;
    let json_output = list_json_cmd.args(["mcp", "list", "--json"]).output()?;
    assert!(json_output.status.success());
    let stdout = String::from_utf8(json_output.stdout)?;
    let parsed: JsonValue = serde_json::from_str(&stdout)?;
    assert_eq!(
        parsed,
        json!([
            {
                "name": "docs",
                "transport": {
                    "type": "stdio",
                    "command": "docs-server",
                    "args": [
                        "--port",
                        "4000"
                    ],
                    "env": {
                        "TOKEN": "secret"
                    }
                },
                "startup_timeout_sec": null,
                "tool_timeout_sec": null
            }
        ])
    );

    let mut get_cmd = codex_command(codex_home.path())?;
    let get_output = get_cmd.args(["mcp", "get", "docs"]).output()?;
    assert!(get_output.status.success());
    let stdout = String::from_utf8(get_output.stdout)?;
    assert!(stdout.contains("docs"));
    assert!(stdout.contains("transport: stdio"));
    assert!(stdout.contains("command: docs-server"));
    assert!(stdout.contains("args: --port 4000"));
    assert!(stdout.contains("env: TOKEN=secret"));
    assert!(stdout.contains("remove: codex mcp remove docs"));

    let mut get_json_cmd = codex_command(codex_home.path())?;
    get_json_cmd
        .args(["mcp", "get", "docs", "--json"])
        .assert()
        .success()
        .stdout(contains("\"name\": \"docs\""));

    Ok(())
}
@@ -7,11 +7,11 @@ version = { workspace = true }
workspace = true

[dependencies]
clap = { workspace = true, features = ["derive", "wrap_help"], optional = true }
codex-core = { workspace = true }
codex-protocol = { workspace = true }
serde = { workspace = true, optional = true }
toml = { workspace = true, optional = true }
clap = { version = "4", features = ["derive", "wrap_help"], optional = true }
codex-core = { path = "../core" }
codex-protocol = { path = "../protocol" }
serde = { version = "1", optional = true }
toml = { version = "0.9", optional = true }

[features]
# Separate feature so that `clap` is not a mandatory dependency.
@@ -1,3 +1,4 @@
use codex_core::config::SWIFTFOX_MEDIUM_MODEL;
use codex_core::protocol_config_types::ReasoningEffort;
use codex_protocol::mcp_protocol::AuthMode;

@@ -18,24 +19,24 @@ pub struct ModelPreset {

const PRESETS: &[ModelPreset] = &[
    ModelPreset {
        id: "gpt-5-codex-low",
        label: "gpt-5-codex low",
        id: "swiftfox-low",
        label: "swiftfox low",
        description: "",
        model: "gpt-5-codex",
        model: "swiftfox",
        effort: Some(ReasoningEffort::Low),
    },
    ModelPreset {
        id: "gpt-5-codex-medium",
        label: "gpt-5-codex medium",
        id: "swiftfox-medium",
        label: "swiftfox medium",
        description: "",
        model: "gpt-5-codex",
        effort: Some(ReasoningEffort::Medium),
        model: "swiftfox",
        effort: None,
    },
    ModelPreset {
        id: "gpt-5-codex-high",
        label: "gpt-5-codex high",
        id: "swiftfox-high",
        label: "swiftfox high",
        description: "",
        model: "gpt-5-codex",
        model: "swiftfox",
        effort: Some(ReasoningEffort::High),
    },
    ModelPreset {
@@ -68,6 +69,13 @@ const PRESETS: &[ModelPreset] = &[
    },
];

pub fn builtin_model_presets(_auth_mode: Option<AuthMode>) -> Vec<ModelPreset> {
    PRESETS.to_vec()
pub fn builtin_model_presets(auth_mode: Option<AuthMode>) -> Vec<ModelPreset> {
    match auth_mode {
        Some(AuthMode::ApiKey) => PRESETS
            .iter()
            .copied()
            .filter(|p| !p.model.contains(SWIFTFOX_MEDIUM_MODEL))
            .collect(),
        _ => PRESETS.to_vec(),
    }
}
@@ -4,93 +4,84 @@ name = "codex-core"
version = { workspace = true }

[lib]
doctest = false
name = "codex_core"
path = "src/lib.rs"
doctest = false

[lints]
workspace = true

[dependencies]
anyhow = { workspace = true }
askama = { workspace = true }
async-channel = { workspace = true }
async-trait = { workspace = true }
base64 = { workspace = true }
bytes = { workspace = true }
chrono = { workspace = true, features = ["serde"] }
codex-apply-patch = { workspace = true }
codex-file-search = { workspace = true }
codex-mcp-client = { workspace = true }
codex-rmcp-client = { workspace = true }
codex-protocol = { workspace = true }
dirs = { workspace = true }
env-flags = { workspace = true }
eventsource-stream = { workspace = true }
futures = { workspace = true }
indexmap = { workspace = true }
libc = { workspace = true }
mcp-types = { workspace = true }
os_info = { workspace = true }
portable-pty = { workspace = true }
rand = { workspace = true }
regex-lite = { workspace = true }
reqwest = { workspace = true, features = ["json", "stream"] }
serde = { workspace = true, features = ["derive"] }
serde_json = { workspace = true }
sha1 = { workspace = true }
shlex = { workspace = true }
similar = { workspace = true }
strum_macros = { workspace = true }
tempfile = { workspace = true }
thiserror = { workspace = true }
time = { workspace = true, features = [
"formatting",
"parsing",
"local-offset",
"macros",
] }
tokio = { workspace = true, features = [
anyhow = "1"
askama = "0.12"
async-channel = "2.3.1"
base64 = "0.22"
bytes = "1.10.1"
chrono = { version = "0.4", features = ["serde"] }
codex-apply-patch = { path = "../apply-patch" }
codex-file-search = { path = "../file-search" }
codex-mcp-client = { path = "../mcp-client" }
codex-protocol = { path = "../protocol" }
dirs = "6"
env-flags = "0.1.1"
eventsource-stream = "0.2.3"
futures = "0.3"
libc = "0.2.175"
mcp-types = { path = "../mcp-types" }
os_info = "3.12.0"
portable-pty = "0.9.0"
rand = "0.9"
regex-lite = "0.1.7"
reqwest = { version = "0.12", features = ["json", "stream"] }
serde = { version = "1", features = ["derive"] }
serde_json = "1"
sha1 = "0.10.6"
shlex = "1.3.0"
similar = "2.7.0"
strum_macros = "0.27.2"
tempfile = "3"
thiserror = "2.0.16"
time = { version = "0.3", features = ["formatting", "parsing", "local-offset", "macros"] }
tokio = { version = "1", features = [
"io-std",
"macros",
"process",
"rt-multi-thread",
"signal",
] }
tokio-util = { workspace = true }
toml = { workspace = true }
toml_edit = { workspace = true }
tracing = { workspace = true, features = ["log"] }
tree-sitter = { workspace = true }
tree-sitter-bash = { workspace = true }
uuid = { workspace = true, features = ["serde", "v4"] }
which = { workspace = true }
wildmatch = { workspace = true }
tokio-util = "0.7.16"
toml = "0.9.5"
toml_edit = "0.23.4"
tracing = { version = "0.1.41", features = ["log"] }
tree-sitter = "0.25.9"
tree-sitter-bash = "0.25.0"
uuid = { version = "1", features = ["serde", "v4"] }
which = "6"
wildmatch = "2.4.0"


[target.'cfg(target_os = "linux")'.dependencies]
landlock = { workspace = true }
seccompiler = { workspace = true }
landlock = "0.4.1"
seccompiler = "0.5.0"

# Build OpenSSL from source for musl builds.
[target.x86_64-unknown-linux-musl.dependencies]
openssl-sys = { workspace = true, features = ["vendored"] }
openssl-sys = { version = "*", features = ["vendored"] }

# Build OpenSSL from source for musl builds.
[target.aarch64-unknown-linux-musl.dependencies]
openssl-sys = { workspace = true, features = ["vendored"] }
openssl-sys = { version = "*", features = ["vendored"] }

[dev-dependencies]
assert_cmd = { workspace = true }
core_test_support = { workspace = true }
escargot = { workspace = true }
maplit = { workspace = true }
predicates = { workspace = true }
pretty_assertions = { workspace = true }
tempfile = { workspace = true }
tokio-test = { workspace = true }
walkdir = { workspace = true }
wiremock = { workspace = true }
assert_cmd = "2"
core_test_support = { path = "tests/common" }
maplit = "1.0.2"
predicates = "3"
pretty_assertions = "1.4.1"
tempfile = "3"
tokio-test = "0.4"
walkdir = "2.5.0"
wiremock = "0.6"

[package.metadata.cargo-shear]
ignored = ["openssl-sys"]

@@ -251,16 +251,6 @@ You are producing plain text that will later be styled by the CLI. Follow these
- Apply to inline examples and to bullet keywords if the keyword itself is a literal file/command.
- Never mix monospace and bold markers; choose one based on whether it’s a keyword (`**`) or inline code/path (`` ` ``).

**File References**
When referencing files in your response, make sure to include the relevant start line and always follow the below rules:
* Use inline code to make file paths clickable.
* Each reference should have a stand alone path. Even if it's the same file.
* Accepted: absolute, workspace‑relative, a/ or b/ diff prefixes, or bare filename/suffix.
* Line/column (1‑based, optional): :line[:column] or #Lline[Ccolumn] (column defaults to 1).
* Do not use URIs like file://, vscode://, or https://.
* Do not provide range of lines
* Examples: src/app.ts, src/app.ts:42, b/server/index.js#L10, C:\repo\project\main.rs:12:5

**Structure**

- Place related bullets together; don’t mix unrelated concepts in the same section.

@@ -1,12 +1,13 @@
use crate::codex::Session;
use crate::codex::TurnContext;
use crate::function_tool::FunctionCallError;
use crate::protocol::FileChange;
use crate::protocol::ReviewDecision;
use crate::safety::SafetyCheck;
use crate::safety::assess_patch_safety;
use codex_apply_patch::ApplyPatchAction;
use codex_apply_patch::ApplyPatchFileChange;
use codex_protocol::models::FunctionCallOutputPayload;
use codex_protocol::models::ResponseInputItem;
use std::collections::HashMap;
use std::path::PathBuf;

@@ -16,7 +17,7 @@ pub(crate) enum InternalApplyPatchInvocation {
/// The `apply_patch` call was handled programmatically, without any sort
/// of sandbox, because the user explicitly approved it. This is the
/// result to use with the `shell` function call that contained `apply_patch`.
Output(Result<String, FunctionCallError>),
Output(ResponseInputItem),

/// The `apply_patch` call was approved, either automatically because it
/// appears that it should be allowed based on the user's sandbox policy
@@ -32,6 +33,12 @@ pub(crate) struct ApplyPatchExec {
pub(crate) user_explicitly_approved_this_action: bool,
}

impl From<ResponseInputItem> for InternalApplyPatchInvocation {
fn from(item: ResponseInputItem) -> Self {
InternalApplyPatchInvocation::Output(item)
}
}

pub(crate) async fn apply_patch(
sess: &Session,
turn_context: &TurnContext,
@@ -70,15 +77,25 @@ pub(crate) async fn apply_patch(
})
}
ReviewDecision::Denied | ReviewDecision::Abort => {
InternalApplyPatchInvocation::Output(Err(FunctionCallError::RespondToModel(
"patch rejected by user".to_string(),
)))
ResponseInputItem::FunctionCallOutput {
call_id: call_id.to_owned(),
output: FunctionCallOutputPayload {
content: "patch rejected by user".to_string(),
success: Some(false),
},
}
.into()
}
}
}
SafetyCheck::Reject { reason } => InternalApplyPatchInvocation::Output(Err(
FunctionCallError::RespondToModel(format!("patch rejected: {reason}")),
)),
SafetyCheck::Reject { reason } => ResponseInputItem::FunctionCallOutput {
call_id: call_id.to_owned(),
output: FunctionCallOutputPayload {
content: format!("patch rejected: {reason}"),
success: Some(false),
},
}
.into(),
}
}


@@ -267,9 +267,6 @@ pub fn try_read_auth_json(auth_file: &Path) -> std::io::Result<AuthDotJson> {
}

pub fn write_auth_json(auth_file: &Path, auth_dot_json: &AuthDotJson) -> std::io::Result<()> {
if let Some(parent) = auth_file.parent() {
std::fs::create_dir_all(parent)?;
}
let json_data = serde_json::to_string_pretty(auth_dot_json)?;
let mut options = OpenOptions::new();
options.truncate(true).write(true).create(true);

@@ -60,9 +60,6 @@ pub fn try_parse_word_only_commands_sequence(tree: &Tree, src: &str) -> Option<V
}
} else {
// Reject any punctuation / operator tokens that are not explicitly allowed.
if kind.chars().any(|c| "&;|".contains(c)) && !ALLOWED_PUNCT_TOKENS.contains(&kind) {
return None;
}
if !(ALLOWED_PUNCT_TOKENS.contains(&kind) || kind.trim().is_empty()) {
// If it's a quote token or operator it's allowed above; we also allow whitespace tokens.
// Any other punctuation like parentheses, braces, redirects, backticks, etc are rejected.
@@ -74,12 +71,9 @@ pub fn try_parse_word_only_commands_sequence(tree: &Tree, src: &str) -> Option<V
}
}

// Walk uses a stack (LIFO), so re-sort by position to restore source order.
command_nodes.sort_by_key(Node::start_byte);

let mut commands = Vec::new();
for node in command_nodes {
if let Some(words) = parse_plain_command_from_node(node, src) {
if let Some(words) = extract_words_from_command_node(node, src) {
commands.push(words);
} else {
return None;
@@ -88,22 +82,10 @@ pub fn try_parse_word_only_commands_sequence(tree: &Tree, src: &str) -> Option<V
Some(commands)
}

/// Returns the sequence of plain commands within a `bash -lc "..."` invocation
/// when the script only contains word-only commands joined by safe operators.
pub fn parse_bash_lc_plain_commands(command: &[String]) -> Option<Vec<Vec<String>>> {
let [bash, flag, script] = command else {
return None;
};

if bash != "bash" || flag != "-lc" {
return None;
}

let tree = try_parse_bash(script)?;
try_parse_word_only_commands_sequence(&tree, script)
}

fn parse_plain_command_from_node(cmd: tree_sitter::Node, src: &str) -> Option<Vec<String>> {
/// Extract the plain words of a simple command node, normalizing quoted
/// strings into their contents. Returns None if the node contains unsupported
/// constructs for a word-only command.
pub(crate) fn extract_words_from_command_node(cmd: Node, src: &str) -> Option<Vec<String>> {
if cmd.kind() != "command" {
return None;
}
@@ -149,6 +131,48 @@ fn parse_plain_command_from_node(cmd: tree_sitter::Node, src: &str) -> Option<Ve
Some(words)
}

/// Find the earliest `command` node in source order within the parse tree.
pub(crate) fn find_first_command_node(tree: &Tree) -> Option<Node<'_>> {
let root = tree.root_node();
let mut cursor = root.walk();
let mut stack = vec![root];
let mut best: Option<Node> = None;
while let Some(node) = stack.pop() {
if node.is_named()
&& node.kind() == "command"
&& best.is_none_or(|b| node.start_byte() < b.start_byte())
{
best = Some(node);
}
for child in node.children(&mut cursor) {
stack.push(child);
}
}
best
}

/// Given the first command node, return the byte index in `src` at which the
/// remainder script starts, only if the next non-whitespace token is an allowed
/// sequencing operator for dropping the leading `cd`.
///
/// Allowed operators: `&&` (conditional on success) and `;` (unconditional).
/// Disallowed: `||`, `|` — removing `cd` would change semantics.
pub(crate) fn remainder_start_after_wrapper_operator(first_cmd: Node, src: &str) -> Option<usize> {
let mut sib = first_cmd.next_sibling()?;
while !sib.is_named() && sib.kind().trim().is_empty() {
sib = sib.next_sibling()?;
}
if sib.is_named() || (sib.kind() != "&&" && sib.kind() != ";") {
return None;
}
let mut idx = sib.end_byte();
let bytes = src.as_bytes();
while idx < bytes.len() && bytes[idx].is_ascii_whitespace() {
idx += 1;
}
if idx >= bytes.len() { None } else { Some(idx) }
}

#[cfg(test)]
mod tests {
use super::*;
@@ -169,10 +193,10 @@ mod tests {
let src = "ls && pwd; echo 'hi there' | wc -l";
let cmds = parse_seq(src).unwrap();
let expected: Vec<Vec<String>> = vec![
vec!["ls".to_string()],
vec!["pwd".to_string()],
vec!["echo".to_string(), "hi there".to_string()],
vec!["wc".to_string(), "-l".to_string()],
vec!["echo".to_string(), "hi there".to_string()],
vec!["pwd".to_string()],
vec!["ls".to_string()],
];
assert_eq!(cmds, expected);
}
@@ -234,4 +258,45 @@ mod tests {
fn rejects_trailing_operator_parse_error() {
assert!(parse_seq("ls &&").is_none());
}

#[test]
fn find_first_command_node_finds_cd() {
let src = "cd foo && ls; git status";
let tree = try_parse_bash(src).unwrap();
let first = find_first_command_node(&tree).unwrap();
let words = extract_words_from_command_node(first, src).unwrap();
assert_eq!(words, vec!["cd".to_string(), "foo".to_string()]);
}

#[test]
fn remainder_after_wrapper_operator_allows_and_and_semicolon() {
// Allows &&
let src = "cd foo && ls; git status";
let tree = try_parse_bash(src).unwrap();
let first = find_first_command_node(&tree).unwrap();
let idx = remainder_start_after_wrapper_operator(first, src).unwrap();
assert_eq!(&src[idx..], "ls; git status");

// Allows ;
let src2 = "cd foo; ls";
let tree2 = try_parse_bash(src2).unwrap();
let first2 = find_first_command_node(&tree2).unwrap();
let idx2 = remainder_start_after_wrapper_operator(first2, src2).unwrap();
assert_eq!(&src2[idx2..], "ls");
}

#[test]
fn remainder_after_wrapper_operator_rejects_or_and_pipe() {
// Rejects ||
let src = "cd foo || echo hi";
let tree = try_parse_bash(src).unwrap();
let first = find_first_command_node(&tree).unwrap();
assert!(remainder_start_after_wrapper_operator(first, src).is_none());

// Rejects |
let src2 = "cd foo | rg bar";
let tree2 = try_parse_bash(src2).unwrap();
let first2 = find_first_command_node(&tree2).unwrap();
assert!(remainder_start_after_wrapper_operator(first2, src2).is_none());
}
}

@@ -35,12 +35,6 @@ pub(crate) async fn stream_chat_completions(
client: &reqwest::Client,
provider: &ModelProviderInfo,
) -> Result<ResponseStream> {
if prompt.output_schema.is_some() {
return Err(CodexErr::UnsupportedOperation(
"output_schema is not supported for Chat Completions API".to_string(),
));
}

// Build messages array
let mut messages = Vec::<serde_json::Value>::new();

@@ -468,7 +462,7 @@ async fn process_chat_sse<S>(
if let Some(reasoning_val) = choice.get("delta").and_then(|d| d.get("reasoning")) {
let mut maybe_text = reasoning_val
.as_str()
.map(str::to_string)
.map(|s| s.to_string())
.filter(|s| !s.is_empty());

if maybe_text.is_none() && reasoning_val.is_object() {
@@ -722,9 +716,6 @@ where
// Not an assistant message – forward immediately.
return Poll::Ready(Some(Ok(ResponseEvent::OutputItemDone(item))));
}
Poll::Ready(Some(Ok(ResponseEvent::RateLimits(snapshot)))) => {
return Poll::Ready(Some(Ok(ResponseEvent::RateLimits(snapshot))));
}
Poll::Ready(Some(Ok(ResponseEvent::Completed {
response_id,
token_usage,

@@ -4,7 +4,6 @@ use std::sync::OnceLock;
use std::time::Duration;

use crate::AuthManager;
use crate::auth::CodexAuth;
use bytes::Bytes;
use codex_protocol::mcp_protocol::AuthMode;
use codex_protocol::mcp_protocol::ConversationId;
@@ -12,7 +11,6 @@ use eventsource_stream::Eventsource;
use futures::prelude::*;
use regex_lite::Regex;
use reqwest::StatusCode;
use reqwest::header::HeaderMap;
use serde::Deserialize;
use serde::Serialize;
use serde_json::Value;
@@ -42,8 +40,6 @@ use crate::model_provider_info::ModelProviderInfo;
use crate::model_provider_info::WireApi;
use crate::openai_model_info::get_model_info;
use crate::openai_tools::create_tools_json_for_responses_api;
use crate::protocol::RateLimitSnapshot;
use crate::protocol::RateLimitWindow;
use crate::protocol::TokenUsage;
use crate::token_data::PlanType;
use crate::util::backoff;
@@ -184,22 +180,18 @@ impl ModelClient {

let input_with_instructions = prompt.get_formatted_input();

let verbosity = match &self.config.model_family.family {
family if family == "gpt-5" => self.config.model_verbosity,
_ => {
if self.config.model_verbosity.is_some() {
warn!(
"model_verbosity is set but ignored for non-gpt-5 model family: {}",
self.config.model_family.family
);
}

None
}
};

// Only include `text.verbosity` for GPT-5 family models
let text = create_text_param_for_request(verbosity, &prompt.output_schema);
let text = if self.config.model_family.family == "gpt-5" {
create_text_param_for_request(self.config.model_verbosity)
} else {
if self.config.model_verbosity.is_some() {
warn!(
"model_verbosity is set but ignored for non-gpt-5 model family: {}",
self.config.model_family.family
);
}
None
};

// In general, we want to explicitly send `store: false` when using the Responses API,
// but in practice, the Azure Responses API rejects `store: false`:
@@ -229,169 +221,144 @@ impl ModelClient {
if azure_workaround {
attach_item_ids(&mut payload_json, &input_with_instructions);
}
let payload_body = serde_json::to_string(&payload_json)?;

let max_attempts = self.provider.request_max_retries();
for attempt in 0..=max_attempts {
match self
.attempt_stream_responses(&payload_json, &auth_manager)
.await
let mut attempt = 0;
let max_retries = self.provider.request_max_retries();

loop {
attempt += 1;

// Always fetch the latest auth in case a prior attempt refreshed the token.
let auth = auth_manager.as_ref().and_then(|m| m.auth());

trace!(
"POST to {}: {}",
self.provider.get_full_url(&auth),
payload_body.as_str()
);

let mut req_builder = self
.provider
.create_request_builder(&self.client, &auth)
.await?;

req_builder = req_builder
.header("OpenAI-Beta", "responses=experimental")
// Send session_id for compatibility.
.header("conversation_id", self.conversation_id.to_string())
.header("session_id", self.conversation_id.to_string())
.header(reqwest::header::ACCEPT, "text/event-stream")
.json(&payload_json);

if let Some(auth) = auth.as_ref()
&& auth.mode == AuthMode::ChatGPT
&& let Some(account_id) = auth.get_account_id()
{
Ok(stream) => {
return Ok(stream);
req_builder = req_builder.header("chatgpt-account-id", account_id);
}

let res = req_builder.send().await;
if let Ok(resp) = &res {
trace!(
"Response status: {}, cf-ray: {}",
resp.status(),
resp.headers()
.get("cf-ray")
.map(|v| v.to_str().unwrap_or_default())
.unwrap_or_default()
);
}

match res {
Ok(resp) if resp.status().is_success() => {
let (tx_event, rx_event) = mpsc::channel::<Result<ResponseEvent>>(1600);

// spawn task to process SSE
let stream = resp.bytes_stream().map_err(CodexErr::Reqwest);
tokio::spawn(process_sse(
stream,
tx_event,
self.provider.stream_idle_timeout(),
));

return Ok(ResponseStream { rx_event });
}
Err(StreamAttemptError::Fatal(e)) => {
return Err(e);
}
Err(retryable_attempt_error) => {
if attempt == max_attempts {
return Err(retryable_attempt_error.into_error());
Ok(res) => {
let status = res.status();

// Pull out Retry‑After header if present.
let retry_after_secs = res
.headers()
.get(reqwest::header::RETRY_AFTER)
.and_then(|v| v.to_str().ok())
.and_then(|s| s.parse::<u64>().ok());

if status == StatusCode::UNAUTHORIZED
&& let Some(manager) = auth_manager.as_ref()
&& manager.auth().is_some()
{
let _ = manager.refresh_token().await;
}

tokio::time::sleep(retryable_attempt_error.delay(attempt)).await;
}
}
}
// The OpenAI Responses endpoint returns structured JSON bodies even for 4xx/5xx
// errors. When we bubble early with only the HTTP status the caller sees an opaque
// "unexpected status 400 Bad Request" which makes debugging nearly impossible.
// Instead, read (and include) the response text so higher layers and users see the
// exact error message (e.g. "Unknown parameter: 'input[0].metadata'"). The body is
// small and this branch only runs on error paths so the extra allocation is
// negligible.
if !(status == StatusCode::TOO_MANY_REQUESTS
|| status == StatusCode::UNAUTHORIZED
|| status.is_server_error())
{
// Surface the error body to callers. Use `unwrap_or_default` per Clippy.
let body = res.text().await.unwrap_or_default();
return Err(CodexErr::UnexpectedStatus(status, body));
}

unreachable!("stream_responses_attempt should always return");
}

/// Single attempt to start a streaming Responses API call.
async fn attempt_stream_responses(
&self,
payload_json: &Value,
auth_manager: &Option<Arc<AuthManager>>,
) -> std::result::Result<ResponseStream, StreamAttemptError> {
// Always fetch the latest auth in case a prior attempt refreshed the token.
let auth = auth_manager.as_ref().and_then(|m| m.auth());

trace!(
"POST to {}: {:?}",
self.provider.get_full_url(&auth),
serde_json::to_string(payload_json)
);

let mut req_builder = self
.provider
.create_request_builder(&self.client, &auth)
.await
.map_err(StreamAttemptError::Fatal)?;

req_builder = req_builder
.header("OpenAI-Beta", "responses=experimental")
// Send session_id for compatibility.
.header("conversation_id", self.conversation_id.to_string())
.header("session_id", self.conversation_id.to_string())
.header(reqwest::header::ACCEPT, "text/event-stream")
.json(payload_json);

if let Some(auth) = auth.as_ref()
&& auth.mode == AuthMode::ChatGPT
&& let Some(account_id) = auth.get_account_id()
{
req_builder = req_builder.header("chatgpt-account-id", account_id);
}

let res = req_builder.send().await;
if let Ok(resp) = &res {
trace!(
"Response status: {}, cf-ray: {}",
resp.status(),
resp.headers()
.get("cf-ray")
.map(|v| v.to_str().unwrap_or_default())
.unwrap_or_default()
);
}

match res {
Ok(resp) if resp.status().is_success() => {
let (tx_event, rx_event) = mpsc::channel::<Result<ResponseEvent>>(1600);

if let Some(snapshot) = parse_rate_limit_snapshot(resp.headers())
&& tx_event
.send(Ok(ResponseEvent::RateLimits(snapshot)))
.await
.is_err()
{
debug!("receiver dropped rate limit snapshot event");
}

// spawn task to process SSE
let stream = resp.bytes_stream().map_err(CodexErr::Reqwest);
tokio::spawn(process_sse(
stream,
tx_event,
self.provider.stream_idle_timeout(),
));

Ok(ResponseStream { rx_event })
}
Ok(res) => {
let status = res.status();

// Pull out Retry‑After header if present.
let retry_after_secs = res
.headers()
.get(reqwest::header::RETRY_AFTER)
.and_then(|v| v.to_str().ok())
.and_then(|s| s.parse::<u64>().ok());
let retry_after = retry_after_secs.map(|s| Duration::from_millis(s * 1_000));

if status == StatusCode::UNAUTHORIZED
&& let Some(manager) = auth_manager.as_ref()
&& manager.auth().is_some()
{
let _ = manager.refresh_token().await;
}

// The OpenAI Responses endpoint returns structured JSON bodies even for 4xx/5xx
// errors. When we bubble early with only the HTTP status the caller sees an opaque
// "unexpected status 400 Bad Request" which makes debugging nearly impossible.
// Instead, read (and include) the response text so higher layers and users see the
// exact error message (e.g. "Unknown parameter: 'input[0].metadata'"). The body is
// small and this branch only runs on error paths so the extra allocation is
// negligible.
if !(status == StatusCode::TOO_MANY_REQUESTS
|| status == StatusCode::UNAUTHORIZED
|| status.is_server_error())
{
// Surface the error body to callers. Use `unwrap_or_default` per Clippy.
let body = res.text().await.unwrap_or_default();
return Err(StreamAttemptError::Fatal(CodexErr::UnexpectedStatus(
status, body,
)));
}

if status == StatusCode::TOO_MANY_REQUESTS {
let rate_limit_snapshot = parse_rate_limit_snapshot(res.headers());
let body = res.json::<ErrorResponse>().await.ok();
if let Some(ErrorResponse { error }) = body {
if error.r#type.as_deref() == Some("usage_limit_reached") {
// Prefer the plan_type provided in the error message if present
// because it's more up to date than the one encoded in the auth
// token.
let plan_type = error
.plan_type
.or_else(|| auth.as_ref().and_then(CodexAuth::get_plan_type));
let resets_in_seconds = error.resets_in_seconds;
let codex_err = CodexErr::UsageLimitReached(UsageLimitReachedError {
plan_type,
resets_in_seconds,
rate_limits: rate_limit_snapshot,
});
return Err(StreamAttemptError::Fatal(codex_err));
} else if error.r#type.as_deref() == Some("usage_not_included") {
return Err(StreamAttemptError::Fatal(CodexErr::UsageNotIncluded));
if status == StatusCode::TOO_MANY_REQUESTS {
let body = res.json::<ErrorResponse>().await.ok();
if let Some(ErrorResponse { error }) = body {
if error.r#type.as_deref() == Some("usage_limit_reached") {
// Prefer the plan_type provided in the error message if present
// because it's more up to date than the one encoded in the auth
// token.
let plan_type = error
.plan_type
.or_else(|| auth.as_ref().and_then(|a| a.get_plan_type()));
let resets_in_seconds = error.resets_in_seconds;
return Err(CodexErr::UsageLimitReached(UsageLimitReachedError {
plan_type,
resets_in_seconds,
}));
} else if error.r#type.as_deref() == Some("usage_not_included") {
return Err(CodexErr::UsageNotIncluded);
}
}
}
}

Err(StreamAttemptError::RetryableHttpError {
status,
retry_after,
})
if attempt > max_retries {
if status == StatusCode::INTERNAL_SERVER_ERROR {
return Err(CodexErr::InternalServerError);
}

return Err(CodexErr::RetryLimit(status));
}

let delay = retry_after_secs
.map(|s| Duration::from_millis(s * 1_000))
.unwrap_or_else(|| backoff(attempt));
tokio::time::sleep(delay).await;
}
Err(e) => {
if attempt > max_retries {
return Err(e.into());
}
let delay = backoff(attempt);
tokio::time::sleep(delay).await;
}
}
Err(e) => Err(StreamAttemptError::RetryableTransportError(e.into())),
}
}

@@ -424,47 +391,6 @@ impl ModelClient {
}
}

enum StreamAttemptError {
RetryableHttpError {
status: StatusCode,
retry_after: Option<Duration>,
},
RetryableTransportError(CodexErr),
Fatal(CodexErr),
}

impl StreamAttemptError {
/// attempt is 0-based.
fn delay(&self, attempt: u64) -> Duration {
// backoff() uses 1-based attempts.
let backoff_attempt = attempt + 1;
match self {
Self::RetryableHttpError { retry_after, .. } => {
retry_after.unwrap_or_else(|| backoff(backoff_attempt))
}
Self::RetryableTransportError { .. } => backoff(backoff_attempt),
Self::Fatal(_) => {
// Should not be called on Fatal errors.
Duration::from_secs(0)
}
}
}

fn into_error(self) -> CodexErr {
match self {
Self::RetryableHttpError { status, .. } => {
if status == StatusCode::INTERNAL_SERVER_ERROR {
CodexErr::InternalServerError
} else {
CodexErr::RetryLimit(status)
}
}
Self::RetryableTransportError(error) => error,
Self::Fatal(error) => error,
}
}
}

#[derive(Debug, Deserialize, Serialize)]
struct SseEvent {
#[serde(rename = "type")]
@@ -474,6 +400,9 @@ struct SseEvent {
delta: Option<String>,
}

#[derive(Debug, Deserialize)]
struct ResponseCreated {}

#[derive(Debug, Deserialize)]
struct ResponseCompleted {
id: String,
@@ -544,63 +473,6 @@ fn attach_item_ids(payload_json: &mut Value, original_items: &[ResponseItem]) {
}
}

fn parse_rate_limit_snapshot(headers: &HeaderMap) -> Option<RateLimitSnapshot> {
let primary = parse_rate_limit_window(
headers,
"x-codex-primary-used-percent",
"x-codex-primary-window-minutes",
"x-codex-primary-reset-after-seconds",
);

let secondary = parse_rate_limit_window(
headers,
"x-codex-secondary-used-percent",
"x-codex-secondary-window-minutes",
"x-codex-secondary-reset-after-seconds",
);

Some(RateLimitSnapshot { primary, secondary })
}

fn parse_rate_limit_window(
headers: &HeaderMap,
used_percent_header: &str,
window_minutes_header: &str,
resets_header: &str,
) -> Option<RateLimitWindow> {
let used_percent: Option<f64> = parse_header_f64(headers, used_percent_header);

used_percent.and_then(|used_percent| {
let window_minutes = parse_header_u64(headers, window_minutes_header);
let resets_in_seconds = parse_header_u64(headers, resets_header);

let has_data = used_percent != 0.0
|| window_minutes.is_some_and(|minutes| minutes != 0)
|| resets_in_seconds.is_some_and(|seconds| seconds != 0);

has_data.then_some(RateLimitWindow {
used_percent,
window_minutes,
resets_in_seconds,
})
})
}

fn parse_header_f64(headers: &HeaderMap, name: &str) -> Option<f64> {
parse_header_str(headers, name)?
.parse::<f64>()
.ok()
.filter(|v| v.is_finite())
}

fn parse_header_u64(headers: &HeaderMap, name: &str) -> Option<u64> {
parse_header_str(headers, name)?.parse::<u64>().ok()
}

fn parse_header_str<'a>(headers: &'a HeaderMap, name: &str) -> Option<&'a str> {
headers.get(name)?.to_str().ok()
}

async fn process_sse<S>(
stream: S,
tx_event: mpsc::Sender<Result<ResponseEvent>>,

@@ -1,7 +1,6 @@
use crate::error::Result;
use crate::model_family::ModelFamily;
use crate::openai_tools::OpenAiTool;
use crate::protocol::RateLimitSnapshot;
use crate::protocol::TokenUsage;
use codex_apply_patch::APPLY_PATCH_TOOL_INSTRUCTIONS;
use codex_protocol::config_types::ReasoningEffort as ReasoningEffortConfig;
@@ -10,7 +9,6 @@ use codex_protocol::config_types::Verbosity as VerbosityConfig;
use codex_protocol::models::ResponseItem;
use futures::Stream;
use serde::Serialize;
use serde_json::Value;
use std::borrow::Cow;
use std::ops::Deref;
use std::pin::Pin;
@@ -33,17 +31,16 @@ pub struct Prompt {

/// Optional override for the built-in BASE_INSTRUCTIONS.
pub base_instructions_override: Option<String>,

/// Optional the output schema for the model's response.
pub output_schema: Option<Value>,
}

impl Prompt {
pub(crate) fn get_full_instructions<'a>(&'a self, model: &'a ModelFamily) -> Cow<'a, str> {
pub(crate) fn get_full_instructions(&self, model: &ModelFamily) -> Cow<'_, str> {
let base = self
.base_instructions_override
.as_deref()
.unwrap_or(model.base_instructions.deref());
let mut sections: Vec<&str> = vec![base];

// When there are no custom instructions, add apply_patch_tool_instructions if:
// - the model needs special instructions (4.1)
// AND
@@ -57,10 +54,9 @@ impl Prompt {
&& model.needs_special_apply_patch_instructions
&& !is_apply_patch_tool_present
{
Cow::Owned(format!("{base}\n{APPLY_PATCH_TOOL_INSTRUCTIONS}"))
} else {
Cow::Borrowed(base)
sections.push(APPLY_PATCH_TOOL_INSTRUCTIONS);
}
Cow::Owned(sections.join("\n"))
}

pub(crate) fn get_formatted_input(&self) -> Vec<ResponseItem> {
@@ -83,7 +79,6 @@ pub enum ResponseEvent {
WebSearchCallBegin {
call_id: String,
},
RateLimits(RateLimitSnapshot),
}

#[derive(Debug, Serialize)]
@@ -94,31 +89,14 @@ pub(crate) struct Reasoning {
pub(crate) summary: Option<ReasoningSummaryConfig>,
}

#[derive(Debug, Serialize, Default, Clone)]
#[serde(rename_all = "snake_case")]
pub(crate) enum TextFormatType {
#[default]
JsonSchema,
}

#[derive(Debug, Serialize, Default, Clone)]
pub(crate) struct TextFormat {
pub(crate) r#type: TextFormatType,
pub(crate) strict: bool,
pub(crate) schema: Value,
pub(crate) name: String,
}

/// Controls under the `text` field in the Responses API for GPT-5.
#[derive(Debug, Serialize, Default, Clone)]
#[derive(Debug, Serialize, Default, Clone, Copy)]
pub(crate) struct TextControls {
#[serde(skip_serializing_if = "Option::is_none")]
pub(crate) verbosity: Option<OpenAiVerbosity>,
#[serde(skip_serializing_if = "Option::is_none")]
pub(crate) format: Option<TextFormat>,
}

#[derive(Debug, Serialize, Default, Clone)]
#[derive(Debug, Serialize, Default, Clone, Copy)]
#[serde(rename_all = "lowercase")]
pub(crate) enum OpenAiVerbosity {
Low,
@@ -177,20 +155,9 @@ pub(crate) fn create_reasoning_param_for_request(

pub(crate) fn create_text_param_for_request(
verbosity: Option<VerbosityConfig>,
output_schema: &Option<Value>,
) -> Option<TextControls> {
if verbosity.is_none() && output_schema.is_none() {
return None;
}

Some(TextControls {
verbosity: verbosity.map(std::convert::Into::into),
format: output_schema.as_ref().map(|schema| TextFormat {
r#type: TextFormatType::JsonSchema,
strict: true,
schema: schema.clone(),
name: "codex_output_schema".to_string(),
}),
verbosity.map(|v| TextControls {
verbosity: Some(v.into()),
})
}

@@ -248,7 +215,7 @@ mod tests {
expects_apply_patch_instructions: false,
},
InstructionsTestCase {
slug: "gpt-5-codex",
slug: "swiftfox",
expects_apply_patch_instructions: false,
},
];
@@ -287,7 +254,6 @@ mod tests {
prompt_cache_key: None,
text: Some(TextControls {
verbosity: Some(OpenAiVerbosity::Low),
format: None,
}),
};

@@ -300,52 +266,6 @@ mod tests {
);
}

#[test]
fn serializes_text_schema_with_strict_format() {
let input: Vec<ResponseItem> = vec![];
let tools: Vec<serde_json::Value> = vec![];
let schema = serde_json::json!({
"type": "object",
"properties": {
"answer": {"type": "string"}
},
"required": ["answer"],
});
let text_controls =
create_text_param_for_request(None, &Some(schema.clone())).expect("text controls");

let req = ResponsesApiRequest {
model: "gpt-5",
instructions: "i",
input: &input,
tools: &tools,
tool_choice: "auto",
parallel_tool_calls: false,
reasoning: None,
store: false,
stream: true,
include: vec![],
prompt_cache_key: None,
text: Some(text_controls),
};

let v = serde_json::to_value(&req).expect("json");
let text = v.get("text").expect("text field");
assert!(text.get("verbosity").is_none());
let format = text.get("format").expect("format field");

assert_eq!(
format.get("name"),
Some(&serde_json::Value::String("codex_output_schema".into()))
);
assert_eq!(
format.get("type"),
Some(&serde_json::Value::String("json_schema".into()))
);
assert_eq!(format.get("strict"), Some(&serde_json::Value::Bool(true)));
assert_eq!(format.get("schema"), Some(&schema));
}

#[test]
fn omits_text_when_not_set() {
let input: Vec<ResponseItem> = vec![];

File diff suppressed because it is too large
@@ -1,5 +1,7 @@
use std::sync::Arc;

use super::AgentTask;
use super::MutexExt;
use super::Session;
use super::TurnContext;
use super::get_last_assistant_message_from_turn;
@@ -14,9 +16,9 @@ use crate::protocol::Event;
use crate::protocol::EventMsg;
use crate::protocol::InputItem;
use crate::protocol::InputMessageKind;
use crate::protocol::TaskCompleteEvent;
use crate::protocol::TaskStartedEvent;
use crate::protocol::TurnContextItem;
use crate::truncate::truncate_middle;
use crate::util::backoff;
use askama::Template;
use codex_protocol::models::ContentItem;
@@ -25,8 +27,8 @@ use codex_protocol::models::ResponseItem;
use codex_protocol::protocol::RolloutItem;
use futures::prelude::*;

pub const SUMMARIZATION_PROMPT: &str = include_str!("../../templates/compact/prompt.md");
const COMPACT_USER_MESSAGE_MAX_TOKENS: usize = 20_000;
pub(super) const COMPACT_TRIGGER_TEXT: &str = "Start Summarization";
const SUMMARIZATION_PROMPT: &str = include_str!("../../templates/compact/prompt.md");

#[derive(Template)]
#[template(path = "compact/history_bridge.md", escape = "none")]
@@ -35,32 +37,57 @@ struct HistoryBridgeTemplate<'a> {
summary_text: &'a str,
}

pub(crate) async fn run_inline_auto_compact_task(
pub(super) fn spawn_compact_task(
sess: Arc<Session>,
turn_context: Arc<TurnContext>,
sub_id: String,
input: Vec<InputItem>,
) {
let task = AgentTask::compact(
sess.clone(),
turn_context,
sub_id,
input,
SUMMARIZATION_PROMPT.to_string(),
);
sess.set_task(task);
}

pub(super) async fn run_inline_auto_compact_task(
sess: Arc<Session>,
turn_context: Arc<TurnContext>,
) {
let sub_id = sess.next_internal_sub_id();
let input = vec![InputItem::Text {
text: SUMMARIZATION_PROMPT.to_string(),
text: COMPACT_TRIGGER_TEXT.to_string(),
}];
run_compact_task_inner(sess, turn_context, sub_id, input).await;
run_compact_task_inner(
sess,
turn_context,
sub_id,
input,
SUMMARIZATION_PROMPT.to_string(),
false,
)
.await;
}

pub(crate) async fn run_compact_task(
pub(super) async fn run_compact_task(
sess: Arc<Session>,
turn_context: Arc<TurnContext>,
sub_id: String,
input: Vec<InputItem>,
) -> Option<String> {
let start_event = Event {
id: sub_id.clone(),
msg: EventMsg::TaskStarted(TaskStartedEvent {
model_context_window: turn_context.client.get_model_context_window(),
}),
};
sess.send_event(start_event).await;
run_compact_task_inner(sess.clone(), turn_context, sub_id.clone(), input).await;
None
compact_instructions: String,
) {
run_compact_task_inner(
sess,
turn_context,
sub_id,
input,
compact_instructions,
true,
)
.await;
}

async fn run_compact_task_inner(
@@ -68,15 +95,26 @@ async fn run_compact_task_inner(
turn_context: Arc<TurnContext>,
sub_id: String,
input: Vec<InputItem>,
compact_instructions: String,
remove_task_on_completion: bool,
) {
let model_context_window = turn_context.client.get_model_context_window();
let start_event = Event {
id: sub_id.clone(),
msg: EventMsg::TaskStarted(TaskStartedEvent {
model_context_window,
}),
};
sess.send_event(start_event).await;

let initial_input_for_turn: ResponseInputItem = ResponseInputItem::from(input);
let turn_input = sess
.turn_input_with_history(vec![initial_input_for_turn.clone().into()])
.await;
let instructions_override = compact_instructions;
let turn_input = sess.turn_input_with_history(vec![initial_input_for_turn.clone().into()]);

let prompt = Prompt {
input: turn_input,
..Default::default()
tools: Vec::new(),
base_instructions_override: Some(instructions_override),
};

let max_retries = turn_context.client.get_provider().stream_max_retries();
@@ -93,8 +131,7 @@ async fn run_compact_task_inner(
sess.persist_rollout_items(&[rollout_item]).await;

loop {
let attempt_result =
drain_to_completed(&sess, turn_context.as_ref(), &sub_id, &prompt).await;
let attempt_result = drain_to_completed(&sess, turn_context.as_ref(), &prompt).await;

match attempt_result {
Ok(()) => {
@@ -130,12 +167,21 @@ async fn run_compact_task_inner(
}
}

let history_snapshot = sess.history_snapshot().await;
if remove_task_on_completion {
sess.remove_task(&sub_id);
}
let history_snapshot = {
let state = sess.state.lock_unchecked();
state.history.contents()
};
let summary_text = get_last_assistant_message_from_turn(&history_snapshot).unwrap_or_default();
let user_messages = collect_user_messages(&history_snapshot);
let initial_context = sess.build_initial_context(turn_context.as_ref());
let new_history = build_compacted_history(initial_context, &user_messages, &summary_text);
sess.replace_history(new_history).await;
{
let mut state = sess.state.lock_unchecked();
state.history.replace(new_history);
}

let rollout_item = RolloutItem::Compacted(CompactedItem {
message: summary_text.clone(),
@@ -149,9 +195,16 @@ async fn run_compact_task_inner(
}),
};
sess.send_event(event).await;
let event = Event {
id: sub_id.clone(),
msg: EventMsg::TaskComplete(TaskCompleteEvent {
last_agent_message: None,
}),
};
sess.send_event(event).await;
}

pub fn content_items_to_text(content: &[ContentItem]) -> Option<String> {
fn content_items_to_text(content: &[ContentItem]) -> Option<String> {
let mut pieces = Vec::new();
for item in content {
match item {
@@ -183,7 +236,7 @@ pub(crate) fn collect_user_messages(items: &[ResponseItem]) -> Vec<String> {
.collect()
}

pub fn is_session_prefix_message(text: &str) -> bool {
fn is_session_prefix_message(text: &str) -> bool {
matches!(
InputMessageKind::from(("user", text)),
InputMessageKind::UserInstructions | InputMessageKind::EnvironmentContext
@@ -196,17 +249,11 @@ pub(crate) fn build_compacted_history(
summary_text: &str,
) -> Vec<ResponseItem> {
let mut history = initial_context;
let mut user_messages_text = if user_messages.is_empty() {
let user_messages_text = if user_messages.is_empty() {
"(none)".to_string()
} else {
user_messages.join("\n\n")
};
// Truncate the concatenated prior user messages so the bridge message
// stays well under the context window (approx. 4 bytes/token).
let max_bytes = COMPACT_USER_MESSAGE_MAX_TOKENS * 4;
if user_messages_text.len() > max_bytes {
user_messages_text = truncate_middle(&user_messages_text, max_bytes).0;
}
let summary_text = if summary_text.is_empty() {
"(no summary available)".to_string()
} else {
@@ -230,7 +277,6 @@ pub(crate) fn build_compacted_history(
async fn drain_to_completed(
sess: &Session,
turn_context: &TurnContext,
sub_id: &str,
prompt: &Prompt,
) -> CodexResult<()> {
let mut stream = turn_context.client.clone().stream(prompt).await?;
@@ -244,14 +290,10 @@ async fn drain_to_completed(
};
match event {
Ok(ResponseEvent::OutputItemDone(item)) => {
sess.record_into_history(std::slice::from_ref(&item)).await;
let mut state = sess.state.lock_unchecked();
state.history.record_items(std::slice::from_ref(&item));
}
Ok(ResponseEvent::RateLimits(snapshot)) => {
sess.update_rate_limits(sub_id, snapshot).await;
}
Ok(ResponseEvent::Completed { token_usage, .. }) => {
sess.update_token_usage_info(sub_id, turn_context, token_usage.as_ref())
.await;
Ok(ResponseEvent::Completed { .. }) => {
return Ok(());
}
Ok(_) => continue,
@@ -355,38 +397,4 @@ mod tests {

assert_eq!(vec!["real user message".to_string()], collected);
}

#[test]
fn build_compacted_history_truncates_overlong_user_messages() {
// Prepare a very large prior user message so the aggregated
// `user_messages_text` exceeds the truncation threshold used by
// `build_compacted_history` (80k bytes).
let big = "X".repeat(200_000);
let history = build_compacted_history(Vec::new(), std::slice::from_ref(&big), "SUMMARY");

// Expect exactly one bridge message added to history (plus any initial context we provided, which is none).
assert_eq!(history.len(), 1);

// Extract the text content of the bridge message.
let bridge_text = match &history[0] {
ResponseItem::Message { role, content, .. } if role == "user" => {
content_items_to_text(content).unwrap_or_default()
}
other => panic!("unexpected item in history: {other:?}"),
};

// The bridge should contain the truncation marker and not the full original payload.
assert!(
bridge_text.contains("tokens truncated"),
"expected truncation marker in bridge message"
);
assert!(
!bridge_text.contains(&big),
"bridge should not include the full oversized user text"
);
assert!(
bridge_text.contains("SUMMARY"),
"bridge should include the provided summary text"
);
}
}

@@ -1,99 +0,0 @@
use crate::bash::parse_bash_lc_plain_commands;

pub fn command_might_be_dangerous(command: &[String]) -> bool {
if is_dangerous_to_call_with_exec(command) {
return true;
}

// Support `bash -lc "<script>"` where the any part of the script might contain a dangerous command.
if let Some(all_commands) = parse_bash_lc_plain_commands(command)
&& all_commands
.iter()
.any(|cmd| is_dangerous_to_call_with_exec(cmd))
{
return true;
}

false
}

fn is_dangerous_to_call_with_exec(command: &[String]) -> bool {
let cmd0 = command.first().map(String::as_str);

match cmd0 {
Some(cmd) if cmd.ends_with("git") || cmd.ends_with("/git") => {
matches!(command.get(1).map(String::as_str), Some("reset" | "rm"))
}

Some("rm") => matches!(command.get(1).map(String::as_str), Some("-f" | "-rf")),

// for sudo <cmd> simply do the check for <cmd>
Some("sudo") => is_dangerous_to_call_with_exec(&command[1..]),

// ── anything else ─────────────────────────────────────────────────
_ => false,
}
}

#[cfg(test)]
mod tests {
use super::*;

fn vec_str(items: &[&str]) -> Vec<String> {
items.iter().map(std::string::ToString::to_string).collect()
}

#[test]
fn git_reset_is_dangerous() {
assert!(command_might_be_dangerous(&vec_str(&["git", "reset"])));
}

#[test]
fn bash_git_reset_is_dangerous() {
assert!(command_might_be_dangerous(&vec_str(&[
"bash",
"-lc",
"git reset --hard"
])));
}

#[test]
fn git_status_is_not_dangerous() {
assert!(!command_might_be_dangerous(&vec_str(&["git", "status"])));
}

#[test]
fn bash_git_status_is_not_dangerous() {
assert!(!command_might_be_dangerous(&vec_str(&[
"bash",
"-lc",
"git status"
])));
}

#[test]
fn sudo_git_reset_is_dangerous() {
assert!(command_might_be_dangerous(&vec_str(&[
"sudo", "git", "reset", "--hard"
])));
}

#[test]
fn usr_bin_git_is_dangerous() {
assert!(command_might_be_dangerous(&vec_str(&[
"/usr/bin/git",
"reset",
"--hard"
])));
}

#[test]
fn rm_rf_is_dangerous() {
assert!(command_might_be_dangerous(&vec_str(&["rm", "-rf", "/"])));
}

#[test]
fn rm_f_is_dangerous() {
assert!(command_might_be_dangerous(&vec_str(&["rm", "-f", "/"])));
}
}
@@ -1,4 +0,0 @@
pub mod is_dangerous_command;
pub mod is_safe_command;
#[cfg(target_os = "windows")]
pub mod windows_safe_commands;
@@ -1,25 +0,0 @@
// This is a WIP. This will eventually contain a real list of common safe Windows commands.
pub fn is_safe_command_windows(_command: &[String]) -> bool {
false
}

#[cfg(test)]
mod tests {
use super::is_safe_command_windows;

fn vec_str(args: &[&str]) -> Vec<String> {
args.iter().map(ToString::to_string).collect()
}

#[test]
fn everything_is_unsafe() {
for cmd in [
vec_str(&["powershell.exe", "-NoLogo", "-Command", "echo hello"]),
vec_str(&["copy", "foo", "bar"]),
vec_str(&["del", "file.txt"]),
vec_str(&["powershell.exe", "Get-ChildItem"]),
] {
assert!(!is_safe_command_windows(&cmd));
}
}
}
@@ -1,8 +1,6 @@
|
||||
use crate::config_profile::ConfigProfile;
|
||||
use crate::config_types::History;
|
||||
use crate::config_types::McpServerConfig;
|
||||
use crate::config_types::McpServerTransportConfig;
|
||||
use crate::config_types::Notifications;
|
||||
use crate::config_types::ReasoningSummaryFormat;
|
||||
use crate::config_types::SandboxWorkspaceWrite;
|
||||
use crate::config_types::ShellEnvironmentPolicy;
|
||||
@@ -27,20 +25,17 @@ use codex_protocol::mcp_protocol::Tools;
|
||||
use codex_protocol::mcp_protocol::UserSavedConfig;
|
||||
use dirs::home_dir;
|
||||
use serde::Deserialize;
|
||||
use std::collections::BTreeMap;
|
||||
use std::collections::HashMap;
|
||||
use std::path::Path;
|
||||
use std::path::PathBuf;
|
||||
use tempfile::NamedTempFile;
|
||||
use toml::Value as TomlValue;
|
||||
use toml_edit::Array as TomlArray;
|
||||
use toml_edit::DocumentMut;
|
||||
use toml_edit::Item as TomlItem;
|
||||
use toml_edit::Table as TomlTable;
|
||||
|
||||
const OPENAI_DEFAULT_MODEL: &str = "gpt-5-codex";
|
||||
const OPENAI_DEFAULT_REVIEW_MODEL: &str = "gpt-5-codex";
|
||||
pub const GPT_5_CODEX_MEDIUM_MODEL: &str = "gpt-5-codex";
|
||||
const OPENAI_DEFAULT_MODEL: &str = "gpt-5";
|
||||
const OPENAI_DEFAULT_REVIEW_MODEL: &str = "gpt-5";
|
||||
pub const SWIFTFOX_MEDIUM_MODEL: &str = "swiftfox";
|
||||
pub const SWIFTFOX_MODEL_DISPLAY_NAME: &str = "swiftfox-medium";
|
||||
|
||||
/// Maximum number of bytes of the documentation that will be embedded. Larger
|
||||
/// files are *silently truncated* to this size so we do not take up too much of
|
||||
@@ -55,7 +50,7 @@ pub struct Config {
|
||||
/// Optional override of model selection.
|
||||
pub model: String,
|
||||
|
||||
/// Model used specifically for review sessions. Defaults to "gpt-5-codex".
|
||||
/// Model used specifically for review sessions. Defaults to "gpt-5".
|
||||
pub review_model: String,
|
||||
|
||||
pub model_family: ModelFamily,
|
||||
@@ -119,10 +114,6 @@ pub struct Config {
|
||||
/// If unset the feature is disabled.
|
||||
pub notify: Option<Vec<String>>,
|
||||
|
||||
/// TUI notifications preference. When set, the TUI will send OSC 9 notifications on approvals
|
||||
/// and turn completions when not focused.
|
||||
pub tui_notifications: Notifications,
|
||||
|
||||
/// The directory that should be treated as the current working directory
|
||||
/// for the session. All relative paths inside the business-logic layer are
|
||||
/// resolved against this path.
|
||||
@@ -185,14 +176,8 @@ pub struct Config {
|
||||
    /// If set to `true`, use only the experimental unified exec tool.
    pub use_experimental_unified_exec_tool: bool,

    /// If set to `true`, use the experimental official Rust MCP client.
    /// https://github.com/modelcontextprotocol/rust-sdk
    pub use_experimental_use_rmcp_client: bool,

    /// Include the `view_image` tool that lets the agent attach a local image path to context.
    pub include_view_image_tool: bool,
    /// When true, sessions run inside a linked git worktree under `cwd/codex/<conversation>`.
    pub enable_git_worktree: bool,

    /// The active profile name used to derive this `Config` (if any).
    pub active_profile: Option<String>,
@@ -280,96 +265,6 @@ pub fn load_config_as_toml(codex_home: &Path) -> std::io::Result<TomlValue> {
    }
}

pub fn load_global_mcp_servers(
    codex_home: &Path,
) -> std::io::Result<BTreeMap<String, McpServerConfig>> {
    let root_value = load_config_as_toml(codex_home)?;
    let Some(servers_value) = root_value.get("mcp_servers") else {
        return Ok(BTreeMap::new());
    };

    servers_value
        .clone()
        .try_into()
        .map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidData, e))
}

pub fn write_global_mcp_servers(
    codex_home: &Path,
    servers: &BTreeMap<String, McpServerConfig>,
) -> std::io::Result<()> {
    let config_path = codex_home.join(CONFIG_TOML_FILE);
    let mut doc = match std::fs::read_to_string(&config_path) {
        Ok(contents) => contents
            .parse::<DocumentMut>()
            .map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidData, e))?,
        Err(e) if e.kind() == std::io::ErrorKind::NotFound => DocumentMut::new(),
        Err(e) => return Err(e),
    };

    doc.as_table_mut().remove("mcp_servers");

    if !servers.is_empty() {
        let mut table = TomlTable::new();
        table.set_implicit(true);
        doc["mcp_servers"] = TomlItem::Table(table);

        for (name, config) in servers {
            let mut entry = TomlTable::new();
            entry.set_implicit(false);
            match &config.transport {
                McpServerTransportConfig::Stdio { command, args, env } => {
                    entry["command"] = toml_edit::value(command.clone());

                    if !args.is_empty() {
                        let mut args_array = TomlArray::new();
                        for arg in args {
                            args_array.push(arg.clone());
                        }
                        entry["args"] = TomlItem::Value(args_array.into());
                    }

                    if let Some(env) = env
                        && !env.is_empty()
                    {
                        let mut env_table = TomlTable::new();
                        env_table.set_implicit(false);
                        let mut pairs: Vec<_> = env.iter().collect();
                        pairs.sort_by(|(a, _), (b, _)| a.cmp(b));
                        for (key, value) in pairs {
                            env_table.insert(key, toml_edit::value(value.clone()));
                        }
                        entry["env"] = TomlItem::Table(env_table);
                    }
                }
                McpServerTransportConfig::StreamableHttp { url, bearer_token } => {
                    entry["url"] = toml_edit::value(url.clone());
                    if let Some(token) = bearer_token {
                        entry["bearer_token"] = toml_edit::value(token.clone());
                    }
                }
            }

            if let Some(timeout) = config.startup_timeout_sec {
                entry["startup_timeout_sec"] = toml_edit::value(timeout.as_secs_f64());
            }

            if let Some(timeout) = config.tool_timeout_sec {
                entry["tool_timeout_sec"] = toml_edit::value(timeout.as_secs_f64());
            }

            doc["mcp_servers"][name.as_str()] = TomlItem::Table(entry);
        }
    }

    std::fs::create_dir_all(codex_home)?;
    let tmp_file = NamedTempFile::new_in(codex_home)?;
    std::fs::write(tmp_file.path(), doc.to_string())?;
    tmp_file.persist(config_path).map_err(|err| err.error)?;

    Ok(())
}
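// Usage sketch (not part of the diff): a round trip through the two helpers
// above, assuming a scratch CODEX_HOME. `write_global_mcp_servers` replaces
// only the `mcp_servers` tables and persists atomically via a named temp
// file, so unrelated keys in config.toml survive the rewrite.
fn add_docs_server(codex_home: &std::path::Path) -> std::io::Result<()> {
    let mut servers = load_global_mcp_servers(codex_home)?;
    servers.insert(
        "docs".to_string(),
        McpServerConfig {
            transport: McpServerTransportConfig::Stdio {
                command: "docs-server".to_string(),
                args: vec!["--verbose".to_string()],
                env: None,
            },
            startup_timeout_sec: None,
            tool_timeout_sec: None,
        },
    );
    write_global_mcp_servers(codex_home, &servers)
}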

fn set_project_trusted_inner(doc: &mut DocumentMut, project_path: &Path) -> anyhow::Result<()> {
    // Ensure we render a human-friendly structure:
    //
@@ -691,9 +586,6 @@ pub struct ConfigToml {
    /// Defaults to `false`.
    pub show_raw_agent_reasoning: Option<bool>,

    /// Enable per-session git worktree checkouts.
    pub enable_git_worktree: Option<bool>,

    pub model_reasoning_effort: Option<ReasoningEffort>,
    pub model_reasoning_summary: Option<ReasoningSummary>,
    /// Optional verbosity control for GPT-5 models (Responses API `text.verbosity`).
@@ -713,7 +605,6 @@ pub struct ConfigToml {

    pub experimental_use_exec_command_tool: Option<bool>,
    pub experimental_use_unified_exec_tool: Option<bool>,
    pub experimental_use_rmcp_client: Option<bool>,

    pub projects: Option<HashMap<String, ProjectConfig>>,

@@ -864,7 +755,6 @@ pub struct ConfigOverrides {
    pub include_view_image_tool: Option<bool>,
    pub show_raw_agent_reasoning: Option<bool>,
    pub tools_web_search_request: Option<bool>,
    pub enable_git_worktree: Option<bool>,
}
impl Config {
@@ -893,7 +783,6 @@ impl Config {
            include_view_image_tool,
            show_raw_agent_reasoning,
            tools_web_search_request: override_tools_web_search_request,
            enable_git_worktree: override_enable_git_worktree,
        } = overrides;

        let active_profile_name = config_profile_key
@@ -967,10 +856,6 @@ impl Config {
            .or(cfg.tools.as_ref().and_then(|t| t.view_image))
            .unwrap_or(true);

        let enable_git_worktree = override_enable_git_worktree
            .or(cfg.enable_git_worktree)
            .unwrap_or(false);

        let model = model
            .or(config_profile.model)
            .or(cfg.model)
@@ -1070,16 +955,9 @@ impl Config {
            use_experimental_unified_exec_tool: cfg
                .experimental_use_unified_exec_tool
                .unwrap_or(false),
            use_experimental_use_rmcp_client: cfg.experimental_use_rmcp_client.unwrap_or(false),
            include_view_image_tool,
            enable_git_worktree,
            active_profile: active_profile_name,
            disable_paste_burst: cfg.disable_paste_burst.unwrap_or(false),
            tui_notifications: cfg
                .tui
                .as_ref()
                .map(|t| t.notifications.clone())
                .unwrap_or_default(),
        };
        Ok(config)
    }
@@ -1190,12 +1068,10 @@ pub fn log_dir(cfg: &Config) -> std::io::Result<PathBuf> {
#[cfg(test)]
mod tests {
    use crate::config_types::HistoryPersistence;
    use crate::config_types::Notifications;

    use super::*;
    use pretty_assertions::assert_eq;

    use std::time::Duration;
    use tempfile::TempDir;

    #[test]
@@ -1230,19 +1106,6 @@ persistence = "none"
        );
    }

    #[test]
    fn tui_config_missing_notifications_field_defaults_to_disabled() {
        let cfg = r#"
[tui]
"#;

        let parsed = toml::from_str::<ConfigToml>(cfg)
            .expect("TUI config without notifications should succeed");
        let tui = parsed.tui.expect("config should include tui section");

        assert_eq!(tui.notifications, Notifications::Enabled(false));
    }

    #[test]
    fn test_sandbox_config_parsing() {
        let sandbox_full_access = r#"
@@ -1299,208 +1162,6 @@ exclude_slash_tmp = true
        );
    }

    #[test]
    fn load_global_mcp_servers_returns_empty_if_missing() -> anyhow::Result<()> {
        let codex_home = TempDir::new()?;

        let servers = load_global_mcp_servers(codex_home.path())?;
        assert!(servers.is_empty());

        Ok(())
    }

    #[test]
    fn write_global_mcp_servers_round_trips_entries() -> anyhow::Result<()> {
        let codex_home = TempDir::new()?;

        let mut servers = BTreeMap::new();
        servers.insert(
            "docs".to_string(),
            McpServerConfig {
                transport: McpServerTransportConfig::Stdio {
                    command: "echo".to_string(),
                    args: vec!["hello".to_string()],
                    env: None,
                },
                startup_timeout_sec: Some(Duration::from_secs(3)),
                tool_timeout_sec: Some(Duration::from_secs(5)),
            },
        );

        write_global_mcp_servers(codex_home.path(), &servers)?;

        let loaded = load_global_mcp_servers(codex_home.path())?;
        assert_eq!(loaded.len(), 1);
        let docs = loaded.get("docs").expect("docs entry");
        match &docs.transport {
            McpServerTransportConfig::Stdio { command, args, env } => {
                assert_eq!(command, "echo");
                assert_eq!(args, &vec!["hello".to_string()]);
                assert!(env.is_none());
            }
            other => panic!("unexpected transport {other:?}"),
        }
        assert_eq!(docs.startup_timeout_sec, Some(Duration::from_secs(3)));
        assert_eq!(docs.tool_timeout_sec, Some(Duration::from_secs(5)));

        let empty = BTreeMap::new();
        write_global_mcp_servers(codex_home.path(), &empty)?;
        let loaded = load_global_mcp_servers(codex_home.path())?;
        assert!(loaded.is_empty());

        Ok(())
    }

    #[test]
    fn load_global_mcp_servers_accepts_legacy_ms_field() -> anyhow::Result<()> {
        let codex_home = TempDir::new()?;
        let config_path = codex_home.path().join(CONFIG_TOML_FILE);

        std::fs::write(
            &config_path,
            r#"
[mcp_servers]
[mcp_servers.docs]
command = "echo"
startup_timeout_ms = 2500
"#,
        )?;

        let servers = load_global_mcp_servers(codex_home.path())?;
        let docs = servers.get("docs").expect("docs entry");
        assert_eq!(docs.startup_timeout_sec, Some(Duration::from_millis(2500)));

        Ok(())
    }

    #[test]
    fn write_global_mcp_servers_serializes_env_sorted() -> anyhow::Result<()> {
        let codex_home = TempDir::new()?;

        let servers = BTreeMap::from([(
            "docs".to_string(),
            McpServerConfig {
                transport: McpServerTransportConfig::Stdio {
                    command: "docs-server".to_string(),
                    args: vec!["--verbose".to_string()],
                    env: Some(HashMap::from([
                        ("ZIG_VAR".to_string(), "3".to_string()),
                        ("ALPHA_VAR".to_string(), "1".to_string()),
                    ])),
                },
                startup_timeout_sec: None,
                tool_timeout_sec: None,
            },
        )]);

        write_global_mcp_servers(codex_home.path(), &servers)?;

        let config_path = codex_home.path().join(CONFIG_TOML_FILE);
        let serialized = std::fs::read_to_string(&config_path)?;
        assert_eq!(
            serialized,
            r#"[mcp_servers.docs]
command = "docs-server"
args = ["--verbose"]

[mcp_servers.docs.env]
ALPHA_VAR = "1"
ZIG_VAR = "3"
"#
        );

        let loaded = load_global_mcp_servers(codex_home.path())?;
        let docs = loaded.get("docs").expect("docs entry");
        match &docs.transport {
            McpServerTransportConfig::Stdio { command, args, env } => {
                assert_eq!(command, "docs-server");
                assert_eq!(args, &vec!["--verbose".to_string()]);
                let env = env
                    .as_ref()
                    .expect("env should be preserved for stdio transport");
                assert_eq!(env.get("ALPHA_VAR"), Some(&"1".to_string()));
                assert_eq!(env.get("ZIG_VAR"), Some(&"3".to_string()));
            }
            other => panic!("unexpected transport {other:?}"),
        }

        Ok(())
    }

    #[test]
    fn write_global_mcp_servers_serializes_streamable_http() -> anyhow::Result<()> {
        let codex_home = TempDir::new()?;

        let mut servers = BTreeMap::from([(
            "docs".to_string(),
            McpServerConfig {
                transport: McpServerTransportConfig::StreamableHttp {
                    url: "https://example.com/mcp".to_string(),
                    bearer_token: Some("secret-token".to_string()),
                },
                startup_timeout_sec: Some(Duration::from_secs(2)),
                tool_timeout_sec: None,
            },
        )]);

        write_global_mcp_servers(codex_home.path(), &servers)?;

        let config_path = codex_home.path().join(CONFIG_TOML_FILE);
        let serialized = std::fs::read_to_string(&config_path)?;
        assert_eq!(
            serialized,
            r#"[mcp_servers.docs]
url = "https://example.com/mcp"
bearer_token = "secret-token"
startup_timeout_sec = 2.0
"#
        );

        let loaded = load_global_mcp_servers(codex_home.path())?;
        let docs = loaded.get("docs").expect("docs entry");
        match &docs.transport {
            McpServerTransportConfig::StreamableHttp { url, bearer_token } => {
                assert_eq!(url, "https://example.com/mcp");
                assert_eq!(bearer_token.as_deref(), Some("secret-token"));
            }
            other => panic!("unexpected transport {other:?}"),
        }
        assert_eq!(docs.startup_timeout_sec, Some(Duration::from_secs(2)));

        servers.insert(
            "docs".to_string(),
            McpServerConfig {
                transport: McpServerTransportConfig::StreamableHttp {
                    url: "https://example.com/mcp".to_string(),
                    bearer_token: None,
                },
                startup_timeout_sec: None,
                tool_timeout_sec: None,
            },
        );
        write_global_mcp_servers(codex_home.path(), &servers)?;

        let serialized = std::fs::read_to_string(&config_path)?;
        assert_eq!(
            serialized,
            r#"[mcp_servers.docs]
url = "https://example.com/mcp"
"#
        );

        let loaded = load_global_mcp_servers(codex_home.path())?;
        let docs = loaded.get("docs").expect("docs entry");
        match &docs.transport {
            McpServerTransportConfig::StreamableHttp { url, bearer_token } => {
                assert_eq!(url, "https://example.com/mcp");
                assert!(bearer_token.is_none());
            }
            other => panic!("unexpected transport {other:?}"),
        }

        Ok(())
    }

    #[tokio::test]
    async fn persist_model_selection_updates_defaults() -> anyhow::Result<()> {
        let codex_home = TempDir::new()?;
@@ -1508,7 +1169,7 @@ url = "https://example.com/mcp"
        persist_model_selection(
            codex_home.path(),
            None,
            "gpt-5-codex",
            "swiftfox",
            Some(ReasoningEffort::High),
        )
        .await?;
@@ -1517,7 +1178,7 @@ url = "https://example.com/mcp"
            tokio::fs::read_to_string(codex_home.path().join(CONFIG_TOML_FILE)).await?;
        let parsed: ConfigToml = toml::from_str(&serialized)?;

        assert_eq!(parsed.model.as_deref(), Some("gpt-5-codex"));
        assert_eq!(parsed.model.as_deref(), Some("swiftfox"));
        assert_eq!(parsed.model_reasoning_effort, Some(ReasoningEffort::High));

        Ok(())
@@ -1531,7 +1192,7 @@ url = "https://example.com/mcp"
        tokio::fs::write(
            &config_path,
            r#"
model = "gpt-5-codex"
model = "gpt-5"
model_reasoning_effort = "medium"

[profiles.dev]
@@ -1571,7 +1232,7 @@ model = "gpt-4.1"
        persist_model_selection(
            codex_home.path(),
            Some("dev"),
            "gpt-5-codex",
            "swiftfox",
            Some(ReasoningEffort::Medium),
        )
        .await?;
@@ -1584,7 +1245,7 @@ model = "gpt-4.1"
            .get("dev")
            .expect("profile should be created");

        assert_eq!(profile.model.as_deref(), Some("gpt-5-codex"));
        assert_eq!(profile.model.as_deref(), Some("swiftfox"));
        assert_eq!(
            profile.model_reasoning_effort,
            Some(ReasoningEffort::Medium)
@@ -1606,7 +1267,7 @@ model = "gpt-4"
model_reasoning_effort = "medium"

[profiles.prod]
model = "gpt-5-codex"
model = "gpt-5"
"#,
        )
        .await?;
@@ -1637,7 +1298,7 @@ model = "gpt-5-codex"
                .profiles
                .get("prod")
                .and_then(|profile| profile.model.as_deref()),
            Some("gpt-5-codex"),
            Some("gpt-5"),
        );

        Ok(())
@@ -1784,7 +1445,7 @@ model_verbosity = "high"
        assert_eq!(
            Config {
                model: "o3".to_string(),
                review_model: OPENAI_DEFAULT_REVIEW_MODEL.to_string(),
                review_model: "gpt-5".to_string(),
                model_family: find_family_for_model("o3").expect("known model slug"),
                model_context_window: Some(200_000),
                model_max_output_tokens: Some(100_000),
@@ -1816,12 +1477,9 @@ model_verbosity = "high"
                tools_web_search_request: false,
                use_experimental_streamable_shell_tool: false,
                use_experimental_unified_exec_tool: false,
                use_experimental_use_rmcp_client: false,
                include_view_image_tool: true,
                enable_git_worktree: false,
                active_profile: Some("o3".to_string()),
                disable_paste_burst: false,
                tui_notifications: Default::default(),
            },
            o3_profile_config
        );
@@ -1844,7 +1502,7 @@ model_verbosity = "high"
        )?;
        let expected_gpt3_profile_config = Config {
            model: "gpt-3.5-turbo".to_string(),
            review_model: OPENAI_DEFAULT_REVIEW_MODEL.to_string(),
            review_model: "gpt-5".to_string(),
            model_family: find_family_for_model("gpt-3.5-turbo").expect("known model slug"),
            model_context_window: Some(16_385),
            model_max_output_tokens: Some(4_096),
@@ -1876,12 +1534,9 @@ model_verbosity = "high"
            tools_web_search_request: false,
            use_experimental_streamable_shell_tool: false,
            use_experimental_unified_exec_tool: false,
            use_experimental_use_rmcp_client: false,
            include_view_image_tool: true,
            enable_git_worktree: false,
            active_profile: Some("gpt3".to_string()),
            disable_paste_burst: false,
            tui_notifications: Default::default(),
        };

        assert_eq!(expected_gpt3_profile_config, gpt3_profile_config);
@@ -1919,7 +1574,7 @@ model_verbosity = "high"
        )?;
        let expected_zdr_profile_config = Config {
            model: "o3".to_string(),
            review_model: OPENAI_DEFAULT_REVIEW_MODEL.to_string(),
            review_model: "gpt-5".to_string(),
            model_family: find_family_for_model("o3").expect("known model slug"),
            model_context_window: Some(200_000),
            model_max_output_tokens: Some(100_000),
@@ -1951,12 +1606,9 @@ model_verbosity = "high"
            tools_web_search_request: false,
            use_experimental_streamable_shell_tool: false,
            use_experimental_unified_exec_tool: false,
            use_experimental_use_rmcp_client: false,
            include_view_image_tool: true,
            enable_git_worktree: false,
            active_profile: Some("zdr".to_string()),
            disable_paste_burst: false,
            tui_notifications: Default::default(),
        };

        assert_eq!(expected_zdr_profile_config, zdr_profile_config);
@@ -1980,7 +1632,7 @@ model_verbosity = "high"
        )?;
        let expected_gpt5_profile_config = Config {
            model: "gpt-5".to_string(),
            review_model: OPENAI_DEFAULT_REVIEW_MODEL.to_string(),
            review_model: "gpt-5".to_string(),
            model_family: find_family_for_model("gpt-5").expect("known model slug"),
            model_context_window: Some(272_000),
            model_max_output_tokens: Some(128_000),
@@ -2012,12 +1664,9 @@ model_verbosity = "high"
            tools_web_search_request: false,
            use_experimental_streamable_shell_tool: false,
            use_experimental_unified_exec_tool: false,
            use_experimental_use_rmcp_client: false,
            include_view_image_tool: true,
            enable_git_worktree: false,
            active_profile: Some("gpt5".to_string()),
            disable_paste_burst: false,
            tui_notifications: Default::default(),
        };

        assert_eq!(expected_gpt5_profile_config, gpt5_profile_config);
@@ -2121,46 +1770,3 @@ trust_level = "trusted"
        Ok(())
    }
}

#[cfg(test)]
mod notifications_tests {
    use crate::config_types::Notifications;
    use serde::Deserialize;

    #[derive(Deserialize, Debug, PartialEq)]
    struct TuiTomlTest {
        notifications: Notifications,
    }

    #[derive(Deserialize, Debug, PartialEq)]
    struct RootTomlTest {
        tui: TuiTomlTest,
    }

    #[test]
    fn test_tui_notifications_true() {
        let toml = r#"
[tui]
notifications = true
"#;
        let parsed: RootTomlTest = toml::from_str(toml).expect("deserialize notifications=true");
        assert!(matches!(
            parsed.tui.notifications,
            Notifications::Enabled(true)
        ));
    }

    #[test]
    fn test_tui_notifications_custom_array() {
        let toml = r#"
[tui]
notifications = ["foo"]
"#;
        let parsed: RootTomlTest =
            toml::from_str(toml).expect("deserialize notifications=[\"foo\"]");
        assert!(matches!(
            parsed.tui.notifications,
            Notifications::Custom(ref v) if v == &vec!["foo".to_string()]
        ));
    }
}

@@ -136,7 +136,7 @@ async fn persist_overrides_with_behavior(
    } else {
        doc.get("profile")
            .and_then(|i| i.as_str())
            .map(str::to_string)
            .map(|s| s.to_string())
    };

    let mut mutated = false;
@@ -228,7 +228,7 @@ mod tests {
            codex_home,
            None,
            &[
                (&[CONFIG_KEY_MODEL], "gpt-5-codex"),
                (&[CONFIG_KEY_MODEL], "gpt-5"),
                (&[CONFIG_KEY_EFFORT], "high"),
            ],
        )
@@ -236,7 +236,7 @@ mod tests {
        .expect("persist");

        let contents = read_config(codex_home).await;
        let expected = r#"model = "gpt-5-codex"
        let expected = r#"model = "gpt-5"
model_reasoning_effort = "high"
"#;
        assert_eq!(contents, expected);
@@ -348,7 +348,7 @@ model_reasoning_effort = "high"
            &[
                (&["a", "b", "c"], "v"),
                (&["x"], "y"),
                (&["profiles", "p1", CONFIG_KEY_MODEL], "gpt-5-codex"),
                (&["profiles", "p1", CONFIG_KEY_MODEL], "gpt-5"),
            ],
        )
        .await
@@ -361,7 +361,7 @@ model_reasoning_effort = "high"
c = "v"

[profiles.p1]
model = "gpt-5-codex"
model = "gpt-5"
"#;
        assert_eq!(contents, expected);
    }
@@ -454,7 +454,7 @@ existing = "keep"
            codex_home,
            None,
            &[
                (&[CONFIG_KEY_MODEL], "gpt-5-codex"),
                (&[CONFIG_KEY_MODEL], "gpt-5"),
                (&[CONFIG_KEY_EFFORT], "minimal"),
            ],
        )
@@ -466,7 +466,7 @@ existing = "keep"
# should be preserved

existing = "keep"
model = "gpt-5-codex"
model = "gpt-5"
model_reasoning_effort = "minimal"
"#;
        assert_eq!(contents, expected);
@@ -524,7 +524,7 @@ model = "o3"
        let codex_home = tmpdir.path();

        // Seed with a model value only
        let seed = "model = \"gpt-5-codex\"\n";
        let seed = "model = \"gpt-5\"\n";
        tokio::fs::write(codex_home.join(CONFIG_TOML_FILE), seed)
            .await
            .expect("seed write");
@@ -535,7 +535,7 @@ model = "o3"
        .expect("persist");

        let contents = read_config(codex_home).await;
        let expected = r#"model = "gpt-5-codex"
        let expected = r#"model = "gpt-5"
model_reasoning_effort = "high"
"#;
        assert_eq!(contents, expected);
@@ -579,7 +579,7 @@ model = "o4-mini"

        // No active profile key; we'll target an explicit override
        let seed = r#"[profiles.team]
model = "gpt-5-codex"
model = "gpt-5"
"#;
        tokio::fs::write(codex_home.join(CONFIG_TOML_FILE), seed)
            .await
@@ -595,7 +595,7 @@ model = "gpt-5-codex"

        let contents = read_config(codex_home).await;
        let expected = r#"[profiles.team]
model = "gpt-5-codex"
model = "gpt-5"
model_reasoning_effort = "minimal"
"#;
        assert_eq!(contents, expected);
@@ -611,7 +611,7 @@ model_reasoning_effort = "minimal"
            codex_home,
            None,
            &[
                (&[CONFIG_KEY_MODEL], Some("gpt-5-codex")),
                (&[CONFIG_KEY_MODEL], Some("gpt-5")),
                (&[CONFIG_KEY_EFFORT], None),
            ],
        )
@@ -619,7 +619,7 @@ model_reasoning_effort = "minimal"
        .expect("persist");

        let contents = read_config(codex_home).await;
        let expected = "model = \"gpt-5-codex\"\n";
        let expected = "model = \"gpt-5\"\n";
        assert_eq!(contents, expected);
    }

@@ -670,7 +670,7 @@ model = "o3"
        let tmpdir = tempdir().expect("tmp");
        let codex_home = tmpdir.path();

        let seed = r#"model = "gpt-5-codex"
        let seed = r#"model = "gpt-5"
model_reasoning_effort = "medium"
"#;
        tokio::fs::write(codex_home.join(CONFIG_TOML_FILE), seed)

@@ -3,168 +3,25 @@
// Note this file should generally be restricted to simple struct/enum
// definitions that do not contain business logic.

use serde::Deserializer;
use std::collections::HashMap;
use std::path::PathBuf;
use std::time::Duration;
use wildmatch::WildMatchPattern;

use serde::Deserialize;
use serde::Serialize;
use serde::de::Error as SerdeError;

#[derive(Serialize, Debug, Clone, PartialEq)]
#[derive(Deserialize, Debug, Clone, PartialEq)]
pub struct McpServerConfig {
    #[serde(flatten)]
    pub transport: McpServerTransportConfig,
    pub command: String,

    /// Startup timeout in seconds for initializing MCP server & initially listing tools.
    #[serde(
        default,
        with = "option_duration_secs",
        skip_serializing_if = "Option::is_none"
    )]
    pub startup_timeout_sec: Option<Duration>,
    #[serde(default)]
    pub args: Vec<String>,

    /// Default timeout for MCP tool calls initiated via this server.
    #[serde(default, with = "option_duration_secs")]
    pub tool_timeout_sec: Option<Duration>,
}
    #[serde(default)]
    pub env: Option<HashMap<String, String>>,

impl<'de> Deserialize<'de> for McpServerConfig {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        #[derive(Deserialize)]
        struct RawMcpServerConfig {
            command: Option<String>,
            #[serde(default)]
            args: Option<Vec<String>>,
            #[serde(default)]
            env: Option<HashMap<String, String>>,

            url: Option<String>,
            bearer_token: Option<String>,

            #[serde(default)]
            startup_timeout_sec: Option<f64>,
            #[serde(default)]
            startup_timeout_ms: Option<u64>,
            #[serde(default, with = "option_duration_secs")]
            tool_timeout_sec: Option<Duration>,
        }

        let raw = RawMcpServerConfig::deserialize(deserializer)?;

        let startup_timeout_sec = match (raw.startup_timeout_sec, raw.startup_timeout_ms) {
            (Some(sec), _) => {
                let duration = Duration::try_from_secs_f64(sec).map_err(SerdeError::custom)?;
                Some(duration)
            }
            (None, Some(ms)) => Some(Duration::from_millis(ms)),
            (None, None) => None,
        };

        fn throw_if_set<E, T>(transport: &str, field: &str, value: Option<&T>) -> Result<(), E>
        where
            E: SerdeError,
        {
            if value.is_none() {
                return Ok(());
            }
            Err(E::custom(format!(
                "{field} is not supported for {transport}",
            )))
        }

        let transport = match raw {
            RawMcpServerConfig {
                command: Some(command),
                args,
                env,
                url,
                bearer_token,
                ..
            } => {
                throw_if_set("stdio", "url", url.as_ref())?;
                throw_if_set("stdio", "bearer_token", bearer_token.as_ref())?;
                McpServerTransportConfig::Stdio {
                    command,
                    args: args.unwrap_or_default(),
                    env,
                }
            }
            RawMcpServerConfig {
                url: Some(url),
                bearer_token,
                command,
                args,
                env,
                ..
            } => {
                throw_if_set("streamable_http", "command", command.as_ref())?;
                throw_if_set("streamable_http", "args", args.as_ref())?;
                throw_if_set("streamable_http", "env", env.as_ref())?;
                McpServerTransportConfig::StreamableHttp { url, bearer_token }
            }
            _ => return Err(SerdeError::custom("invalid transport")),
        };

        Ok(Self {
            transport,
            startup_timeout_sec,
            tool_timeout_sec: raw.tool_timeout_sec,
        })
    }
}

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)]
#[serde(untagged, deny_unknown_fields, rename_all = "snake_case")]
pub enum McpServerTransportConfig {
    /// https://modelcontextprotocol.io/specification/2025-06-18/basic/transports#stdio
    Stdio {
        command: String,
        #[serde(default)]
        args: Vec<String>,
        #[serde(default, skip_serializing_if = "Option::is_none")]
        env: Option<HashMap<String, String>>,
    },
    /// https://modelcontextprotocol.io/specification/2025-06-18/basic/transports#streamable-http
    StreamableHttp {
        url: String,
        /// A plain text bearer token to use for authentication.
        /// This bearer token will be included in the HTTP request header as an `Authorization: Bearer <token>` header.
        /// This should be used with caution because it lives on disk in clear text.
        #[serde(default, skip_serializing_if = "Option::is_none")]
        bearer_token: Option<String>,
    },
}

mod option_duration_secs {
    use serde::Deserialize;
    use serde::Deserializer;
    use serde::Serializer;
    use std::time::Duration;

    pub fn serialize<S>(value: &Option<Duration>, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        match value {
            Some(duration) => serializer.serialize_some(&duration.as_secs_f64()),
            None => serializer.serialize_none(),
        }
    }

    pub fn deserialize<'de, D>(deserializer: D) -> Result<Option<Duration>, D::Error>
    where
        D: Deserializer<'de>,
    {
        let secs = Option::<f64>::deserialize(deserializer)?;
        secs.map(|secs| Duration::try_from_secs_f64(secs).map_err(serde::de::Error::custom))
            .transpose()
    }
    /// Startup timeout in milliseconds for initializing MCP server & initially listing tools.
    #[serde(default)]
    pub startup_timeout_ms: Option<u64>,
}
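// A minimal sketch (not part of the diff) of how a field wired to the
// `option_duration_secs` adapter above round-trips: the value is stored as
// fractional seconds, so `2.5` in TOML becomes 2500 milliseconds.
#[derive(serde::Deserialize)]
struct TimeoutDemo {
    #[serde(default, with = "option_duration_secs")]
    timeout: Option<std::time::Duration>,
}

fn timeout_demo() {
    let parsed: TimeoutDemo = toml::from_str("timeout = 2.5").expect("valid TOML");
    assert_eq!(parsed.timeout, Some(std::time::Duration::from_millis(2500)));
}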

#[derive(Deserialize, Debug, Copy, Clone, PartialEq)]
@@ -219,27 +76,9 @@ pub enum HistoryPersistence {
    None,
}

#[derive(Debug, Clone, PartialEq, Eq, Deserialize)]
#[serde(untagged)]
pub enum Notifications {
    Enabled(bool),
    Custom(Vec<String>),
}

impl Default for Notifications {
    fn default() -> Self {
        Self::Enabled(false)
    }
}
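// For reference (not part of the diff), the untagged enum accepts either
// shape for `notifications` under `[tui]` in config.toml:
//
//   [tui]
//   notifications = true     # parses as Notifications::Enabled(true)
//
//   [tui]
//   notifications = ["foo"]  # parses as Notifications::Custom(vec!["foo"])
//
// The array string here is a placeholder; the accepted notification names
// are defined by the TUI layer, not by this enum.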

/// Collection of settings that are specific to the TUI.
#[derive(Deserialize, Debug, Clone, PartialEq, Default)]
pub struct Tui {
    /// Enable desktop notifications from the TUI when the terminal is unfocused.
    /// Defaults to `false`.
    #[serde(default)]
    pub notifications: Notifications,
}
pub struct Tui {}

#[derive(Deserialize, Debug, Clone, PartialEq, Default)]
pub struct SandboxWorkspaceWrite {
@@ -367,139 +206,3 @@ pub enum ReasoningSummaryFormat {
    None,
    Experimental,
}

#[cfg(test)]
mod tests {
    use super::*;
    use pretty_assertions::assert_eq;

    #[test]
    fn deserialize_stdio_command_server_config() {
        let cfg: McpServerConfig = toml::from_str(
            r#"
command = "echo"
"#,
        )
        .expect("should deserialize command config");

        assert_eq!(
            cfg.transport,
            McpServerTransportConfig::Stdio {
                command: "echo".to_string(),
                args: vec![],
                env: None
            }
        );
    }

    #[test]
    fn deserialize_stdio_command_server_config_with_args() {
        let cfg: McpServerConfig = toml::from_str(
            r#"
command = "echo"
args = ["hello", "world"]
"#,
        )
        .expect("should deserialize command config");

        assert_eq!(
            cfg.transport,
            McpServerTransportConfig::Stdio {
                command: "echo".to_string(),
                args: vec!["hello".to_string(), "world".to_string()],
                env: None
            }
        );
    }

    #[test]
    fn deserialize_stdio_command_server_config_with_arg_with_args_and_env() {
        let cfg: McpServerConfig = toml::from_str(
            r#"
command = "echo"
args = ["hello", "world"]
env = { "FOO" = "BAR" }
"#,
        )
        .expect("should deserialize command config");

        assert_eq!(
            cfg.transport,
            McpServerTransportConfig::Stdio {
                command: "echo".to_string(),
                args: vec!["hello".to_string(), "world".to_string()],
                env: Some(HashMap::from([("FOO".to_string(), "BAR".to_string())]))
            }
        );
    }

    #[test]
    fn deserialize_streamable_http_server_config() {
        let cfg: McpServerConfig = toml::from_str(
            r#"
url = "https://example.com/mcp"
"#,
        )
        .expect("should deserialize http config");

        assert_eq!(
            cfg.transport,
            McpServerTransportConfig::StreamableHttp {
                url: "https://example.com/mcp".to_string(),
                bearer_token: None
            }
        );
    }

    #[test]
    fn deserialize_streamable_http_server_config_with_bearer_token() {
        let cfg: McpServerConfig = toml::from_str(
            r#"
url = "https://example.com/mcp"
bearer_token = "secret"
"#,
        )
        .expect("should deserialize http config");

        assert_eq!(
            cfg.transport,
            McpServerTransportConfig::StreamableHttp {
                url: "https://example.com/mcp".to_string(),
                bearer_token: Some("secret".to_string())
            }
        );
    }

    #[test]
    fn deserialize_rejects_command_and_url() {
        toml::from_str::<McpServerConfig>(
            r#"
command = "echo"
url = "https://example.com"
"#,
        )
        .expect_err("should reject command+url");
    }

    #[test]
    fn deserialize_rejects_env_for_http_transport() {
        toml::from_str::<McpServerConfig>(
            r#"
url = "https://example.com"
env = { "FOO" = "BAR" }
"#,
        )
        .expect_err("should reject env for http transport");
    }

    #[test]
    fn deserialize_rejects_bearer_token_for_stdio_transport() {
        toml::from_str::<McpServerConfig>(
            r#"
command = "echo"
bearer_token = "secret"
"#,
        )
        .expect_err("should reject bearer token for stdio transport");
    }
}

@@ -47,9 +47,8 @@ fn is_api_message(message: &ResponseItem) -> bool {
        | ResponseItem::CustomToolCall { .. }
        | ResponseItem::CustomToolCallOutput { .. }
        | ResponseItem::LocalShellCall { .. }
        | ResponseItem::Reasoning { .. }
        | ResponseItem::WebSearchCall { .. } => true,
        ResponseItem::Other => false,
        | ResponseItem::Reasoning { .. } => true,
        ResponseItem::WebSearchCall { .. } | ResponseItem::Other => false,
    }
}

@@ -3,8 +3,6 @@ use crate::CodexAuth;
use crate::codex::Codex;
use crate::codex::CodexSpawnOk;
use crate::codex::INITIAL_SUBMIT_ID;
use crate::codex::compact::content_items_to_text;
use crate::codex::compact::is_session_prefix_message;
use crate::codex_conversation::CodexConversation;
use crate::config::Config;
use crate::error::CodexErr;
@@ -136,19 +134,19 @@ impl ConversationManager {
        self.conversations.write().await.remove(conversation_id)
    }

    /// Fork an existing conversation by taking messages up to the given position
    /// (not including the message at the given position) and starting a new
    /// Fork an existing conversation by dropping the last `drop_last_messages`
    /// user/assistant messages from its transcript and starting a new
    /// conversation with identical configuration (unless overridden by the
    /// caller's `config`). The new conversation will have a fresh id.
    pub async fn fork_conversation(
        &self,
        nth_user_message: usize,
        num_messages_to_drop: usize,
        config: Config,
        path: PathBuf,
    ) -> CodexResult<NewConversation> {
        // Compute the prefix up to the cut point.
        let history = RolloutRecorder::get_rollout_history(&path).await?;
        let history = truncate_before_nth_user_message(history, nth_user_message);
        let history = truncate_after_dropping_last_messages(history, num_messages_to_drop);

        // Spawn a new conversation with the computed initial history.
        let auth_manager = self.auth_manager.clone();
@@ -161,30 +159,33 @@ impl ConversationManager {
    }
}

/// Return a prefix of `items` obtained by cutting strictly before the nth user message
/// (0-based) and all items that follow it.
fn truncate_before_nth_user_message(history: InitialHistory, n: usize) -> InitialHistory {
    // Work directly on rollout items, and cut the vector at the nth user message input.
/// Return a prefix of `items` obtained by dropping the last `n` user messages
/// and all items that follow them.
fn truncate_after_dropping_last_messages(history: InitialHistory, n: usize) -> InitialHistory {
    if n == 0 {
        return InitialHistory::Forked(history.get_rollout_items());
    }

    // Work directly on rollout items, and cut the vector at the nth-from-last user message input.
    let items: Vec<RolloutItem> = history.get_rollout_items();

    // Find indices of user message inputs in rollout order.
    let mut user_positions: Vec<usize> = Vec::new();
    for (idx, item) in items.iter().enumerate() {
        if let RolloutItem::ResponseItem(ResponseItem::Message { role, content, .. }) = item
        if let RolloutItem::ResponseItem(ResponseItem::Message { role, .. }) = item
            && role == "user"
            && content_items_to_text(content).is_some_and(|text| !is_session_prefix_message(&text))
        {
            user_positions.push(idx);
        }
    }

    // If fewer than or equal to n user messages exist, treat as empty (out of range).
    if user_positions.len() <= n {
    // If fewer than n user messages exist, treat as empty.
    if user_positions.len() < n {
        return InitialHistory::New;
    }

    // Cut strictly before the nth user message (do not keep the nth itself).
    let cut_idx = user_positions[n];
    // Cut strictly before the nth-from-last user message (do not keep the nth itself).
    let cut_idx = user_positions[user_positions.len() - n];
    let rolled: Vec<RolloutItem> = items.into_iter().take(cut_idx).collect();

    if rolled.is_empty() {
@@ -197,11 +198,9 @@ fn truncate_before_nth_user_message(history: InitialHistory, n: usize) -> Initia
#[cfg(test)]
mod tests {
    use super::*;
    use crate::codex::make_session_and_context;
    use codex_protocol::models::ContentItem;
    use codex_protocol::models::ReasoningItemReasoningSummary;
    use codex_protocol::models::ResponseItem;
    use pretty_assertions::assert_eq;

    fn user_msg(text: &str) -> ResponseItem {
        ResponseItem::Message {
@@ -253,7 +252,7 @@ mod tests {
            .cloned()
            .map(RolloutItem::ResponseItem)
            .collect();
        let truncated = truncate_before_nth_user_message(InitialHistory::Forked(initial), 1);
        let truncated = truncate_after_dropping_last_messages(InitialHistory::Forked(initial), 1);
        let got_items = truncated.get_rollout_items();
        let expected_items = vec![
            RolloutItem::ResponseItem(items[0].clone()),
@@ -270,37 +269,7 @@ mod tests {
            .cloned()
            .map(RolloutItem::ResponseItem)
            .collect();
        let truncated2 = truncate_before_nth_user_message(InitialHistory::Forked(initial2), 2);
        let truncated2 = truncate_after_dropping_last_messages(InitialHistory::Forked(initial2), 2);
        assert!(matches!(truncated2, InitialHistory::New));
    }

    #[test]
    fn ignores_session_prefix_messages_when_truncating() {
        let (session, turn_context) = make_session_and_context();
        let mut items = session.build_initial_context(&turn_context);
        items.push(user_msg("feature request"));
        items.push(assistant_msg("ack"));
        items.push(user_msg("second question"));
        items.push(assistant_msg("answer"));

        let rollout_items: Vec<RolloutItem> = items
            .iter()
            .cloned()
            .map(RolloutItem::ResponseItem)
            .collect();

        let truncated = truncate_before_nth_user_message(InitialHistory::Forked(rollout_items), 1);
        let got_items = truncated.get_rollout_items();

        let expected: Vec<RolloutItem> = vec![
            RolloutItem::ResponseItem(items[0].clone()),
            RolloutItem::ResponseItem(items[1].clone()),
            RolloutItem::ResponseItem(items[2].clone()),
        ];

        assert_eq!(
            serde_json::to_value(&got_items).unwrap(),
            serde_json::to_value(&expected).unwrap()
        );
    }
}
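// Illustrative sketch (not from the diff) of the cut-point arithmetic in
// `truncate_after_dropping_last_messages`: given the indices of user messages
// in rollout order, dropping the last `n` keeps everything strictly before
// the nth-from-last user message.
fn cut_point(user_positions: &[usize], n: usize) -> Option<usize> {
    if n == 0 {
        return None; // nothing dropped; caller keeps the whole history
    }
    if user_positions.len() < n {
        return Some(0); // fewer than n user messages: everything is dropped
    }
    Some(user_positions[user_positions.len() - n])
}
// e.g. with user messages at indices [2, 5, 9]: n = 1 cuts at 9,
// n = 2 cuts at 5, and n = 3 cuts at 2 (only items 0 and 1 survive).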

@@ -52,7 +52,7 @@ pub async fn discover_prompts_in_excluding(
        let Some(name) = path
            .file_stem()
            .and_then(|s| s.to_str())
            .map(str::to_string)
            .map(|s| s.to_string())
        else {
            continue;
        };

@@ -1,4 +1,3 @@
use crate::spawn::CODEX_SANDBOX_ENV_VAR;
use reqwest::header::HeaderValue;
use std::sync::LazyLock;
use std::sync::Mutex;
@@ -21,6 +20,7 @@ use std::sync::Mutex;
pub static USER_AGENT_SUFFIX: LazyLock<Mutex<Option<String>>> = LazyLock::new(|| Mutex::new(None));

pub const CODEX_INTERNAL_ORIGINATOR_OVERRIDE_ENV_VAR: &str = "CODEX_INTERNAL_ORIGINATOR_OVERRIDE";

#[derive(Debug, Clone)]
pub struct Originator {
    pub value: String,
@@ -112,25 +112,17 @@ pub fn create_client() -> reqwest::Client {
    headers.insert("originator", ORIGINATOR.header_value.clone());
    let ua = get_codex_user_agent();

    let mut builder = reqwest::Client::builder()
    reqwest::Client::builder()
        // Set UA via dedicated helper to avoid header validation pitfalls
        .user_agent(ua)
        .default_headers(headers);
    if is_sandboxed() {
        builder = builder.no_proxy();
    }

    builder.build().unwrap_or_else(|_| reqwest::Client::new())
}

fn is_sandboxed() -> bool {
    std::env::var(CODEX_SANDBOX_ENV_VAR).as_deref() == Ok("seatbelt")
        .default_headers(headers)
        .build()
        .unwrap_or_else(|_| reqwest::Client::new())
}

#[cfg(test)]
mod tests {
    use super::*;
    use core_test_support::skip_if_no_network;

    #[test]
    fn test_get_codex_user_agent() {
@@ -140,8 +132,6 @@ mod tests {

    #[tokio::test]
    async fn test_create_client_sets_default_headers() {
        skip_if_no_network!();

        use wiremock::Mock;
        use wiremock::MockServer;
        use wiremock::ResponseTemplate;

@@ -2,7 +2,6 @@ use serde::Deserialize;
use serde::Serialize;
use strum_macros::Display as DeriveDisplay;

use crate::codex::TurnContext;
use crate::protocol::AskForApproval;
use crate::protocol::SandboxPolicy;
use crate::shell::Shell;
@@ -72,39 +71,6 @@ impl EnvironmentContext {
            shell,
        }
    }

    /// Compares two environment contexts, ignoring the shell. Useful when
    /// comparing turn to turn, since the initial environment_context will
    /// include the shell, and then it is not configurable from turn to turn.
    pub fn equals_except_shell(&self, other: &EnvironmentContext) -> bool {
        let EnvironmentContext {
            cwd,
            approval_policy,
            sandbox_mode,
            network_access,
            writable_roots,
            // should compare all fields except shell
            shell: _,
        } = other;

        self.cwd == *cwd
            && self.approval_policy == *approval_policy
            && self.sandbox_mode == *sandbox_mode
            && self.network_access == *network_access
            && self.writable_roots == *writable_roots
    }
}
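// Side note (not from the diff): the exhaustive destructuring with `shell: _`
// is what keeps this comparison honest over time. A hypothetical struct shows
// the effect: adding a field breaks the pattern at compile time, forcing a
// decision about whether the new field joins the equality check.
struct Example {
    a: u32,
    b: u32,
}

fn equals_except_b(left: &Example, right: &Example) -> bool {
    // Adding a field `c` to `Example` makes this pattern non-exhaustive,
    // so the compiler flags this function until `c` is handled.
    let Example { a, b: _ } = right;
    left.a == *a
}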

impl From<&TurnContext> for EnvironmentContext {
    fn from(turn_context: &TurnContext) -> Self {
        Self::new(
            Some(turn_context.cwd.clone()),
            Some(turn_context.approval_policy),
            Some(turn_context.sandbox_policy.clone()),
            // Shell is not configurable from turn to turn
            None,
        )
    }
}

impl EnvironmentContext {
@@ -174,9 +140,6 @@ impl From<EnvironmentContext> for ResponseItem {

#[cfg(test)]
mod tests {
    use crate::shell::BashShell;
    use crate::shell::ZshShell;

    use super::*;
    use pretty_assertions::assert_eq;

@@ -247,82 +210,4 @@ mod tests {

        assert_eq!(context.serialize_to_xml(), expected);
    }

    #[test]
    fn equals_except_shell_compares_approval_policy() {
        // Approval policy
        let context1 = EnvironmentContext::new(
            Some(PathBuf::from("/repo")),
            Some(AskForApproval::OnRequest),
            Some(workspace_write_policy(vec!["/repo"], false)),
            None,
        );
        let context2 = EnvironmentContext::new(
            Some(PathBuf::from("/repo")),
            Some(AskForApproval::Never),
            Some(workspace_write_policy(vec!["/repo"], true)),
            None,
        );
        assert!(!context1.equals_except_shell(&context2));
    }

    #[test]
    fn equals_except_shell_compares_sandbox_policy() {
        let context1 = EnvironmentContext::new(
            Some(PathBuf::from("/repo")),
            Some(AskForApproval::OnRequest),
            Some(SandboxPolicy::new_read_only_policy()),
            None,
        );
        let context2 = EnvironmentContext::new(
            Some(PathBuf::from("/repo")),
            Some(AskForApproval::OnRequest),
            Some(SandboxPolicy::new_workspace_write_policy()),
            None,
        );

        assert!(!context1.equals_except_shell(&context2));
    }

    #[test]
    fn equals_except_shell_compares_workspace_write_policy() {
        let context1 = EnvironmentContext::new(
            Some(PathBuf::from("/repo")),
            Some(AskForApproval::OnRequest),
            Some(workspace_write_policy(vec!["/repo", "/tmp", "/var"], false)),
            None,
        );
        let context2 = EnvironmentContext::new(
            Some(PathBuf::from("/repo")),
            Some(AskForApproval::OnRequest),
            Some(workspace_write_policy(vec!["/repo", "/tmp"], true)),
            None,
        );

        assert!(!context1.equals_except_shell(&context2));
    }

    #[test]
    fn equals_except_shell_ignores_shell() {
        let context1 = EnvironmentContext::new(
            Some(PathBuf::from("/repo")),
            Some(AskForApproval::OnRequest),
            Some(workspace_write_policy(vec!["/repo"], false)),
            Some(Shell::Bash(BashShell {
                shell_path: "/bin/bash".into(),
                bashrc_path: "/home/user/.bashrc".into(),
            })),
        );
        let context2 = EnvironmentContext::new(
            Some(PathBuf::from("/repo")),
            Some(AskForApproval::OnRequest),
            Some(workspace_write_policy(vec!["/repo"], false)),
            Some(Shell::Zsh(ZshShell {
                shell_path: "/bin/zsh".into(),
                zshrc_path: "/home/user/.zshrc".into(),
            })),
        );

        assert!(context1.equals_except_shell(&context2));
    }
}

@@ -2,7 +2,6 @@ use crate::exec::ExecToolCallOutput;
use crate::token_data::KnownPlan;
use crate::token_data::PlanType;
use codex_protocol::mcp_protocol::ConversationId;
use codex_protocol::protocol::RateLimitSnapshot;
use reqwest::StatusCode;
use serde_json;
use std::io;
@@ -105,9 +104,6 @@ pub enum CodexErr {
    #[error("codex-linux-sandbox was required but not provided")]
    LandlockSandboxExecutableNotProvided,

    #[error("unsupported operation: {0}")]
    UnsupportedOperation(String),

    // -----------------------------------------------------------------
    // Automatic conversions for common external error types
    // -----------------------------------------------------------------
@@ -139,7 +135,6 @@ pub enum CodexErr {
pub struct UsageLimitReachedError {
    pub(crate) plan_type: Option<PlanType>,
    pub(crate) resets_in_seconds: Option<u64>,
    pub(crate) rate_limits: Option<RateLimitSnapshot>,
}

impl std::fmt::Display for UsageLimitReachedError {
@@ -156,7 +151,7 @@ impl std::fmt::Display for UsageLimitReachedError {
                )
            }
            Some(PlanType::Known(KnownPlan::Free)) => {
                "You've hit your usage limit. Upgrade to Plus to continue using Codex (https://openai.com/chatgpt/pricing)."
                "To use Codex with your ChatGPT plan, upgrade to Plus: https://openai.com/chatgpt/pricing."
                    .to_string()
            }
            Some(PlanType::Known(KnownPlan::Pro))
@@ -267,29 +262,12 @@ pub fn get_error_message_ui(e: &CodexErr) -> String {
#[cfg(test)]
mod tests {
    use super::*;
    use codex_protocol::protocol::RateLimitWindow;

    fn rate_limit_snapshot() -> RateLimitSnapshot {
        RateLimitSnapshot {
            primary: Some(RateLimitWindow {
                used_percent: 50.0,
                window_minutes: Some(60),
                resets_in_seconds: Some(3600),
            }),
            secondary: Some(RateLimitWindow {
                used_percent: 30.0,
                window_minutes: Some(120),
                resets_in_seconds: Some(7200),
            }),
        }
    }

    #[test]
    fn usage_limit_reached_error_formats_plus_plan() {
        let err = UsageLimitReachedError {
            plan_type: Some(PlanType::Known(KnownPlan::Plus)),
            resets_in_seconds: None,
            rate_limits: Some(rate_limit_snapshot()),
        };
        assert_eq!(
            err.to_string(),
@@ -302,11 +280,10 @@ mod tests {
        let err = UsageLimitReachedError {
            plan_type: Some(PlanType::Known(KnownPlan::Free)),
            resets_in_seconds: Some(3600),
            rate_limits: Some(rate_limit_snapshot()),
        };
        assert_eq!(
            err.to_string(),
            "You've hit your usage limit. Upgrade to Plus to continue using Codex (https://openai.com/chatgpt/pricing)."
            "To use Codex with your ChatGPT plan, upgrade to Plus: https://openai.com/chatgpt/pricing."
        );
    }

@@ -315,7 +292,6 @@ mod tests {
        let err = UsageLimitReachedError {
            plan_type: None,
            resets_in_seconds: None,
            rate_limits: Some(rate_limit_snapshot()),
        };
        assert_eq!(
            err.to_string(),
@@ -328,7 +304,6 @@ mod tests {
        let err = UsageLimitReachedError {
            plan_type: Some(PlanType::Known(KnownPlan::Team)),
            resets_in_seconds: Some(3600),
            rate_limits: Some(rate_limit_snapshot()),
        };
        assert_eq!(
            err.to_string(),
@@ -341,7 +316,6 @@ mod tests {
        let err = UsageLimitReachedError {
            plan_type: Some(PlanType::Known(KnownPlan::Business)),
            resets_in_seconds: None,
            rate_limits: Some(rate_limit_snapshot()),
        };
        assert_eq!(
            err.to_string(),
@@ -354,7 +328,6 @@ mod tests {
        let err = UsageLimitReachedError {
            plan_type: Some(PlanType::Known(KnownPlan::Pro)),
            resets_in_seconds: None,
            rate_limits: Some(rate_limit_snapshot()),
        };
        assert_eq!(
            err.to_string(),
@@ -367,7 +340,6 @@ mod tests {
        let err = UsageLimitReachedError {
            plan_type: None,
            resets_in_seconds: Some(5 * 60),
            rate_limits: Some(rate_limit_snapshot()),
        };
        assert_eq!(
            err.to_string(),
@@ -380,7 +352,6 @@ mod tests {
        let err = UsageLimitReachedError {
            plan_type: Some(PlanType::Known(KnownPlan::Plus)),
            resets_in_seconds: Some(3 * 3600 + 32 * 60),
            rate_limits: Some(rate_limit_snapshot()),
        };
        assert_eq!(
            err.to_string(),
@@ -393,7 +364,6 @@ mod tests {
        let err = UsageLimitReachedError {
            plan_type: None,
            resets_in_seconds: Some(2 * 86_400 + 3 * 3600 + 5 * 60),
            rate_limits: Some(rate_limit_snapshot()),
        };
        assert_eq!(
            err.to_string(),
@@ -406,7 +376,6 @@ mod tests {
        let err = UsageLimitReachedError {
            plan_type: None,
            resets_in_seconds: Some(30),
            rate_limits: Some(rate_limit_snapshot()),
        };
        assert_eq!(
            err.to_string(),

@@ -3,7 +3,6 @@ use std::os::unix::process::ExitStatusExt;

use std::collections::HashMap;
use std::io;
use std::path::Path;
use std::path::PathBuf;
use std::process::ExitStatus;
use std::time::Duration;
@@ -45,7 +44,7 @@ const AGGREGATE_BUFFER_INITIAL_CAPACITY: usize = 8 * 1024; // 8 KiB
/// Aggregation still collects full output; only the live event stream is capped.
pub(crate) const MAX_EXEC_OUTPUT_DELTAS_PER_CALL: usize = 10_000;

#[derive(Clone, Debug)]
#[derive(Debug, Clone)]
pub struct ExecParams {
    pub command: Vec<String>,
    pub cwd: PathBuf,
@@ -83,7 +82,6 @@ pub async fn process_exec_tool_call(
    params: ExecParams,
    sandbox_type: SandboxType,
    sandbox_policy: &SandboxPolicy,
    sandbox_cwd: &Path,
    codex_linux_sandbox_exe: &Option<PathBuf>,
    stdout_stream: Option<StdoutStream>,
) -> Result<ExecToolCallOutput> {
@@ -96,16 +94,12 @@ pub async fn process_exec_tool_call(
        SandboxType::None => exec(params, sandbox_policy, stdout_stream.clone()).await,
        SandboxType::MacosSeatbelt => {
            let ExecParams {
                command,
                cwd: command_cwd,
                env,
                ..
                command, cwd, env, ..
            } = params;
            let child = spawn_command_under_seatbelt(
                command,
                command_cwd,
                sandbox_policy,
                sandbox_cwd,
                cwd,
                StdioPolicy::RedirectForShellTool,
                env,
            )
@@ -114,10 +108,7 @@ pub async fn process_exec_tool_call(
        }
        SandboxType::LinuxSeccomp => {
            let ExecParams {
                command,
                cwd: command_cwd,
                env,
                ..
                command, cwd, env, ..
            } = params;

            let codex_linux_sandbox_exe = codex_linux_sandbox_exe
@@ -126,9 +117,8 @@ pub async fn process_exec_tool_call(
            let child = spawn_command_under_linux_sandbox(
                codex_linux_sandbox_exe,
                command,
                command_cwd,
                sandbox_policy,
                sandbox_cwd,
                cwd,
                StdioPolicy::RedirectForShellTool,
                env,
            )

@@ -11,6 +11,9 @@ pub(crate) struct ExecCommandSession {
    /// Broadcast stream of output chunks read from the PTY. New subscribers
    /// receive only chunks emitted after they subscribe.
    output_tx: broadcast::Sender<Vec<u8>>,
    /// Receiver subscribed before the child process starts emitting output so
    /// the first caller can consume any early data without races.
    initial_output_rx: StdMutex<Option<broadcast::Receiver<Vec<u8>>>>,

    /// Child killer handle for termination on drop (can signal independently
    /// of a thread blocked in `.wait()`).
@@ -38,20 +41,25 @@ impl ExecCommandSession {
        writer_handle: JoinHandle<()>,
        wait_handle: JoinHandle<()>,
        exit_status: std::sync::Arc<std::sync::atomic::AtomicBool>,
    ) -> (Self, broadcast::Receiver<Vec<u8>>) {
        let initial_output_rx = output_tx.subscribe();
        (
            Self {
                writer_tx,
                output_tx,
                killer: StdMutex::new(Some(killer)),
                reader_handle: StdMutex::new(Some(reader_handle)),
                writer_handle: StdMutex::new(Some(writer_handle)),
                wait_handle: StdMutex::new(Some(wait_handle)),
                exit_status,
            },
            initial_output_rx,
        )
    ) -> Self {
        Self {
            writer_tx,
            output_tx,
            initial_output_rx: StdMutex::new(None),
            killer: StdMutex::new(Some(killer)),
            reader_handle: StdMutex::new(Some(reader_handle)),
            writer_handle: StdMutex::new(Some(writer_handle)),
            wait_handle: StdMutex::new(Some(wait_handle)),
            exit_status,
        }
    }

    pub(crate) fn set_initial_output_receiver(&self, receiver: broadcast::Receiver<Vec<u8>>) {
        if let Ok(mut guard) = self.initial_output_rx.lock()
            && guard.is_none()
        {
            *guard = Some(receiver);
        }
    }

    pub(crate) fn writer_sender(&self) -> mpsc::Sender<Vec<u8>> {
@@ -59,7 +67,13 @@ impl ExecCommandSession {
    }

    pub(crate) fn output_receiver(&self) -> broadcast::Receiver<Vec<u8>> {
        self.output_tx.subscribe()
        if let Ok(mut guard) = self.initial_output_rx.lock()
            && let Some(receiver) = guard.take()
        {
            receiver
        } else {
            self.output_tx.subscribe()
        }
    }

pub(crate) fn has_exited(&self) -> bool {
|
||||
|
||||
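Note: `initial_output_rx` exists because a tokio broadcast subscriber only receives values sent after it subscribes; holding a receiver that was subscribed before the PTY reader task starts is what lets the first caller see the earliest output, exactly as the new doc comment says. A self-contained sketch of that race, assuming only the tokio crate:

use tokio::sync::broadcast;

#[tokio::main]
async fn main() {
    let (tx, _) = broadcast::channel::<Vec<u8>>(16);

    // Subscribed before any send, like `initial_output_rx`.
    let mut early = tx.subscribe();
    tx.send(b"first chunk".to_vec()).unwrap();

    // Subscribed after the first send: it never sees "first chunk".
    let mut late = tx.subscribe();
    tx.send(b"second chunk".to_vec()).unwrap();

    assert_eq!(early.recv().await.unwrap(), b"first chunk".to_vec());
    assert_eq!(late.recv().await.unwrap(), b"second chunk".to_vec());
}
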
@@ -12,3 +12,4 @@ pub use responses_api::WRITE_STDIN_TOOL_NAME;
pub use responses_api::create_exec_command_tool_for_responses_api;
pub use responses_api::create_write_stdin_tool_for_responses_api;
pub use session_manager::SessionManager as ExecSessionManager;
pub use session_manager::result_into_payload;

@@ -21,6 +21,7 @@ use crate::exec_command::exec_command_params::WriteStdinParams;
use crate::exec_command::exec_command_session::ExecCommandSession;
use crate::exec_command::session_id::SessionId;
use crate::truncate::truncate_middle;
use codex_protocol::models::FunctionCallOutputPayload;

#[derive(Debug, Default)]
pub struct SessionManager {
@@ -37,7 +38,7 @@ pub struct ExecCommandOutput {
}

impl ExecCommandOutput {
pub(crate) fn to_text_output(&self) -> String {
fn to_text_output(&self) -> String {
let wall_time_secs = self.wall_time.as_secs_f32();
let termination_status = match self.exit_status {
ExitStatus::Exited(code) => format!("Process exited with code {code}"),
@@ -67,6 +68,19 @@ pub enum ExitStatus {
Ongoing(SessionId),
}

pub fn result_into_payload(result: Result<ExecCommandOutput, String>) -> FunctionCallOutputPayload {
match result {
Ok(output) => FunctionCallOutputPayload {
content: output.to_text_output(),
success: Some(true),
},
Err(err) => FunctionCallOutputPayload {
content: err,
success: Some(false),
},
}
}

impl SessionManager {
/// Processes the request and is required to send a response via `outgoing`.
pub async fn handle_exec_command_request(
@@ -79,16 +93,18 @@ impl SessionManager {
.fetch_add(1, std::sync::atomic::Ordering::SeqCst),
);

let (session, mut output_rx, mut exit_rx) = create_exec_command_session(params.clone())
.await
.map_err(|err| {
format!(
"failed to create exec command session for session id {}: {err}",
session_id.0
)
})?;
let (session, mut exit_rx) =
create_exec_command_session(params.clone())
.await
.map_err(|err| {
format!(
"failed to create exec command session for session id {}: {err}",
session_id.0
)
})?;

// Insert into session map.
let mut output_rx = session.output_receiver();
self.sessions.lock().await.insert(session_id, session);

// Collect output until either timeout expires or process exits.
@@ -229,11 +245,7 @@ impl SessionManager {
/// Spawn PTY and child process per spawn_exec_command_session logic.
async fn create_exec_command_session(
params: ExecCommandParams,
) -> anyhow::Result<(
ExecCommandSession,
tokio::sync::broadcast::Receiver<Vec<u8>>,
oneshot::Receiver<i32>,
)> {
) -> anyhow::Result<(ExecCommandSession, oneshot::Receiver<i32>)> {
let ExecCommandParams {
cmd,
yield_time_ms: _,
@@ -267,6 +279,8 @@ async fn create_exec_command_session(
let (writer_tx, mut writer_rx) = mpsc::channel::<Vec<u8>>(128);
// Broadcast for streaming PTY output to readers: subscribers receive from subscription time.
let (output_tx, _) = tokio::sync::broadcast::channel::<Vec<u8>>(256);
let initial_output_rx = output_tx.subscribe();

// Reader task: drain PTY and forward chunks to output channel.
let mut reader = pair.master.try_clone_reader()?;
let output_tx_clone = output_tx.clone();
@@ -328,7 +342,7 @@ async fn create_exec_command_session(
});

// Create and store the session with channels.
let (session, initial_output_rx) = ExecCommandSession::new(
let session = ExecCommandSession::new(
writer_tx,
output_tx,
killer,
@@ -337,7 +351,8 @@ async fn create_exec_command_session(
wait_handle,
exit_status,
);
Ok((session, initial_output_rx, exit_rx))
session.set_initial_output_receiver(initial_output_rx);
Ok((session, exit_rx))
}

#[cfg(test)]

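Note: `result_into_payload` collapses the session result into the protocol payload, with `success` mirroring the `Ok`/`Err` arm. A stand-in sketch of the same shape (the struct here is a local stand-in for `codex_protocol::models::FunctionCallOutputPayload`):

struct FunctionCallOutputPayload {
    content: String,
    success: Option<bool>,
}

fn result_into_payload(result: Result<String, String>) -> FunctionCallOutputPayload {
    match result {
        Ok(content) => FunctionCallOutputPayload { content, success: Some(true) },
        Err(err) => FunctionCallOutputPayload { content: err, success: Some(false) },
    }
}

fn main() {
    let ok = result_into_payload(Ok("Process exited with code 0".into()));
    assert_eq!(ok.success, Some(true));
    let err = result_into_payload(Err("spawn failed".into()));
    assert_eq!(err.success, Some(false));
    println!("{} / {}", ok.content, err.content);
}
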
@@ -1,6 +1,16 @@
use std::time::Duration;

use env_flags::env_flags;

env_flags! {
pub OPENAI_API_BASE: &str = "https://api.openai.com/v1";

/// Fallback when the provider-specific key is not set.
pub OPENAI_API_KEY: Option<&str> = None;
pub OPENAI_TIMEOUT_MS: Duration = Duration::from_millis(300_000), |value| {
value.parse().map(Duration::from_millis)
};

/// Fixture path for offline tests (see client.rs).
pub CODEX_RS_SSE_FIXTURE: Option<&str> = None;
}

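Note: each `env_flags!` entry reads the environment variable of the same name, and the trailing closure parses the raw string, so `OPENAI_TIMEOUT_MS=60000` yields a 60-second timeout. A dependency-free sketch of that parse-with-default behavior:

use std::time::Duration;

// Mirrors the OPENAI_TIMEOUT_MS flag: parse milliseconds from the
// environment, falling back to the 300_000 ms default.
fn openai_timeout() -> Duration {
    std::env::var("OPENAI_TIMEOUT_MS")
        .ok()
        .and_then(|raw| raw.parse::<u64>().ok())
        .map(Duration::from_millis)
        .unwrap_or(Duration::from_millis(300_000))
}

fn main() {
    println!("timeout = {:?}", openai_timeout());
}
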
@@ -1,7 +0,0 @@
use thiserror::Error;

#[derive(Debug, Error, PartialEq)]
pub enum FunctionCallError {
#[error("{0}")]
RespondToModel(String),
}
@@ -108,61 +108,6 @@ pub async fn collect_git_info(cwd: &Path) -> Option<GitInfo> {
Some(git_info)
}

/// A minimal commit summary entry used for pickers (subject + timestamp + sha).
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct CommitLogEntry {
pub sha: String,
/// Unix timestamp (seconds since epoch) of the commit time (committer time).
pub timestamp: i64,
/// Single-line subject of the commit message.
pub subject: String,
}

/// Return the last `limit` commits reachable from HEAD for the current branch.
/// Each entry contains the SHA, commit timestamp (seconds), and subject line.
/// Returns an empty vector if not in a git repo or on error/timeout.
pub async fn recent_commits(cwd: &Path, limit: usize) -> Vec<CommitLogEntry> {
// Ensure we're in a git repo first to avoid noisy errors.
let Some(out) = run_git_command_with_timeout(&["rev-parse", "--git-dir"], cwd).await else {
return Vec::new();
};
if !out.status.success() {
return Vec::new();
}

let fmt = "%H%x1f%ct%x1f%s"; // <sha> <US> <commit_time> <US> <subject>
let n = limit.max(1).to_string();
let Some(log_out) =
run_git_command_with_timeout(&["log", "-n", &n, &format!("--pretty=format:{fmt}")], cwd)
.await
else {
return Vec::new();
};
if !log_out.status.success() {
return Vec::new();
}

let text = String::from_utf8_lossy(&log_out.stdout);
let mut entries: Vec<CommitLogEntry> = Vec::new();
for line in text.lines() {
let mut parts = line.split('\u{001f}');
let sha = parts.next().unwrap_or("").trim();
let ts_s = parts.next().unwrap_or("").trim();
let subject = parts.next().unwrap_or("").trim();
if sha.is_empty() || ts_s.is_empty() {
continue;
}
let timestamp = ts_s.parse::<i64>().unwrap_or(0);
entries.push(CommitLogEntry {
sha: sha.to_string(),
timestamp,
subject: subject.to_string(),
});
}

entries
}

/// Returns the closest git sha to HEAD that is on a remote as well as the diff to that sha.
pub async fn git_diff_to_remote(cwd: &Path) -> Option<GitDiffToRemote> {
get_git_repo_root(cwd)?;
@@ -200,7 +145,7 @@ async fn get_git_remotes(cwd: &Path) -> Option<Vec<String>> {
let mut remotes: Vec<String> = String::from_utf8(output.stdout)
.ok()?
.lines()
.map(str::to_string)
.map(|s| s.to_string())
.collect();
if let Some(pos) = remotes.iter().position(|r| r == "origin") {
let origin = remotes.remove(pos);
@@ -257,11 +202,6 @@ async fn get_default_branch(cwd: &Path) -> Option<String> {
}

// No remote-derived default; try common local defaults if they exist
get_default_branch_local(cwd).await
}

/// Attempt to determine the repository's default branch name from local branches.
async fn get_default_branch_local(cwd: &Path) -> Option<String> {
for candidate in ["main", "master"] {
if let Some(verify) = run_git_command_with_timeout(
&[
@@ -477,7 +417,7 @@ async fn diff_against_sha(cwd: &Path, sha: &GitSha) -> Option<String> {
let untracked: Vec<String> = String::from_utf8(untracked_output.stdout)
.ok()?
.lines()
.map(str::to_string)
.map(|s| s.to_string())
.filter(|s| !s.is_empty())
.collect();

@@ -545,51 +485,10 @@ pub fn resolve_root_git_project_for_trust(cwd: &Path) -> Option<PathBuf> {
git_dir_path.parent().map(Path::to_path_buf)
}

/// Returns a list of local git branches.
/// Includes the default branch at the beginning of the list, if it exists.
pub async fn local_git_branches(cwd: &Path) -> Vec<String> {
let mut branches: Vec<String> = if let Some(out) =
run_git_command_with_timeout(&["branch", "--format=%(refname:short)"], cwd).await
&& out.status.success()
{
String::from_utf8_lossy(&out.stdout)
.lines()
.map(|s| s.trim().to_string())
.filter(|s| !s.is_empty())
.collect()
} else {
Vec::new()
};

branches.sort_unstable();

if let Some(base) = get_default_branch_local(cwd).await
&& let Some(pos) = branches.iter().position(|name| name == &base)
{
let base_branch = branches.remove(pos);
branches.insert(0, base_branch);
}

branches
}

/// Returns the current checked out branch name.
pub async fn current_branch_name(cwd: &Path) -> Option<String> {
let out = run_git_command_with_timeout(&["branch", "--show-current"], cwd).await?;
if !out.status.success() {
return None;
}
String::from_utf8(out.stdout)
.ok()
.map(|s| s.trim().to_string())
.filter(|name| !name.is_empty())
}

#[cfg(test)]
mod tests {
use super::*;

use core_test_support::skip_if_sandbox;
use std::fs;
use std::path::PathBuf;
use tempfile::TempDir;
@@ -652,81 +551,6 @@ mod tests {
repo_path
}

#[tokio::test]
async fn test_recent_commits_non_git_directory_returns_empty() {
let temp_dir = TempDir::new().expect("Failed to create temp dir");
let entries = recent_commits(temp_dir.path(), 10).await;
assert!(entries.is_empty(), "expected no commits outside a git repo");
}

#[tokio::test]
async fn test_recent_commits_orders_and_limits() {
skip_if_sandbox!();
use tokio::time::Duration;
use tokio::time::sleep;

let temp_dir = TempDir::new().expect("Failed to create temp dir");
let repo_path = create_test_git_repo(&temp_dir).await;

// Make three distinct commits with small delays to ensure ordering by timestamp.
fs::write(repo_path.join("file.txt"), "one").unwrap();
Command::new("git")
.args(["add", "file.txt"])
.current_dir(&repo_path)
.output()
.await
.expect("git add");
Command::new("git")
.args(["commit", "-m", "first change"])
.current_dir(&repo_path)
.output()
.await
.expect("git commit 1");

sleep(Duration::from_millis(1100)).await;

fs::write(repo_path.join("file.txt"), "two").unwrap();
Command::new("git")
.args(["add", "file.txt"])
.current_dir(&repo_path)
.output()
.await
.expect("git add 2");
Command::new("git")
.args(["commit", "-m", "second change"])
.current_dir(&repo_path)
.output()
.await
.expect("git commit 2");

sleep(Duration::from_millis(1100)).await;

fs::write(repo_path.join("file.txt"), "three").unwrap();
Command::new("git")
.args(["add", "file.txt"])
.current_dir(&repo_path)
.output()
.await
.expect("git add 3");
Command::new("git")
.args(["commit", "-m", "third change"])
.current_dir(&repo_path)
.output()
.await
.expect("git commit 3");

// Request the latest 3 commits; should be our three changes in reverse time order.
let entries = recent_commits(&repo_path, 3).await;
assert_eq!(entries.len(), 3);
assert_eq!(entries[0].subject, "third change");
assert_eq!(entries[1].subject, "second change");
assert_eq!(entries[2].subject, "first change");
// Basic sanity on SHA formatting
for e in entries {
assert!(e.sha.len() >= 7 && e.sha.chars().all(|c| c.is_ascii_hexdigit()));
}
}

async fn create_test_git_repo_with_remote(temp_dir: &TempDir) -> (PathBuf, String) {
let repo_path = create_test_git_repo(temp_dir).await;
let remote_path = temp_dir.path().join("remote.git");

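Note: `recent_commits` leans on the ASCII unit separator (`%x1f`) to delimit fields in `git log -n <N> --pretty=format:'%H%x1f%ct%x1f%s'`; that byte cannot occur in a SHA, a decimal timestamp, or a single-line subject, so a plain `split` recovers the fields. A sketch of the parsing half on its own (sample values are illustrative):

fn parse_log_line(line: &str) -> Option<(String, i64, String)> {
    let mut parts = line.split('\u{001f}');
    let sha = parts.next()?.trim().to_string();
    let timestamp = parts.next()?.trim().parse::<i64>().ok()?;
    let subject = parts.next().unwrap_or("").trim().to_string();
    if sha.is_empty() { None } else { Some((sha, timestamp, subject)) }
}

fn main() {
    let line = "f8b5119504\u{001f}1726000000\u{001f}initial commit";
    let (sha, ts, subject) = parse_log_line(line).expect("well-formed line");
    println!("{sha} @ {ts}: {subject}");
}
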
@@ -1,355 +0,0 @@
use std::fs as std_fs;
use std::path::Path;
use std::path::PathBuf;

use anyhow::Context;
use anyhow::Result;
use anyhow::anyhow;
use codex_protocol::mcp_protocol::ConversationId;
use tokio::fs;
use tokio::fs::OpenOptions;
use tokio::io::AsyncWriteExt;
use tokio::process::Command;
use tracing::info;
use tracing::warn;

/// Represents a linked git worktree managed by Codex.
///
/// The handle tracks whether Codex created the worktree for the current
/// conversation. It leaves the checkout in place until [`remove`] is invoked.
pub struct WorktreeHandle {
repo_root: PathBuf,
path: PathBuf,
}

impl WorktreeHandle {
/// Create (or reuse) a worktree rooted at
/// `<repo_root>/codex/worktree/<conversation_id>`.
pub async fn create(repo_root: &Path, conversation_id: &ConversationId) -> Result<Self> {
if !repo_root.exists() {
return Err(anyhow!(
"git worktree root `{}` does not exist",
repo_root.display()
));
}

let repo_root = repo_root.to_path_buf();
let codex_dir = repo_root.join("codex");
let codex_worktree_dir = codex_dir.join("worktree");
fs::create_dir_all(&codex_worktree_dir)
.await
.with_context(|| {
format!(
"failed to create codex worktree directory at `{}`",
codex_worktree_dir.display()
)
})?;

let path = codex_worktree_dir.join(conversation_id.to_string());
let is_registered = worktree_registered(&repo_root, &path).await?;

if is_registered {
if path.exists() {
if let Err(err) = ensure_codex_excluded(&repo_root).await {
warn!("failed to add codex worktree path to git exclude: {err:#}");
}
info!(
worktree = %path.display(),
"reusing existing git worktree for conversation"
);
return Ok(Self { repo_root, path });
}

warn!(
worktree = %path.display(),
"git worktree is registered but missing on disk; pruning stale entry"
);
run_git_command(&repo_root, ["worktree", "prune", "--expire", "now"])
.await
.with_context(|| {
format!(
"failed to prune git worktrees while recovering `{}`",
path.display()
)
})?;

if worktree_registered(&repo_root, &path).await? {
return Err(anyhow!(
"git worktree `{}` is registered but missing on disk; run `git worktree prune --expire now` to remove the stale entry",
path.display()
));
}

info!(
worktree = %path.display(),
"recreating git worktree for conversation after pruning stale registration"
);
}

if path.exists() {
return Err(anyhow!(
"git worktree path `{}` already exists but is not registered; remove it manually",
path.display()
));
}

run_git_command(
&repo_root,
[
"worktree",
"add",
"--detach",
path.to_str().ok_or_else(|| {
anyhow!(
"failed to convert worktree path `{}` to UTF-8",
path.display()
)
})?,
"HEAD",
],
)
.await
.with_context(|| format!("failed to create git worktree at `{}`", path.display()))?;

if let Err(err) = ensure_codex_excluded(&repo_root).await {
warn!("failed to add codex worktree path to git exclude: {err:#}");
}

info!(
worktree = %path.display(),
"created git worktree for conversation"
);

Ok(Self { repo_root, path })
}

/// Absolute path to the worktree checkout on disk.
pub fn path(&self) -> &Path {
&self.path
}

/// Remove the worktree and prune metadata from the repository.
pub async fn remove(self) -> Result<()> {
let path = self.path.clone();

// `git worktree remove` fails if refs are missing or the checkout is dirty.
// Use --force to ensure best effort removal; the user explicitly requested it.
run_git_command(
&self.repo_root,
[
"worktree",
"remove",
"--force",
path.to_str().ok_or_else(|| {
anyhow!(
"failed to convert worktree path `{}` to UTF-8",
path.display()
)
})?,
],
)
.await
.with_context(|| format!("failed to remove git worktree `{}`", path.display()))?;

// Prune dangling metadata so repeated sessions do not accumulate entries.
if let Err(err) =
run_git_command(&self.repo_root, ["worktree", "prune", "--expire", "now"]).await
{
warn!("failed to prune git worktrees: {err:#}");
}

Ok(())
}
}

async fn worktree_registered(repo_root: &Path, target: &Path) -> Result<bool> {
let output = run_git_command(repo_root, ["worktree", "list", "--porcelain"]).await?;
let stdout = String::from_utf8(output.stdout)?;

let target_canon = std_fs::canonicalize(target).unwrap_or_else(|_| target.to_path_buf());

for line in stdout.lines() {
if let Some(path) = line.strip_prefix("worktree ") {
let candidate = Path::new(path);
let candidate_canon =
std_fs::canonicalize(candidate).unwrap_or_else(|_| candidate.to_path_buf());
if candidate_canon == target_canon {
return Ok(true);
}
}
}
Ok(false)
}

async fn run_git_command<'a>(
repo_root: &Path,
args: impl IntoIterator<Item = &'a str>,
) -> Result<std::process::Output> {
let mut cmd = Command::new("git");
cmd.args(args);
cmd.current_dir(repo_root);
let output = cmd
.output()
.await
.with_context(|| format!("failed to execute git command in `{}`", repo_root.display()))?;

if !output.status.success() {
let stderr = String::from_utf8_lossy(&output.stderr).trim().to_string();
let status = output
.status
.code()
.map(|c| c.to_string())
.unwrap_or_else(|| "signal".to_string());
return Err(anyhow!("git command exited with status {status}: {stderr}",));
}

Ok(output)
}

async fn ensure_codex_excluded(repo_root: &Path) -> Result<()> {
const PATTERN: &str = "/codex/";

let git_dir_out = run_git_command(repo_root, ["rev-parse", "--git-dir"]).await?;
let git_dir_str = String::from_utf8(git_dir_out.stdout)?.trim().to_string();
let git_dir_path = if Path::new(&git_dir_str).is_absolute() {
PathBuf::from(&git_dir_str)
} else {
repo_root.join(&git_dir_str)
};

let info_dir = git_dir_path.join("info");
fs::create_dir_all(&info_dir).await?;
let exclude_path = info_dir.join("exclude");

let existing_bytes = fs::read(&exclude_path).await.unwrap_or_default();
let existing = String::from_utf8(existing_bytes).unwrap_or_default();
if existing.lines().any(|line| line.trim() == PATTERN) {
return Ok(());
}

let mut file = OpenOptions::new()
.create(true)
.append(true)
.open(&exclude_path)
.await?;

if !existing.is_empty() && !existing.ends_with('\n') {
file.write_all(b"\n").await?;
}
file.write_all(PATTERN.as_bytes()).await?;
file.write_all(b"\n").await?;
Ok(())
}

#[cfg(test)]
mod tests {
use super::*;

use tempfile::TempDir;

const GIT_ENV: [(&str, &str); 2] = [
("GIT_CONFIG_GLOBAL", "/dev/null"),
("GIT_CONFIG_NOSYSTEM", "1"),
];

async fn init_repo() -> (TempDir, PathBuf) {
let temp = TempDir::new().expect("tempdir");
let repo_path = temp.path().join("repo");
fs::create_dir_all(&repo_path)
.await
.expect("create repo dir");

run_git_with_env(&repo_path, ["init"], &GIT_ENV)
.await
.expect("git init");
run_git_with_env(&repo_path, ["config", "user.name", "Test User"], &GIT_ENV)
.await
.expect("config user.name");
run_git_with_env(
&repo_path,
["config", "user.email", "test@example.com"],
&GIT_ENV,
)
.await
.expect("config user.email");

fs::write(repo_path.join("README.md"), b"hello world")
.await
.expect("write file");
run_git_with_env(&repo_path, ["add", "README.md"], &GIT_ENV)
.await
.expect("git add");
run_git_with_env(&repo_path, ["commit", "-m", "init"], &GIT_ENV)
.await
.expect("git commit");

(temp, repo_path)
}

async fn run_git_with_env<'a>(
cwd: &Path,
args: impl IntoIterator<Item = &'a str>,
envs: &[(&str, &str)],
) -> Result<()> {
let mut cmd = Command::new("git");
cmd.args(args);
cmd.current_dir(cwd);
for (key, value) in envs {
cmd.env(key, value);
}
let status = cmd.status().await.context("failed to spawn git command")?;
if !status.success() {
return Err(anyhow!(
"git command exited with status {status} (cwd: {})",
cwd.display()
));
}
Ok(())
}

async fn is_registered(repo_root: &Path, path: &Path) -> bool {
worktree_registered(repo_root, path).await.unwrap_or(false)
}

#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn creates_and_removes_worktree() {
let (_temp, repo) = init_repo().await;
let conversation_id = ConversationId::new();

let handle = WorktreeHandle::create(&repo, &conversation_id)
.await
.expect("create worktree");
let path = handle.path().to_path_buf();
assert!(path.exists(), "worktree path should exist on disk");
assert!(
is_registered(&repo, &path).await,
"worktree should be registered"
);

handle.remove().await.expect("remove worktree");
assert!(
!is_registered(&repo, &path).await,
"worktree should be removed from registration"
);
}

#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn reuses_existing_worktree() {
let (_temp, repo) = init_repo().await;
let conversation_id = ConversationId::new();

let first = WorktreeHandle::create(&repo, &conversation_id)
.await
.expect("create worktree");
let path = first.path().to_path_buf();
drop(first);

let second = WorktreeHandle::create(&repo, &conversation_id)
.await
.expect("reuse worktree");
assert_eq!(path, second.path());
assert!(is_registered(&repo, second.path()).await);

second.remove().await.expect("remove worktree");
}
}

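Note: the removed `WorktreeHandle` wrapped four git invocations: `git worktree add --detach <path> HEAD`, `git worktree remove --force <path>`, `git worktree prune --expire now`, and `git worktree list --porcelain` for the registration check. A sketch of parsing that porcelain output (sample text is illustrative):

use std::path::PathBuf;

// Each worktree block in `git worktree list --porcelain` starts with
// a `worktree <path>` line; collect those paths.
fn worktree_paths(porcelain: &str) -> Vec<PathBuf> {
    porcelain
        .lines()
        .filter_map(|line| line.strip_prefix("worktree "))
        .map(PathBuf::from)
        .collect()
}

fn main() {
    let sample = "worktree /repo\nHEAD abc123\n\nworktree /repo/codex/worktree/conv-1\nHEAD def456\n";
    assert_eq!(
        worktree_paths(sample),
        vec![PathBuf::from("/repo"), PathBuf::from("/repo/codex/worktree/conv-1")]
    );
}
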
68 codex-rs/core/src/internal_storage.rs Normal file
@@ -0,0 +1,68 @@
use anyhow::Context;
use serde::Deserialize;
use serde::Serialize;
use std::path::Path;
use std::path::PathBuf;

pub(crate) const INTERNAL_STORAGE_FILE: &str = "internal_storage.json";

#[derive(Debug, Default, Clone, Serialize, Deserialize)]
pub struct InternalStorage {
#[serde(skip)]
storage_path: PathBuf,
#[serde(default, alias = "gpt_5_high_model_prompt_seen")]
pub swiftfox_model_prompt_seen: bool,
}

// TODO(jif) generalise all the file writers and build proper async channel inserters.
impl InternalStorage {
pub fn load(codex_home: &Path) -> Self {
let storage_path = codex_home.join(INTERNAL_STORAGE_FILE);

match std::fs::read_to_string(&storage_path) {
Ok(serialized) => match serde_json::from_str::<Self>(&serialized) {
Ok(mut storage) => {
storage.storage_path = storage_path;
storage
}
Err(error) => {
tracing::warn!("failed to parse internal storage: {error:?}");
Self::empty(storage_path)
}
},
Err(error) => {
tracing::warn!("failed to read internal storage: {error:?}");
Self::empty(storage_path)
}
}
}

fn empty(storage_path: PathBuf) -> Self {
Self {
storage_path,
..Default::default()
}
}

pub async fn persist(&self) -> anyhow::Result<()> {
let serialized = serde_json::to_string_pretty(self)?;

if let Some(parent) = self.storage_path.parent() {
tokio::fs::create_dir_all(parent).await.with_context(|| {
format!(
"failed to create internal storage directory at {}",
parent.display()
)
})?;
}

tokio::fs::write(&self.storage_path, serialized)
.await
.with_context(|| {
format!(
"failed to persist internal storage at {}",
self.storage_path.display()
)
})
}
}

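Note: `InternalStorage::load` never fails: a missing file or a parse error logs a warning and degrades to the default struct. A sketch of that load-or-default pattern, assuming serde and serde_json as dependencies:

use serde::Deserialize;
use serde::Serialize;
use std::path::Path;

#[derive(Debug, Default, Serialize, Deserialize)]
struct Storage {
    #[serde(default, alias = "gpt_5_high_model_prompt_seen")]
    swiftfox_model_prompt_seen: bool,
}

fn load(path: &Path) -> Storage {
    // Any read or parse failure falls back to Storage::default().
    std::fs::read_to_string(path)
        .ok()
        .and_then(|raw| serde_json::from_str(&raw).ok())
        .unwrap_or_default()
}

fn main() {
    let storage = load(Path::new("/tmp/does-not-exist.json"));
    assert!(!storage.swiftfox_model_prompt_seen);
}
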
@@ -1,14 +1,7 @@
use crate::bash::parse_bash_lc_plain_commands;
use crate::bash::try_parse_bash;
use crate::bash::try_parse_word_only_commands_sequence;

pub fn is_known_safe_command(command: &[String]) -> bool {
#[cfg(target_os = "windows")]
{
use super::windows_safe_commands::is_safe_command_windows;
if is_safe_command_windows(command) {
return true;
}
}

if is_safe_to_call_with_exec(command) {
return true;
}
@@ -19,7 +12,11 @@ pub fn is_known_safe_command(command: &[String]) -> bool {
// introduce side effects ( "&&", "||", ";", and "|" ). If every
// individual command in the script is itself a known‑safe command, then
// the composite expression is considered safe.
if let Some(all_commands) = parse_bash_lc_plain_commands(command)
if let [bash, flag, script] = command
&& bash == "bash"
&& flag == "-lc"
&& let Some(tree) = try_parse_bash(script)
&& let Some(all_commands) = try_parse_word_only_commands_sequence(&tree, script)
&& !all_commands.is_empty()
&& all_commands
.iter()
@@ -27,6 +24,7 @@ pub fn is_known_safe_command(command: &[String]) -> bool {
{
return true;
}

false
}

@@ -162,10 +160,9 @@ fn is_valid_sed_n_arg(arg: Option<&str>) -> bool {
#[cfg(test)]
mod tests {
use super::*;
use std::string::ToString;

fn vec_str(args: &[&str]) -> Vec<String> {
args.iter().map(ToString::to_string).collect()
args.iter().map(|s| s.to_string()).collect()
}

#[test]

|
||||
pub async fn spawn_command_under_linux_sandbox<P>(
|
||||
codex_linux_sandbox_exe: P,
|
||||
command: Vec<String>,
|
||||
command_cwd: PathBuf,
|
||||
sandbox_policy: &SandboxPolicy,
|
||||
sandbox_policy_cwd: &Path,
|
||||
cwd: PathBuf,
|
||||
stdio_policy: StdioPolicy,
|
||||
env: HashMap<String, String>,
|
||||
) -> std::io::Result<Child>
|
||||
where
|
||||
P: AsRef<Path>,
|
||||
{
|
||||
let args = create_linux_sandbox_command_args(command, sandbox_policy, sandbox_policy_cwd);
|
||||
let args = create_linux_sandbox_command_args(command, sandbox_policy, &cwd);
|
||||
let arg0 = Some("codex-linux-sandbox");
|
||||
spawn_child_async(
|
||||
codex_linux_sandbox_exe.as_ref().to_path_buf(),
|
||||
args,
|
||||
arg0,
|
||||
command_cwd,
|
||||
cwd,
|
||||
sandbox_policy,
|
||||
stdio_policy,
|
||||
env,
|
||||
@@ -43,13 +42,10 @@ where
|
||||
fn create_linux_sandbox_command_args(
|
||||
command: Vec<String>,
|
||||
sandbox_policy: &SandboxPolicy,
|
||||
sandbox_policy_cwd: &Path,
|
||||
cwd: &Path,
|
||||
) -> Vec<String> {
|
||||
#[expect(clippy::expect_used)]
|
||||
let sandbox_policy_cwd = sandbox_policy_cwd
|
||||
.to_str()
|
||||
.expect("cwd must be valid UTF-8")
|
||||
.to_string();
|
||||
let sandbox_policy_cwd = cwd.to_str().expect("cwd must be valid UTF-8").to_string();
|
||||
|
||||
#[expect(clippy::expect_used)]
|
||||
let sandbox_policy_json =
|
||||
|
||||
@@ -15,7 +15,6 @@ pub mod codex;
mod codex_conversation;
pub mod token_data;
pub use codex_conversation::CodexConversation;
mod command_safety;
pub mod config;
pub mod config_edit;
pub mod config_profile;
@@ -29,12 +28,14 @@ mod exec_command;
pub mod exec_env;
mod flags;
pub mod git_info;
mod git_worktree;
pub mod internal_storage;
mod is_safe_command;
pub mod landlock;
mod mcp_connection_manager;
mod mcp_tool_call;
mod message_history;
mod model_provider_info;
mod normalize_command;
pub mod parse_command;
mod truncate;
mod unified_exec;
@@ -46,7 +47,6 @@ pub use model_provider_info::built_in_model_providers;
pub use model_provider_info::create_oss_provider_with_base_url;
mod conversation_manager;
mod event_mapping;
pub mod review_format;
pub use codex_protocol::protocol::InitialHistory;
pub use conversation_manager::ConversationManager;
pub use conversation_manager::NewConversation;
@@ -75,14 +75,10 @@ pub use rollout::find_conversation_path_by_id_str;
pub use rollout::list::ConversationItem;
pub use rollout::list::ConversationsPage;
pub use rollout::list::Cursor;
mod function_tool;
mod state;
mod tasks;
mod user_notification;
pub mod util;

pub use apply_patch::CODEX_APPLY_PATCH_ARG1;
pub use command_safety::is_safe_command;
pub use safety::get_platform_sandbox;
// Re-export the protocol types from the standalone `codex-protocol` crate so existing
// `codex_core::protocol::...` references continue to work across the workspace.
@@ -93,11 +89,8 @@ pub use codex_protocol::config_types as protocol_config_types;

pub use client::ModelClient;
pub use client_common::Prompt;
pub use client_common::REVIEW_PROMPT;
pub use client_common::ResponseEvent;
pub use client_common::ResponseStream;
pub use codex::compact::content_items_to_text;
pub use codex::compact::is_session_prefix_message;
pub use codex_protocol::models::ContentItem;
pub use codex_protocol::models::LocalShellAction;
pub use codex_protocol::models::LocalShellExecAction;

@@ -16,7 +16,6 @@ use anyhow::Context;
use anyhow::Result;
use anyhow::anyhow;
use codex_mcp_client::McpClient;
use codex_rmcp_client::RmcpClient;
use mcp_types::ClientCapabilities;
use mcp_types::Implementation;
use mcp_types::Tool;
@@ -29,7 +28,6 @@ use tracing::info;
use tracing::warn;

use crate::config_types::McpServerConfig;
use crate::config_types::McpServerTransportConfig;

/// Delimiter used to separate the server name from the tool name in a fully
/// qualified tool name.
@@ -42,9 +40,6 @@ const MAX_TOOL_NAME_LENGTH: usize = 64;
/// Default timeout for initializing MCP server & initially listing tools.
const DEFAULT_STARTUP_TIMEOUT: Duration = Duration::from_secs(10);

/// Default timeout for individual tool calls.
const DEFAULT_TOOL_TIMEOUT: Duration = Duration::from_secs(60);

/// Map that holds a startup error for every MCP server that could **not** be
/// spawned successfully.
pub type ClientStartErrors = HashMap<String, anyhow::Error>;
@@ -88,73 +83,8 @@ struct ToolInfo {
}

struct ManagedClient {
client: McpClientAdapter,
client: Arc<McpClient>,
startup_timeout: Duration,
tool_timeout: Option<Duration>,
}

#[derive(Clone)]
enum McpClientAdapter {
Legacy(Arc<McpClient>),
Rmcp(Arc<RmcpClient>),
}

impl McpClientAdapter {
async fn new_stdio_client(
use_rmcp_client: bool,
program: OsString,
args: Vec<OsString>,
env: Option<HashMap<String, String>>,
params: mcp_types::InitializeRequestParams,
startup_timeout: Duration,
) -> Result<Self> {
info!(
"new_stdio_client use_rmcp_client: {use_rmcp_client} program: {program:?} args: {args:?} env: {env:?} params: {params:?} startup_timeout: {startup_timeout:?}"
);
if use_rmcp_client {
let client = Arc::new(RmcpClient::new_stdio_client(program, args, env).await?);
client.initialize(params, Some(startup_timeout)).await?;
Ok(McpClientAdapter::Rmcp(client))
} else {
let client = Arc::new(McpClient::new_stdio_client(program, args, env).await?);
client.initialize(params, Some(startup_timeout)).await?;
Ok(McpClientAdapter::Legacy(client))
}
}

async fn new_streamable_http_client(
url: String,
bearer_token: Option<String>,
params: mcp_types::InitializeRequestParams,
startup_timeout: Duration,
) -> Result<Self> {
let client = Arc::new(RmcpClient::new_streamable_http_client(url, bearer_token)?);
client.initialize(params, Some(startup_timeout)).await?;
Ok(McpClientAdapter::Rmcp(client))
}

async fn list_tools(
&self,
params: Option<mcp_types::ListToolsRequestParams>,
timeout: Option<Duration>,
) -> Result<mcp_types::ListToolsResult> {
match self {
McpClientAdapter::Legacy(client) => client.list_tools(params, timeout).await,
McpClientAdapter::Rmcp(client) => client.list_tools(params, timeout).await,
}
}

async fn call_tool(
&self,
name: String,
arguments: Option<serde_json::Value>,
timeout: Option<Duration>,
) -> Result<mcp_types::CallToolResult> {
match self {
McpClientAdapter::Legacy(client) => client.call_tool(name, arguments, timeout).await,
McpClientAdapter::Rmcp(client) => client.call_tool(name, arguments, timeout).await,
}
}
}

/// A thin wrapper around a set of running [`McpClient`] instances.
@@ -181,7 +111,6 @@ impl McpConnectionManager {
/// user should be informed about these errors.
pub async fn new(
mcp_servers: HashMap<String, McpServerConfig>,
use_rmcp_client: bool,
) -> Result<(Self, ClientStartErrors)> {
// Early exit if no servers are configured.
if mcp_servers.is_empty() {
@@ -203,80 +132,67 @@ impl McpConnectionManager {
continue;
}

if matches!(
cfg.transport,
McpServerTransportConfig::StreamableHttp { .. }
) && !use_rmcp_client
{
info!(
"skipping MCP server `{}` configured with url because rmcp client is disabled",
server_name
);
continue;
}
let startup_timeout = cfg
.startup_timeout_ms
.map(Duration::from_millis)
.unwrap_or(DEFAULT_STARTUP_TIMEOUT);

let startup_timeout = cfg.startup_timeout_sec.unwrap_or(DEFAULT_STARTUP_TIMEOUT);
let tool_timeout = cfg.tool_timeout_sec.unwrap_or(DEFAULT_TOOL_TIMEOUT);

let use_rmcp_client_flag = use_rmcp_client;
join_set.spawn(async move {
let McpServerConfig { transport, .. } = cfg;
let params = mcp_types::InitializeRequestParams {
capabilities: ClientCapabilities {
experimental: None,
roots: None,
sampling: None,
// https://modelcontextprotocol.io/specification/2025-06-18/client/elicitation#capabilities
// indicates this should be an empty object.
elicitation: Some(json!({})),
},
client_info: Implementation {
name: "codex-mcp-client".to_owned(),
version: env!("CARGO_PKG_VERSION").to_owned(),
title: Some("Codex".into()),
// This field is used by Codex when it is an MCP
// server: it should not be used when Codex is
// an MCP client.
user_agent: None,
},
protocol_version: mcp_types::MCP_SCHEMA_VERSION.to_owned(),
};

let client = match transport {
McpServerTransportConfig::Stdio { command, args, env } => {
let command_os: OsString = command.into();
let args_os: Vec<OsString> = args.into_iter().map(Into::into).collect();
McpClientAdapter::new_stdio_client(
use_rmcp_client_flag,
command_os,
args_os,
env,
params.clone(),
startup_timeout,
)
.await
}
McpServerTransportConfig::StreamableHttp { url, bearer_token } => {
McpClientAdapter::new_streamable_http_client(
url,
bearer_token,
params,
startup_timeout,
)
.await
let McpServerConfig {
command, args, env, ..
} = cfg;
let client_res = McpClient::new_stdio_client(
command.into(),
args.into_iter().map(OsString::from).collect(),
env,
)
.await;
match client_res {
Ok(client) => {
// Initialize the client.
let params = mcp_types::InitializeRequestParams {
capabilities: ClientCapabilities {
experimental: None,
roots: None,
sampling: None,
// https://modelcontextprotocol.io/specification/2025-06-18/client/elicitation#capabilities
// indicates this should be an empty object.
elicitation: Some(json!({})),
},
client_info: Implementation {
name: "codex-mcp-client".to_owned(),
version: env!("CARGO_PKG_VERSION").to_owned(),
title: Some("Codex".into()),
// This field is used by Codex when it is an MCP
// server: it should not be used when Codex is
// an MCP client.
user_agent: None,
},
protocol_version: mcp_types::MCP_SCHEMA_VERSION.to_owned(),
};
let initialize_notification_params = None;
match client
.initialize(
params,
initialize_notification_params,
Some(startup_timeout),
)
.await
{
Ok(_response) => (server_name, Ok((client, startup_timeout))),
Err(e) => (server_name, Err(e)),
}
}
Err(e) => (server_name, Err(e.into())),
}
.map(|c| (c, startup_timeout));

((server_name, tool_timeout), client)
});
}

let mut clients: HashMap<String, ManagedClient> = HashMap::with_capacity(join_set.len());

while let Some(res) = join_set.join_next().await {
let ((server_name, tool_timeout), client_res) = match res {
Ok(result) => result,
let (server_name, client_res) = match res {
Ok((server_name, client_res)) => (server_name, client_res),
Err(e) => {
warn!("Task panic when starting MCP server: {e:#}");
continue;
@@ -288,9 +204,8 @@ impl McpConnectionManager {
clients.insert(
server_name,
ManagedClient {
client,
client: Arc::new(client),
startup_timeout,
tool_timeout: Some(tool_timeout),
},
);
}
@@ -328,13 +243,14 @@ impl McpConnectionManager {
server: &str,
tool: &str,
arguments: Option<serde_json::Value>,
timeout: Option<Duration>,
) -> Result<mcp_types::CallToolResult> {
let managed = self
let client = self
.clients
.get(server)
.ok_or_else(|| anyhow!("unknown MCP server '{server}'"))?;
let client = managed.client.clone();
let timeout = managed.tool_timeout;
.ok_or_else(|| anyhow!("unknown MCP server '{server}'"))?
.client
.clone();

client
.call_tool(tool.to_string(), arguments, timeout)

@@ -1,3 +1,4 @@
use std::time::Duration;
use std::time::Instant;

use tracing::error;
@@ -20,6 +21,7 @@ pub(crate) async fn handle_mcp_tool_call(
server: String,
tool_name: String,
arguments: String,
timeout: Option<Duration>,
) -> ResponseInputItem {
// Parse the `arguments` as JSON. An empty string is OK, but invalid JSON
// is not.
@@ -56,7 +58,7 @@ pub(crate) async fn handle_mcp_tool_call(
let start = Instant::now();
// Perform the tool call.
let result = sess
.call_tool(&server, &tool_name, arguments_value.clone())
.call_tool(&server, &tool_name, arguments_value.clone(), timeout)
.await
.map_err(|e| format!("tool call error: {e}"));
let tool_call_end_event = EventMsg::McpToolCallEnd(McpToolCallEndEvent {

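Note: `handle_mcp_tool_call` now threads a per-call `Option<Duration>` down to the client. A sketch of the usual tokio pattern behind such an optional deadline (not necessarily how the client implements it internally):

use std::time::Duration;
use tokio::time::timeout;

// None means "wait indefinitely"; Some(d) bounds the call at d.
async fn call_with_timeout<T>(
    fut: impl std::future::Future<Output = T>,
    limit: Option<Duration>,
) -> Result<T, String> {
    match limit {
        Some(d) => timeout(d, fut)
            .await
            .map_err(|_| format!("tool call timed out after {d:?}")),
        None => Ok(fut.await),
    }
}

#[tokio::main]
async fn main() {
    let quick = call_with_timeout(async { 42 }, Some(Duration::from_secs(1))).await;
    assert_eq!(quick, Ok(42));
}
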
@@ -4,7 +4,7 @@ use crate::tool_apply_patch::ApplyPatchToolType;
/// The `instructions` field in the payload sent to a model should always start
/// with this content.
const BASE_INSTRUCTIONS: &str = include_str!("../prompt.md");
const GPT_5_CODEX_INSTRUCTIONS: &str = include_str!("../gpt_5_codex_prompt.md");
const SWIFTFOX_INSTRUCTIONS: &str = include_str!("../swiftfox_prompt.md");

/// A model family is a group of models that share certain characteristics.
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
@@ -99,12 +99,12 @@ pub fn find_family_for_model(slug: &str) -> Option<ModelFamily> {
model_family!(slug, "gpt-4o", needs_special_apply_patch_instructions: true)
} else if slug.starts_with("gpt-3.5") {
model_family!(slug, "gpt-3.5", needs_special_apply_patch_instructions: true)
} else if slug.starts_with("codex-") || slug.starts_with("gpt-5-codex") {
} else if slug.starts_with("codex-") || slug.starts_with("swiftfox") {
model_family!(
slug, slug,
supports_reasoning_summaries: true,
reasoning_summary_format: ReasoningSummaryFormat::Experimental,
base_instructions: GPT_5_CODEX_INSTRUCTIONS.to_string(),
base_instructions: SWIFTFOX_INSTRUCTIONS.to_string(),
)
} else if slug.starts_with("gpt-5") {
model_family!(

438 codex-rs/core/src/normalize_command.rs Normal file
@@ -0,0 +1,438 @@
use crate::bash::extract_words_from_command_node;
|
||||
use crate::bash::find_first_command_node;
|
||||
use crate::bash::remainder_start_after_wrapper_operator;
|
||||
use crate::bash::try_parse_bash;
|
||||
use crate::bash::try_parse_word_only_commands_sequence;
|
||||
use std::path::Path;
|
||||
use std::path::PathBuf;
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Eq)]
|
||||
pub struct NormalizedCommand {
|
||||
pub command: Vec<String>,
|
||||
pub cwd: PathBuf,
|
||||
pub command_for_display: Vec<String>,
|
||||
}
|
||||
|
||||
/// Normalize a shell command for execution and display.
|
||||
///
|
||||
/// For classic wrappers like `bash -lc "cd repo && git status"` returns:
|
||||
/// - command: ["bash", "-lc", "git status"]
|
||||
/// - cwd: original cwd joined with "repo"
|
||||
/// - command_for_display:
|
||||
/// - If the remainder is a single exec-able command, return it tokenized
|
||||
/// (e.g., ["rg", "--files"]).
|
||||
/// - Otherwise, wrap the script as ["bash", "-lc", "<script>"] so that
|
||||
/// [`crate::parse_command::parse_command`] (which currently checks for
|
||||
/// bash -lc) can handle it. Even if the original used zsh, we standardize
|
||||
/// to bash here to match [`crate::parse_command::parse_command`]’s
|
||||
/// expectations.
|
||||
///
|
||||
/// Returns None when no normalization is needed.
|
||||
pub(crate) fn try_normalize_command(command: &[String], cwd: &Path) -> Option<NormalizedCommand> {
|
||||
let invocation = parse_shell_script_from_shell_invocation(command)?;
|
||||
let shell_script = invocation.command.clone();
|
||||
|
||||
let tree = try_parse_bash(&shell_script)?;
|
||||
|
||||
let first_cmd = find_first_command_node(&tree)?;
|
||||
let words = extract_words_from_command_node(first_cmd, &shell_script)?;
|
||||
if !is_command_cd_to_directory(&words) {
|
||||
return None;
|
||||
}
|
||||
|
||||
let idx = remainder_start_after_wrapper_operator(first_cmd, &shell_script)?;
|
||||
let remainder = shell_script[idx..].to_string();
|
||||
|
||||
// For parse_command consumption: if the remainder is a single exec-able
|
||||
// command, return it as argv tokens; otherwise, wrap it in ["bash","-lc",..].
|
||||
let command_for_display = try_parse_bash(&remainder)
|
||||
.and_then(|tree| try_parse_word_only_commands_sequence(&tree, &remainder))
|
||||
.and_then(|mut cmds| match cmds.len() {
|
||||
1 => cmds.pop(),
|
||||
_ => None,
|
||||
})
|
||||
.unwrap_or_else(|| vec!["bash".to_string(), "-lc".to_string(), remainder.clone()]);
|
||||
|
||||
// Compute new cwd only after confirming the wrapper/operator shape
|
||||
let dir = &words[1];
|
||||
let new_cwd = if Path::new(dir).is_absolute() {
|
||||
PathBuf::from(dir)
|
||||
} else {
|
||||
// TODO(mbolin): Is there anything to worry about if `dir` is a
|
||||
// symlink or contains `..` components?
|
||||
cwd.join(dir)
|
||||
};
|
||||
|
||||
Some(NormalizedCommand {
|
||||
command: vec![invocation.executable, invocation.flag, remainder],
|
||||
cwd: new_cwd,
|
||||
command_for_display,
|
||||
})
|
||||
}
|
||||
|
||||
/// This is similar to [`crate::shell::strip_bash_lc`] and should potentially
|
||||
/// be unified with it.
|
||||
fn parse_shell_script_from_shell_invocation(command: &[String]) -> Option<ShellScriptInvocation> {
|
||||
// Allowed shells
|
||||
fn is_allowed_exe(s: &str) -> bool {
|
||||
matches!(s, "bash" | "/bin/bash" | "zsh" | "/bin/zsh")
|
||||
}
|
||||
match command {
|
||||
// exactly three items: <exe> <flag> <command>
|
||||
[exe, flag, cmd] if is_allowed_exe(exe) && (flag == "-lc" || flag == "-c") => {
|
||||
Some(ShellScriptInvocation {
|
||||
executable: exe.clone(),
|
||||
flag: flag.clone(),
|
||||
command: cmd.clone(),
|
||||
})
|
||||
}
|
||||
// exactly four items: <exe> -l -c <command>
|
||||
[exe, flag1, flag2, cmd] if is_allowed_exe(exe) && flag1 == "-l" && flag2 == "-c" => {
|
||||
Some(ShellScriptInvocation {
|
||||
executable: exe.clone(),
|
||||
flag: "-lc".to_string(),
|
||||
command: cmd.clone(),
|
||||
})
|
||||
}
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
|
||||
fn is_command_cd_to_directory(command: &[String]) -> bool {
|
||||
matches!(command, [first, _dir] if first == "cd")
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Eq)]
|
||||
struct ShellScriptInvocation {
|
||||
executable: String,
|
||||
/// Single arg, so `-l -c` must be collapsed to `-lc`.
|
||||
/// Must be written so `command` follows `flag`, so must
|
||||
/// be `-lc` and not `-cl`.
|
||||
flag: String,
|
||||
command: String,
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::NormalizedCommand;
|
||||
use super::try_normalize_command;
|
||||
use pretty_assertions::assert_eq;
|
||||
use std::path::PathBuf;
|
||||
|
||||
#[test]
|
||||
fn cd_prefix_in_bash_script_is_hidden() {
|
||||
let command = vec![
|
||||
"bash".to_string(),
|
||||
"-lc".to_string(),
|
||||
"cd foo && echo hi".to_string(),
|
||||
];
|
||||
let cwd = PathBuf::from("/baz");
|
||||
let norm = try_normalize_command(&command, &cwd);
|
||||
assert_eq!(
|
||||
norm,
|
||||
Some(NormalizedCommand {
|
||||
command: vec!["bash".into(), "-lc".into(), "echo hi".into()],
|
||||
cwd: PathBuf::from("/baz/foo"),
|
||||
command_for_display: vec!["echo".into(), "hi".into()],
|
||||
})
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn cd_shell_var_not_matched() {
|
||||
let command = vec![
|
||||
"bash".to_string(),
|
||||
"-lc".to_string(),
|
||||
"cd $SOME_DIR && ls".to_string(),
|
||||
];
|
||||
let cwd = PathBuf::from("/baz");
|
||||
let norm = try_normalize_command(&command, &cwd);
|
||||
assert_eq!(
|
||||
norm, None,
|
||||
"Not a classic wrapper (cd arg is a variable), so no normalization."
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn cd_prefix_with_quoted_path_is_hidden() {
|
||||
let command = vec![
|
||||
"bash".to_string(),
|
||||
"-lc".to_string(),
|
||||
" cd 'foo bar' && ls".to_string(),
|
||||
];
|
||||
let cwd = PathBuf::from("/baz");
|
||||
let norm = try_normalize_command(&command, &cwd);
|
||||
assert_eq!(
|
||||
norm,
|
||||
Some(NormalizedCommand {
|
||||
command: vec!["bash".into(), "-lc".into(), "ls".into()],
|
||||
cwd: PathBuf::from("/baz/foo bar"),
|
||||
command_for_display: vec!["ls".into()],
|
||||
})
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn cd_prefix_with_additional_cd_is_preserved() {
|
||||
let command = vec![
|
||||
"bash".to_string(),
|
||||
"-lc".to_string(),
|
||||
"cd foo && cd bar && ls".to_string(),
|
||||
];
|
||||
let cwd = PathBuf::from("/baz");
|
||||
let norm = try_normalize_command(&command, &cwd);
|
||||
assert_eq!(
|
||||
norm,
|
||||
Some(NormalizedCommand {
|
||||
command: vec!["bash".into(), "-lc".into(), "cd bar && ls".into()],
|
||||
cwd: PathBuf::from("/baz/foo"),
|
||||
command_for_display: vec!["bash".into(), "-lc".into(), "cd bar && ls".into(),],
|
||||
})
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn cd_prefix_with_or_connector_is_preserved() {
|
||||
let command = vec![
|
||||
"bash".to_string(),
|
||||
"-lc".to_string(),
|
||||
"cd and_and || echo \"couldn't find the dir for &&\"".to_string(),
|
||||
];
|
||||
let cwd = PathBuf::from("/baz");
|
||||
let norm = try_normalize_command(&command, &cwd);
|
||||
// Not a classic wrapper (uses ||), so no normalization.
|
||||
assert_eq!(norm, None);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn cd_prefix_preserves_operators_between_remaining_commands() {
|
||||
let command = vec![
|
||||
"bash".to_string(),
|
||||
"-lc".to_string(),
|
||||
"cd foo && ls && git status".to_string(),
|
||||
];
|
||||
let cwd = PathBuf::from("/baz");
|
||||
let norm = try_normalize_command(&command, &cwd);
|
||||
assert_eq!(
|
||||
norm,
|
||||
Some(NormalizedCommand {
|
||||
command: vec!["bash".into(), "-lc".into(), "ls && git status".into()],
|
||||
cwd: PathBuf::from("/baz/foo"),
|
||||
command_for_display: vec!["bash".into(), "-lc".into(), "ls && git status".into(),],
|
||||
})
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn cd_prefix_preserves_pipelines() {
let command = vec![
"bash".to_string(),
"-lc".to_string(),
"cd foo && ls | rg foo".to_string(),
];
let cwd = PathBuf::from("/baz");
let norm = try_normalize_command(&command, &cwd);
assert_eq!(
norm,
Some(NormalizedCommand {
command: vec!["bash".into(), "-lc".into(), "ls | rg foo".into()],
cwd: PathBuf::from("/baz/foo"),
command_for_display: vec!["bash".into(), "-lc".into(), "ls | rg foo".into(),],
})
);
}

#[test]
fn cd_prefix_preserves_sequence_operator() {
let command = vec![
"bash".to_string(),
"-lc".to_string(),
"cd foo && ls; git status".to_string(),
];
let cwd = PathBuf::from("/baz");
let norm = try_normalize_command(&command, &cwd);
assert_eq!(
norm,
Some(NormalizedCommand {
command: vec!["bash".into(), "-lc".into(), "ls; git status".into()],
cwd: PathBuf::from("/baz/foo"),
command_for_display: vec!["bash".into(), "-lc".into(), "ls; git status".into(),],
})
);
}

#[test]
fn cd_prefix_with_semicolon_is_hidden() {
let command = vec![
"bash".to_string(),
"-lc".to_string(),
"cd foo; ls".to_string(),
];
let cwd = PathBuf::from("/baz");
let norm = try_normalize_command(&command, &cwd);
assert_eq!(
norm,
Some(NormalizedCommand {
command: vec!["bash".into(), "-lc".into(), "ls".into()],
cwd: PathBuf::from("/baz/foo"),
command_for_display: vec!["ls".into()],
})
);
}

#[test]
fn pushd_prefix_is_not_normalized() {
let command = vec![
"bash".to_string(),
"-lc".to_string(),
"pushd foo && ls".to_string(),
];
let cwd = PathBuf::from("/baz");
// We do not normalize pushd because omitting pushd from the execution
// would have different semantics than the original command.
let norm = try_normalize_command(&command, &cwd);
assert_eq!(norm, None);
}

#[test]
fn supports_zsh_and_bin_bash_and_split_flags() {
// zsh -lc
let zsh = vec!["zsh".into(), "-lc".into(), "cd foo && ls".into()];
let cwd = PathBuf::from("/baz");
let norm = try_normalize_command(&zsh, &cwd).unwrap();
assert_eq!(
norm.command,
["zsh", "-lc", "ls"]
.iter()
.map(|s| s.to_string())
.collect::<Vec<_>>()
);
assert_eq!(norm.cwd, PathBuf::from("/baz/foo"));
assert_eq!(norm.command_for_display, vec!["ls".to_string()]);

// /bin/bash -l -c <cmd>
let bash_split = vec![
"/bin/bash".into(),
"-l".into(),
"-c".into(),
"cd foo && ls".into(),
];
let norm2 = try_normalize_command(&bash_split, &cwd).unwrap();
assert_eq!(
norm2.command,
["/bin/bash", "-lc", "ls"]
.iter()
.map(|s| s.to_string())
.collect::<Vec<_>>()
);
assert_eq!(norm2.cwd, PathBuf::from("/baz/foo"));
assert_eq!(norm2.command_for_display, vec!["ls".to_string()]);
}

#[test]
fn supports_dash_c_flag() {
let command = vec!["bash".into(), "-c".into(), "cd foo && ls".into()];
let cwd = PathBuf::from("/baz");
let norm = try_normalize_command(&command, &cwd).unwrap();
assert_eq!(
norm.command,
["bash", "-c", "ls"]
.iter()
.map(|s| s.to_string())
.collect::<Vec<_>>()
);
assert_eq!(norm.cwd, PathBuf::from("/baz/foo"));
assert_eq!(norm.command_for_display, vec!["ls".to_string()]);
}

#[test]
fn rejects_pipe_operator_immediately_after_cd() {
let command = vec!["bash".into(), "-lc".into(), "cd foo | rg bar".into()];
let cwd = PathBuf::from("/baz");
let norm = try_normalize_command(&command, &cwd);
assert_eq!(norm, None);
}

#[test]
fn rejects_unknown_shell_and_bad_flag_order() {
// Unknown shell
let unknown = vec!["sh".into(), "-lc".into(), "cd foo && ls".into()];
let cwd = PathBuf::from("/baz");
assert_eq!(try_normalize_command(&unknown, &cwd), None);

// Bad flag order -cl (unsupported)
let bad_flag = vec!["bash".into(), "-cl".into(), "cd foo && ls".into()];
assert_eq!(try_normalize_command(&bad_flag, &cwd), None);
}

#[test]
fn absolute_directory_sets_absolute_cwd() {
let command = vec!["bash".into(), "-lc".into(), "cd /tmp && ls".into()];
let cwd = PathBuf::from("/baz");
let norm = try_normalize_command(&command, &cwd).unwrap();
assert_eq!(norm.cwd, PathBuf::from("/tmp"));
assert_eq!(
norm.command,
["bash", "-lc", "ls"]
.iter()
.map(|s| s.to_string())
.collect::<Vec<_>>()
);
assert_eq!(norm.command_for_display, vec!["ls".to_string()]);
}

#[test]
fn non_shell_command_is_returned_unmodified() {
let command = vec!["rg".to_string(), "--files".to_string()];
let cwd = PathBuf::from("/baz");
let norm = try_normalize_command(&command, &cwd);
// Not a shell wrapper, so no normalization.
assert_eq!(norm, None);
}

#[test]
fn cd_prefix_with_or_operator_is_preserved() {
let command = vec![
"bash".to_string(),
"-lc".to_string(),
"cd missing && ls || echo 'fallback'".to_string(),
];
let cwd = PathBuf::from("/baz");
let norm = try_normalize_command(&command, &cwd);
assert_eq!(
norm,
Some(NormalizedCommand {
command: vec!["bash".into(), "-lc".into(), "ls || echo 'fallback'".into()],
cwd: PathBuf::from("/baz/missing"),
command_for_display: vec![
"bash".into(),
"-lc".into(),
"ls || echo 'fallback'".into(),
],
})
);
}

#[test]
fn cd_prefix_with_ampersands_in_string_is_hidden() {
let command = vec![
"bash".to_string(),
"-lc".to_string(),
"cd foo && echo \"checking && markers\"".to_string(),
];
let cwd = PathBuf::from("/baz");
let norm = try_normalize_command(&command, &cwd);
assert_eq!(
norm,
Some(NormalizedCommand {
command: vec![
"bash".into(),
"-lc".into(),
"echo \"checking && markers\"".into(),
],
cwd: PathBuf::from("/baz/foo"),
command_for_display: vec!["echo".into(), "checking && markers".into(),],
})
);
}
}
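
For context, the contract these tests pin down can be sketched in a few lines. The following is a hedged illustration, not the crate's implementation: it assumes the NormalizedCommand shape asserted above, handles only the literal `bash -lc "cd <dir> && <rest>"` case, and splits on the first " && " (the real code parses the script with a shell-aware parser, which is how the quoted "checking && markers" case works).

use std::path::{Path, PathBuf};

#[derive(Debug, PartialEq)]
struct NormalizedCommand {
    command: Vec<String>,
    cwd: PathBuf,
    command_for_display: Vec<String>,
}

fn try_normalize_cd_prefix(command: &[String], cwd: &Path) -> Option<NormalizedCommand> {
    // Only the exact `bash -lc <script>` shape; the real code also accepts
    // zsh, /bin/bash, and split `-l -c` flags, per the tests above.
    let [shell, flag, script] = command else { return None };
    if shell != "bash" || flag != "-lc" {
        return None;
    }
    // Split off a leading `cd <dir> && ` prefix; `cd dir | ...` stays unmatched.
    let rest = script.strip_prefix("cd ")?;
    let (dir, tail) = rest.split_once(" && ")?;
    Some(NormalizedCommand {
        command: vec![shell.clone(), flag.clone(), tail.to_string()],
        cwd: cwd.join(dir),
        command_for_display: vec![shell.clone(), flag.clone(), tail.to_string()],
    })
}
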
@@ -7,14 +7,13 @@ use crate::model_family::ModelFamily;
/// Though this would help present more accurate pricing information in the UI.
#[derive(Debug)]
pub(crate) struct ModelInfo {
/// Size of the context window in tokens. This is the maximum size of the input context.
/// Size of the context window in tokens.
pub(crate) context_window: u64,

/// Maximum number of output tokens that can be generated for the model.
pub(crate) max_output_tokens: u64,

/// Token threshold where we should automatically compact conversation history. This considers
/// input tokens + output tokens of this turn.
/// Token threshold where we should automatically compact conversation history.
pub(crate) auto_compact_token_limit: Option<i64>,
}

@@ -62,12 +61,6 @@ pub(crate) fn get_model_info(model_family: &ModelFamily) -> Option<ModelInfo> {
// https://platform.openai.com/docs/models/gpt-3.5-turbo
"gpt-3.5-turbo" => Some(ModelInfo::new(16_385, 4_096)),

_ if slug.starts_with("gpt-5-codex") => Some(ModelInfo {
context_window: 272_000,
max_output_tokens: 128_000,
auto_compact_token_limit: Some(350_000),
}),

_ if slug.starts_with("gpt-5") => Some(ModelInfo::new(272_000, 128_000)),

_ if slug.starts_with("codex-") => Some(ModelInfo::new(272_000, 128_000)),

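The removed `gpt-5-codex` arm spells out all three fields, while the neighboring arms go through `ModelInfo::new`. A plausible reading of that constructor, assumed here since it is not shown in this diff, is that it leaves the compaction threshold unset:

impl ModelInfo {
    // Assumed shape of the constructor used by the other match arms.
    pub(crate) fn new(context_window: u64, max_output_tokens: u64) -> Self {
        Self {
            context_window,
            max_output_tokens,
            auto_compact_token_limit: None,
        }
    }
}
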
@@ -7,6 +7,8 @@ use std::collections::HashMap;

use crate::model_family::ModelFamily;
use crate::plan_tool::PLAN_TOOL;
use crate::protocol::AskForApproval;
use crate::protocol::SandboxPolicy;
use crate::tool_apply_patch::ApplyPatchToolType;
use crate::tool_apply_patch::create_apply_patch_freeform_tool;
use crate::tool_apply_patch::create_apply_patch_json_tool;
@@ -55,9 +57,10 @@ pub(crate) enum OpenAiTool {

#[derive(Debug, Clone)]
pub enum ConfigShellToolType {
Default,
Local,
Streamable,
DefaultShell,
ShellWithRequest { sandbox_policy: SandboxPolicy },
LocalShell,
StreamableShell,
}

#[derive(Debug, Clone)]
@@ -72,6 +75,8 @@ pub(crate) struct ToolsConfig {

pub(crate) struct ToolsConfigParams<'a> {
pub(crate) model_family: &'a ModelFamily,
pub(crate) approval_policy: AskForApproval,
pub(crate) sandbox_policy: SandboxPolicy,
pub(crate) include_plan_tool: bool,
pub(crate) include_apply_patch_tool: bool,
pub(crate) include_web_search_request: bool,
@@ -84,6 +89,8 @@ impl ToolsConfig {
pub fn new(params: &ToolsConfigParams) -> Self {
let ToolsConfigParams {
model_family,
approval_policy,
sandbox_policy,
include_plan_tool,
include_apply_patch_tool,
include_web_search_request,
@@ -91,13 +98,18 @@ impl ToolsConfig {
include_view_image_tool,
experimental_unified_exec_tool,
} = params;
let shell_type = if *use_streamable_shell_tool {
ConfigShellToolType::Streamable
let mut shell_type = if *use_streamable_shell_tool {
ConfigShellToolType::StreamableShell
} else if model_family.uses_local_shell_tool {
ConfigShellToolType::Local
ConfigShellToolType::LocalShell
} else {
ConfigShellToolType::Default
ConfigShellToolType::DefaultShell
};
if matches!(approval_policy, AskForApproval::OnRequest) && !use_streamable_shell_tool {
shell_type = ConfigShellToolType::ShellWithRequest {
sandbox_policy: sandbox_policy.clone(),
}
}

let apply_patch_tool_type = match model_family.apply_patch_tool_type {
Some(ApplyPatchToolType::Freeform) => Some(ApplyPatchToolType::Freeform),
@@ -158,6 +170,40 @@ pub(crate) enum JsonSchema {
},
}

fn create_shell_tool() -> OpenAiTool {
let mut properties = BTreeMap::new();
properties.insert(
"command".to_string(),
JsonSchema::Array {
items: Box::new(JsonSchema::String { description: None }),
description: Some("The command to execute".to_string()),
},
);
properties.insert(
"workdir".to_string(),
JsonSchema::String {
description: Some("The working directory to execute the command in".to_string()),
},
);
properties.insert(
"timeout_ms".to_string(),
JsonSchema::Number {
description: Some("The timeout for the command in milliseconds".to_string()),
},
);

OpenAiTool::Function(ResponsesApiTool {
name: "shell".to_string(),
description: "Runs a shell command and returns its output".to_string(),
strict: false,
parameters: JsonSchema::Object {
properties,
required: Some(vec!["command".to_string()]),
additional_properties: Some(false),
},
})
}

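For orientation, a tool built by `create_shell_tool` would serialize to roughly the following Responses API function entry. The exact rendering depends on how `JsonSchema` serializes, so treat this as an approximation rather than the crate's verbatim output:

{
  "type": "function",
  "name": "shell",
  "description": "Runs a shell command and returns its output",
  "strict": false,
  "parameters": {
    "type": "object",
    "properties": {
      "command": { "type": "array", "items": { "type": "string" }, "description": "The command to execute" },
      "workdir": { "type": "string", "description": "The working directory to execute the command in" },
      "timeout_ms": { "type": "number", "description": "The timeout for the command in milliseconds" }
    },
    "required": ["command"],
    "additionalProperties": false
  }
}
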
fn create_unified_exec_tool() -> OpenAiTool {
let mut properties = BTreeMap::new();
properties.insert(
@@ -205,7 +251,7 @@ fn create_unified_exec_tool() -> OpenAiTool {
})
}

fn create_shell_tool() -> OpenAiTool {
fn create_shell_tool_for_sandbox(sandbox_policy: &SandboxPolicy) -> OpenAiTool {
let mut properties = BTreeMap::new();
properties.insert(
"command".to_string(),
@@ -227,18 +273,20 @@ fn create_shell_tool() -> OpenAiTool {
},
);

properties.insert(
if matches!(sandbox_policy, SandboxPolicy::WorkspaceWrite { .. }) {
properties.insert(
"with_escalated_permissions".to_string(),
JsonSchema::Boolean {
description: Some("Whether to request escalated permissions. Set to true if command needs to be run without sandbox restrictions".to_string()),
},
);
properties.insert(
properties.insert(
"justification".to_string(),
JsonSchema::String {
description: Some("Only set if with_escalated_permissions is true. 1-sentence explanation of why we want to run this command.".to_string()),
},
);
}

OpenAiTool::Function(ResponsesApiTool {
name: "shell".to_string(),
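
Because only `SandboxPolicy::WorkspaceWrite { .. }` adds the two escalation properties, a model call against that variant could pass arguments like the following (hypothetical values, shown for illustration):

{
  "command": ["cargo", "install", "ripgrep"],
  "with_escalated_permissions": true,
  "justification": "Needs network access outside the sandbox."
}

Under `ReadOnly` or `DangerFullAccess` the schema simply omits those two fields.
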
@@ -285,7 +333,7 @@ pub(crate) struct ApplyPatchToolArgs {
/// Responses API:
/// https://platform.openai.com/docs/guides/function-calling?api-mode=responses
pub fn create_tools_json_for_responses_api(
tools: &[OpenAiTool],
tools: &Vec<OpenAiTool>,
) -> crate::error::Result<Vec<serde_json::Value>> {
let mut tools_json = Vec::new();

@@ -300,7 +348,7 @@ pub fn create_tools_json_for_responses_api(
/// Chat Completions API:
/// https://platform.openai.com/docs/guides/function-calling?api-mode=chat
pub(crate) fn create_tools_json_for_chat_completions_api(
tools: &[OpenAiTool],
tools: &Vec<OpenAiTool>,
) -> crate::error::Result<Vec<serde_json::Value>> {
// We start with the JSON for the Responses API and then rewrite it to match
// the chat completions tool call format.
@@ -400,7 +448,10 @@ fn sanitize_json_schema(value: &mut JsonValue) {
}

// Normalize/ensure type
let mut ty = map.get("type").and_then(|v| v.as_str()).map(str::to_string);
let mut ty = map
.get("type")
.and_then(|v| v.as_str())
.map(|s| s.to_string());

// If type is an array (union), pick first supported; else leave to inference
if ty.is_none()
@@ -486,13 +537,16 @@ pub(crate) fn get_openai_tools(
tools.push(create_unified_exec_tool());
} else {
match &config.shell_type {
ConfigShellToolType::Default => {
ConfigShellToolType::DefaultShell => {
tools.push(create_shell_tool());
}
ConfigShellToolType::Local => {
ConfigShellToolType::ShellWithRequest { sandbox_policy } => {
tools.push(create_shell_tool_for_sandbox(sandbox_policy));
}
ConfigShellToolType::LocalShell => {
tools.push(OpenAiTool::LocalShell {});
}
ConfigShellToolType::Streamable => {
ConfigShellToolType::StreamableShell => {
tools.push(OpenAiTool::Function(
crate::exec_command::create_exec_command_tool_for_responses_api(),
));
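
The union-type branch referenced above ("pick first supported") is easy to illustrate standalone. A minimal sketch, assuming the supported primitive names mirror the `JsonSchema` variants:

use serde_json::Value;

// Pick the first supported entry from a `"type": [...]` union (illustrative only).
fn first_supported_type(ty: &Value) -> Option<String> {
    const SUPPORTED: [&str; 5] = ["object", "array", "string", "number", "boolean"];
    match ty {
        Value::String(s) if SUPPORTED.contains(&s.as_str()) => Some(s.clone()),
        Value::Array(types) => types
            .iter()
            .filter_map(Value::as_str)
            .find(|t| SUPPORTED.contains(t))
            .map(str::to_string),
        _ => None,
    }
}
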
@@ -582,6 +636,8 @@ mod tests {
.expect("codex-mini-latest should be a valid model family");
let config = ToolsConfig::new(&ToolsConfigParams {
model_family: &model_family,
approval_policy: AskForApproval::Never,
sandbox_policy: SandboxPolicy::ReadOnly,
include_plan_tool: true,
include_apply_patch_tool: false,
include_web_search_request: true,
@@ -602,6 +658,8 @@ mod tests {
let model_family = find_family_for_model("o3").expect("o3 should be a valid model family");
let config = ToolsConfig::new(&ToolsConfigParams {
model_family: &model_family,
approval_policy: AskForApproval::Never,
sandbox_policy: SandboxPolicy::ReadOnly,
include_plan_tool: true,
include_apply_patch_tool: false,
include_web_search_request: true,
@@ -622,6 +680,8 @@ mod tests {
let model_family = find_family_for_model("o3").expect("o3 should be a valid model family");
let config = ToolsConfig::new(&ToolsConfigParams {
model_family: &model_family,
approval_policy: AskForApproval::Never,
sandbox_policy: SandboxPolicy::ReadOnly,
include_plan_tool: false,
include_apply_patch_tool: false,
include_web_search_request: true,
@@ -726,6 +786,8 @@ mod tests {
let model_family = find_family_for_model("o3").expect("o3 should be a valid model family");
let config = ToolsConfig::new(&ToolsConfigParams {
model_family: &model_family,
approval_policy: AskForApproval::Never,
sandbox_policy: SandboxPolicy::ReadOnly,
include_plan_tool: false,
include_apply_patch_tool: false,
include_web_search_request: false,
@@ -802,6 +864,8 @@ mod tests {
let model_family = find_family_for_model("o3").expect("o3 should be a valid model family")
let config = ToolsConfig::new(&ToolsConfigParams {
model_family: &model_family,
approval_policy: AskForApproval::Never,
sandbox_policy: SandboxPolicy::ReadOnly,
include_plan_tool: false,
include_apply_patch_tool: false,
include_web_search_request: true,
@@ -863,6 +927,8 @@ mod tests {
let model_family = find_family_for_model("o3").expect("o3 should be a valid model family");
let config = ToolsConfig::new(&ToolsConfigParams {
model_family: &model_family,
approval_policy: AskForApproval::Never,
sandbox_policy: SandboxPolicy::ReadOnly,
include_plan_tool: false,
include_apply_patch_tool: false,
include_web_search_request: true,
@@ -919,6 +985,8 @@ mod tests {
let model_family = find_family_for_model("o3").expect("o3 should be a valid model family");
let config = ToolsConfig::new(&ToolsConfigParams {
model_family: &model_family,
approval_policy: AskForApproval::Never,
sandbox_policy: SandboxPolicy::ReadOnly,
include_plan_tool: false,
include_apply_patch_tool: false,
include_web_search_request: true,
@@ -978,6 +1046,8 @@ mod tests {
let model_family = find_family_for_model("o3").expect("o3 should be a valid model family");
let config = ToolsConfig::new(&ToolsConfigParams {
model_family: &model_family,
approval_policy: AskForApproval::Never,
sandbox_policy: SandboxPolicy::ReadOnly,
include_plan_tool: false,
include_apply_patch_tool: false,
include_web_search_request: true,
@@ -1030,8 +1100,14 @@ mod tests {
}

#[test]
fn test_shell_tool() {
let tool = super::create_shell_tool();
fn test_shell_tool_for_sandbox_workspace_write() {
let sandbox_policy = SandboxPolicy::WorkspaceWrite {
writable_roots: vec!["workspace".into()],
network_access: false,
exclude_tmpdir_env_var: false,
exclude_slash_tmp: false,
};
let tool = super::create_shell_tool_for_sandbox(&sandbox_policy);
let OpenAiTool::Function(ResponsesApiTool {
description, name, ..
}) = &tool
@@ -1043,4 +1119,33 @@ mod tests {
let expected = "Runs a shell command and returns its output.";
assert_eq!(description, expected);
}

#[test]
fn test_shell_tool_for_sandbox_readonly() {
let tool = super::create_shell_tool_for_sandbox(&SandboxPolicy::ReadOnly);
let OpenAiTool::Function(ResponsesApiTool {
description, name, ..
}) = &tool
else {
panic!("expected function tool");
};
assert_eq!(name, "shell");

let expected = "Runs a shell command and returns its output.";
assert_eq!(description, expected);
}

#[test]
fn test_shell_tool_for_sandbox_danger_full_access() {
let tool = super::create_shell_tool_for_sandbox(&SandboxPolicy::DangerFullAccess);
let OpenAiTool::Function(ResponsesApiTool {
description, name, ..
}) = &tool
else {
panic!("expected function tool");
};
assert_eq!(name, "shell");

assert_eq!(description, "Runs a shell command and returns its output.");
}
}

@@ -40,7 +40,7 @@ impl From<ParsedCommand> for codex_protocol::parse_command::ParsedCommand {
}

fn shlex_join(tokens: &[String]) -> String {
shlex_try_join(tokens.iter().map(String::as_str))
shlex_try_join(tokens.iter().map(|s| s.as_str()))
.unwrap_or_else(|_| "<command included NUL byte>".to_string())
}

@@ -72,14 +72,13 @@ pub fn parse_command(command: &[String]) -> Vec<ParsedCommand> {
/// Tests are at the top to encourage using TDD + Codex to fix the implementation.
mod tests {
use super::*;
use std::string::ToString;

fn shlex_split_safe(s: &str) -> Vec<String> {
shlex_split(s).unwrap_or_else(|| s.split_whitespace().map(ToString::to_string).collect())
shlex_split(s).unwrap_or_else(|| s.split_whitespace().map(|s| s.to_string()).collect())
}

fn vec_str(args: &[&str]) -> Vec<String> {
args.iter().map(ToString::to_string).collect()
args.iter().map(|s| s.to_string()).collect()
}

fn assert_parsed(args: &[String], expected: Vec<ParsedCommand>) {
@@ -895,7 +894,7 @@ fn simplify_once(commands: &[ParsedCommand]) -> Option<Vec<ParsedCommand>> {

// echo ... && ...rest => ...rest
if let ParsedCommand::Unknown { cmd } = &commands[0]
&& shlex_split(cmd).is_some_and(|t| t.first().map(String::as_str) == Some("echo"))
&& shlex_split(cmd).is_some_and(|t| t.first().map(|s| s.as_str()) == Some("echo"))
{
return Some(commands[1..].to_vec());
}
@@ -903,7 +902,7 @@ fn simplify_once(commands: &[ParsedCommand]) -> Option<Vec<ParsedCommand>> {
// cd foo && [any command] => [any command] (keep non-cd when a cd is followed by something)
if let Some(idx) = commands.iter().position(|pc| match pc {
ParsedCommand::Unknown { cmd } => {
shlex_split(cmd).is_some_and(|t| t.first().map(String::as_str) == Some("cd"))
shlex_split(cmd).is_some_and(|t| t.first().map(|s| s.as_str()) == Some("cd"))
}
_ => false,
}) && commands.len() > idx + 1
@@ -1036,7 +1035,7 @@ fn short_display_path(path: &str) -> String {
});
parts
.next()
.map(str::to_string)
.map(|s| s.to_string())
.unwrap_or_else(|| trimmed.to_string())
}

@@ -1157,8 +1156,10 @@ fn parse_bash_lc_commands(original: &[String]) -> Option<Vec<ParsedCommand>> {
// bias toward the primary command when pipelines are present.
// First, drop obvious small formatting helpers (e.g., wc/awk/etc).
let had_multiple_commands = all_commands.len() > 1;
// Commands arrive in source order; drop formatting helpers while preserving it.
let filtered_commands = drop_small_formatting_commands(all_commands);
// The bash AST walker yields commands in right-to-left order for
// connector/pipeline sequences. Reverse to reflect actual execution order.
let mut filtered_commands = drop_small_formatting_commands(all_commands);
filtered_commands.reverse();
if filtered_commands.is_empty() {
return Some(vec![ParsedCommand::Unknown {
cmd: script.clone(),
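
The reordering comment is easiest to see on a concrete pipeline. Assuming the walker yields `bash -lc "sort notes.txt | uniq"` right to left, the reversal restores execution order:

// Hypothetical walker output for `sort notes.txt | uniq`:
let mut cmds = vec!["uniq".to_string(), "sort notes.txt".to_string()];
cmds.reverse();
assert_eq!(cmds, vec!["sort notes.txt".to_string(), "uniq".to_string()]);
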
@@ -1191,8 +1192,8 @@ fn parse_bash_lc_commands(original: &[String]) -> Option<Vec<ParsedCommand>> {
if had_connectors {
let has_pipe = script_tokens.iter().any(|t| t == "|");
let has_sed_n = script_tokens.windows(2).any(|w| {
w.first().map(String::as_str) == Some("sed")
&& w.get(1).map(String::as_str) == Some("-n")
w.first().map(|s| s.as_str()) == Some("sed")
&& w.get(1).map(|s| s.as_str()) == Some("-n")
});
if has_pipe && has_sed_n {
ParsedCommand::Read {
@@ -1272,7 +1273,7 @@ fn is_small_formatting_command(tokens: &[String]) -> bool {
// Keep `sed -n <range> file` (treated as a file read elsewhere);
// otherwise consider it a formatting helper in a pipeline.
tokens.len() < 4
|| !(tokens[1] == "-n" && is_valid_sed_n_arg(tokens.get(2).map(String::as_str)))
|| !(tokens[1] == "-n" && is_valid_sed_n_arg(tokens.get(2).map(|s| s.as_str())))
}
_ => false,
}
@@ -1319,7 +1320,7 @@ fn summarize_main_tokens(main_cmd: &[String]) -> ParsedCommand {
(None, non_flags.first().map(|s| short_display_path(s)))
} else {
(
non_flags.first().cloned().map(String::from),
non_flags.first().cloned().map(|s| s.to_string()),
non_flags.get(1).map(|s| short_display_path(s)),
)
};
@@ -1354,7 +1355,7 @@ fn summarize_main_tokens(main_cmd: &[String]) -> ParsedCommand {
.collect();
// Do not shorten the query: grep patterns may legitimately contain slashes
// and should be preserved verbatim. Only paths should be shortened.
let query = non_flags.first().cloned().map(String::from);
let query = non_flags.first().cloned().map(|s| s.to_string());
let path = non_flags.get(1).map(|s| short_display_path(s));
ParsedCommand::Search {
cmd: shlex_join(main_cmd),
@@ -1364,7 +1365,7 @@ fn summarize_main_tokens(main_cmd: &[String]) -> ParsedCommand {
}
Some((head, tail)) if head == "cat" => {
// Support both `cat <file>` and `cat -- <file>` forms.
let effective_tail: &[String] = if tail.first().map(String::as_str) == Some("--") {
let effective_tail: &[String] = if tail.first().map(|s| s.as_str()) == Some("--") {
&tail[1..]
} else {
tail
@@ -1480,7 +1481,7 @@ fn summarize_main_tokens(main_cmd: &[String]) -> ParsedCommand {
if head == "sed"
&& tail.len() >= 3
&& tail[0] == "-n"
&& is_valid_sed_n_arg(tail.get(1).map(String::as_str)) =>
&& is_valid_sed_n_arg(tail.get(1).map(|s| s.as_str())) =>
{
if let Some(path) = tail.get(2) {
let name = short_display_path(path);

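`is_valid_sed_n_arg` itself is not part of this diff. In the spirit of its call sites, a plausible stand-in that accepts `sed -n` range arguments such as `5p` or `1,20p` might look like this (hypothetical helper, not the real predicate):

fn looks_like_sed_n_arg(arg: Option<&str>) -> bool {
    // Accept "Np" and "M,Np" forms, e.g. "5p" or "1,20p".
    let Some(body) = arg.and_then(|a| a.strip_suffix('p')) else {
        return false;
    };
    let parts: Vec<&str> = body.split(',').collect();
    matches!(parts.len(), 1 | 2)
        && parts.iter().all(|p| !p.is_empty() && p.bytes().all(|b| b.is_ascii_digit()))
}
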
@@ -2,12 +2,13 @@ use std::collections::BTreeMap;
use std::sync::LazyLock;

use crate::codex::Session;
use crate::function_tool::FunctionCallError;
use crate::openai_tools::JsonSchema;
use crate::openai_tools::OpenAiTool;
use crate::openai_tools::ResponsesApiTool;
use crate::protocol::Event;
use crate::protocol::EventMsg;
use codex_protocol::models::FunctionCallOutputPayload;
use codex_protocol::models::ResponseInputItem;

// Use the canonical plan tool types from the protocol crate to ensure
// type-identity matches events transported via `codex_protocol`.
@@ -66,20 +67,44 @@ pub(crate) async fn handle_update_plan(
session: &Session,
arguments: String,
sub_id: String,
_call_id: String,
) -> Result<String, FunctionCallError> {
let args = parse_update_plan_arguments(&arguments)?;
session
.send_event(Event {
id: sub_id.to_string(),
msg: EventMsg::PlanUpdate(args),
})
.await;
Ok("Plan updated".to_string())
call_id: String,
) -> ResponseInputItem {
match parse_update_plan_arguments(arguments, &call_id) {
Ok(args) => {
let output = ResponseInputItem::FunctionCallOutput {
call_id,
output: FunctionCallOutputPayload {
content: "Plan updated".to_string(),
success: Some(true),
},
};
session
.send_event(Event {
id: sub_id.to_string(),
msg: EventMsg::PlanUpdate(args),
})
.await;
output
}
Err(output) => *output,
}
}

fn parse_update_plan_arguments(arguments: &str) -> Result<UpdatePlanArgs, FunctionCallError> {
serde_json::from_str::<UpdatePlanArgs>(arguments).map_err(|e| {
FunctionCallError::RespondToModel(format!("failed to parse function arguments: {e}"))
})
fn parse_update_plan_arguments(
arguments: String,
call_id: &str,
) -> Result<UpdatePlanArgs, Box<ResponseInputItem>> {
match serde_json::from_str::<UpdatePlanArgs>(&arguments) {
Ok(args) => Ok(args),
Err(e) => {
let output = ResponseInputItem::FunctionCallOutput {
call_id: call_id.to_string(),
output: FunctionCallOutputPayload {
content: format!("failed to parse function arguments: {e}"),
success: None,
},
};
Err(Box::new(output))
}
}
}

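For reference, the `arguments` string is JSON deserialized into `UpdatePlanArgs`. Illustratively, with field names assumed from the protocol crate's plan types (they are not shown in this diff):

{
  "explanation": "Reprioritize after the parser fix landed.",
  "plan": [
    { "step": "Fix the tokenizer", "status": "completed" },
    { "step": "Add regression tests", "status": "in_progress" }
  ]
}
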
@@ -1,55 +0,0 @@
use crate::protocol::ReviewFinding;

// Note: We keep this module UI-agnostic. It returns plain strings that
// higher layers (e.g., TUI) may style as needed.

fn format_location(item: &ReviewFinding) -> String {
let path = item.code_location.absolute_file_path.display();
let start = item.code_location.line_range.start;
let end = item.code_location.line_range.end;
format!("{path}:{start}-{end}")
}

/// Format a full review findings block as plain text lines.
///
/// - When `selection` is `Some`, each item line includes a checkbox marker:
/// "[x]" for selected items and "[ ]" for unselected. Missing indices
/// default to selected.
/// - When `selection` is `None`, the marker is omitted and a simple bullet is
/// rendered ("- Title — path:start-end").
pub fn format_review_findings_block(
findings: &[ReviewFinding],
selection: Option<&[bool]>,
) -> String {
let mut lines: Vec<String> = Vec::new();
lines.push(String::new());

// Header
if findings.len() > 1 {
lines.push("Full review comments:".to_string());
} else {
lines.push("Review comment:".to_string());
}

for (idx, item) in findings.iter().enumerate() {
lines.push(String::new());

let title = &item.title;
let location = format_location(item);

if let Some(flags) = selection {
// Default to selected if index is out of bounds.
let checked = flags.get(idx).copied().unwrap_or(true);
let marker = if checked { "[x]" } else { "[ ]" };
lines.push(format!("- {marker} {title} — {location}"));
} else {
lines.push(format!("- {title} — {location}"));
}

for body_line in item.body.lines() {
lines.push(format!(" {body_line}"));
}
}

lines.join("\n")
}
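
As a concrete rendering, one hypothetical finding with `selection = Some(&[true])` would come out as:

Review comment:

- [x] Possible off-by-one in range check — core/src/parser.rs:118-126
  The loop bound stops one line short of the configured range.

With `selection = None`, the same item renders without the `[x]` marker.
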
@@ -25,9 +25,8 @@ pub(crate) fn should_persist_response_item(item: &ResponseItem) -> bool {
| ResponseItem::FunctionCall { .. }
| ResponseItem::FunctionCallOutput { .. }
| ResponseItem::CustomToolCall { .. }
| ResponseItem::CustomToolCallOutput { .. }
| ResponseItem::WebSearchCall { .. } => true,
ResponseItem::Other => false,
| ResponseItem::CustomToolCallOutput { .. } => true,
ResponseItem::WebSearchCall { .. } | ResponseItem::Other => false,
}
}

@@ -41,8 +40,7 @@ pub(crate) fn should_persist_event_msg(ev: &EventMsg) -> bool {
| EventMsg::AgentReasoningRawContent(_)
| EventMsg::TokenCount(_)
| EventMsg::EnteredReviewMode(_)
| EventMsg::ExitedReviewMode(_)
| EventMsg::TurnAborted(_) => true,
| EventMsg::ExitedReviewMode(_) => true,
EventMsg::Error(_)
| EventMsg::TaskStarted(_)
| EventMsg::TaskComplete(_)
@@ -69,8 +67,8 @@ pub(crate) fn should_persist_event_msg(ev: &EventMsg) -> bool {
| EventMsg::McpListToolsResponse(_)
| EventMsg::ListCustomPromptsResponse(_)
| EventMsg::PlanUpdate(_)
| EventMsg::TurnAborted(_)
| EventMsg::ShutdownComplete
| EventMsg::ConversationPath(_)
| EventMsg::WorktreeRemoved(_) => false,
| EventMsg::ConversationPath(_) => false,
}
}

@@ -7,6 +7,8 @@ use std::path::Path;
use std::path::PathBuf;

use codex_protocol::mcp_protocol::ConversationId;
use serde::Deserialize;
use serde::Serialize;
use serde_json::Value;
use time::OffsetDateTime;
use time::format_description::FormatItem;
@@ -26,6 +28,7 @@ use super::policy::is_persisted_response_item;
use crate::config::Config;
use crate::default_client::ORIGINATOR;
use crate::git_info::collect_git_info;
use codex_protocol::models::ResponseItem;
use codex_protocol::protocol::InitialHistory;
use codex_protocol::protocol::ResumedHistory;
use codex_protocol::protocol::RolloutItem;
@@ -33,6 +36,19 @@ use codex_protocol::protocol::RolloutLine;
use codex_protocol::protocol::SessionMeta;
use codex_protocol::protocol::SessionMetaLine;

#[derive(Serialize, Deserialize, Default, Clone)]
pub struct SessionStateSnapshot {}

#[derive(Serialize, Deserialize, Default, Clone)]
pub struct SavedSession {
pub session: SessionMeta,
#[serde(default)]
pub items: Vec<ResponseItem>,
#[serde(default)]
pub state: SessionStateSnapshot,
pub session_id: ConversationId,
}

/// Records all [`ResponseItem`]s for a session and flushes them to disk after
/// every update.
///

@@ -7,9 +7,7 @@ use codex_apply_patch::ApplyPatchAction;
use codex_apply_patch::ApplyPatchFileChange;

use crate::exec::SandboxType;

use crate::command_safety::is_dangerous_command::command_might_be_dangerous;
use crate::command_safety::is_safe_command::is_known_safe_command;
use crate::is_safe_command::is_known_safe_command;
use crate::protocol::AskForApproval;
use crate::protocol::SandboxPolicy;

@@ -87,20 +85,6 @@ pub fn assess_command_safety(
approved: &HashSet<Vec<String>>,
with_escalated_permissions: bool,
) -> SafetyCheck {
// Some commands look dangerous. Even if they are run inside a sandbox,
// unless the user has explicitly approved them, we should ask,
// or reject if the approval_policy tells us not to ask.
if command_might_be_dangerous(command) && !approved.contains(command) {
if approval_policy == AskForApproval::Never {
return SafetyCheck::Reject {
reason: "dangerous command detected; rejected by user approval settings"
.to_string(),
};
}

return SafetyCheck::AskUser;
}

// A command is "trusted" because either:
// - it belongs to a set of commands we consider "safe" by default, or
// - the user has explicitly approved the command for this session
@@ -114,7 +98,6 @@ pub fn assess_command_safety(
// would probably be fine to run the command in a sandbox, but when
// `approved.contains(command)` is `true`, the user may have approved it for
// the session _because_ they know it needs to run outside a sandbox.

if is_known_safe_command(command) || approved.contains(command) {
return SafetyCheck::AutoApprove {
sandbox_type: SandboxType::None,
@@ -342,56 +325,6 @@ mod tests {
assert_eq!(safety_check, SafetyCheck::AskUser);
}

#[test]
fn dangerous_command_allowed_if_explicitly_approved() {
let command = vec!["git".to_string(), "reset".to_string(), "--hard".to_string()];
let approval_policy = AskForApproval::OnRequest;
let sandbox_policy = SandboxPolicy::ReadOnly;
let mut approved: HashSet<Vec<String>> = HashSet::new();
approved.insert(command.clone());
let request_escalated_privileges = false;

let safety_check = assess_command_safety(
&command,
approval_policy,
&sandbox_policy,
&approved,
request_escalated_privileges,
);

assert_eq!(
safety_check,
SafetyCheck::AutoApprove {
sandbox_type: SandboxType::None
}
);
}

#[test]
fn dangerous_command_not_allowed_if_not_explicitly_approved() {
let command = vec!["git".to_string(), "reset".to_string(), "--hard".to_string()];
let approval_policy = AskForApproval::Never;
let sandbox_policy = SandboxPolicy::ReadOnly;
let approved: HashSet<Vec<String>> = HashSet::new();
let request_escalated_privileges = false;

let safety_check = assess_command_safety(
&command,
approval_policy,
&sandbox_policy,
&approved,
request_escalated_privileges,
);

assert_eq!(
safety_check,
SafetyCheck::Reject {
reason: "dangerous command detected; rejected by user approval settings"
.to_string(),
}
);
}

#[test]
fn test_request_escalated_privileges_no_sandbox_fallback() {
let command = vec!["git".to_string(), "commit".to_string()];

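`command_might_be_dangerous` is not shown in this diff. Judging only from the tests above, which treat `git reset --hard` as dangerous, a toy stand-in could be written as follows; the real heuristic is certainly broader:

// Hypothetical stand-in that matches only what the tests exercise.
fn command_might_be_dangerous_toy(command: &[String]) -> bool {
    matches!(
        command,
        [git, reset, hard] if git == "git" && reset == "reset" && hard == "--hard"
    )
}
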
@@ -18,20 +18,19 @@ const MACOS_PATH_TO_SEATBELT_EXECUTABLE: &str = "/usr/bin/sandbox-exec";

pub async fn spawn_command_under_seatbelt(
command: Vec<String>,
command_cwd: PathBuf,
sandbox_policy: &SandboxPolicy,
sandbox_policy_cwd: &Path,
cwd: PathBuf,
stdio_policy: StdioPolicy,
mut env: HashMap<String, String>,
) -> std::io::Result<Child> {
let args = create_seatbelt_command_args(command, sandbox_policy, sandbox_policy_cwd);
let args = create_seatbelt_command_args(command, sandbox_policy, &cwd);
let arg0 = None;
env.insert(CODEX_SANDBOX_ENV_VAR.to_string(), "seatbelt".to_string());
spawn_child_async(
PathBuf::from(MACOS_PATH_TO_SEATBELT_EXECUTABLE),
args,
arg0,
command_cwd,
cwd,
sandbox_policy,
stdio_policy,
env,
@@ -42,7 +41,7 @@ pub async fn spawn_command_under_seatbelt(
fn create_seatbelt_command_args(
command: Vec<String>,
sandbox_policy: &SandboxPolicy,
sandbox_policy_cwd: &Path,
cwd: &Path,
) -> Vec<String> {
let (file_write_policy, extra_cli_args) = {
if sandbox_policy.has_full_disk_write_access() {
@@ -52,7 +51,7 @@ fn create_seatbelt_command_args(
Vec::<String>::new(),
)
} else {
let writable_roots = sandbox_policy.get_writable_roots_with_cwd(sandbox_policy_cwd);
let writable_roots = sandbox_policy.get_writable_roots_with_cwd(cwd);

let mut writable_folder_policies: Vec<String> = Vec::new();
let mut cli_args: Vec<String> = Vec::new();

@@ -5,20 +5,20 @@ use std::path::PathBuf;

#[derive(Debug, PartialEq, Eq, Clone, Serialize, Deserialize)]
pub struct ZshShell {
pub(crate) shell_path: String,
pub(crate) zshrc_path: String,
shell_path: String,
zshrc_path: String,
}

#[derive(Debug, PartialEq, Eq, Clone, Serialize, Deserialize)]
pub struct BashShell {
pub(crate) shell_path: String,
pub(crate) bashrc_path: String,
shell_path: String,
bashrc_path: String,
}

#[derive(Debug, PartialEq, Eq, Clone, Serialize, Deserialize)]
pub struct PowerShellConfig {
pub(crate) exe: String, // Executable name or path, e.g. "pwsh" or "powershell.exe".
pub(crate) bash_exe_fallback: Option<PathBuf>, // In case the model generates a bash command.
exe: String, // Executable name or path, e.g. "pwsh" or "powershell.exe".
bash_exe_fallback: Option<PathBuf>, // In case the model generates a bash command.
}

#[derive(Debug, PartialEq, Eq, Clone, Serialize, Deserialize)]
@@ -32,19 +32,15 @@ pub enum Shell {
impl Shell {
pub fn format_default_shell_invocation(&self, command: Vec<String>) -> Option<Vec<String>> {
match self {
Shell::Zsh(zsh) => format_shell_invocation_with_rc(
command.as_slice(),
&zsh.shell_path,
&zsh.zshrc_path,
),
Shell::Bash(bash) => format_shell_invocation_with_rc(
command.as_slice(),
&bash.shell_path,
&bash.bashrc_path,
),
Shell::Zsh(zsh) => {
format_shell_invocation_with_rc(&command, &zsh.shell_path, &zsh.zshrc_path)
}
Shell::Bash(bash) => {
format_shell_invocation_with_rc(&command, &bash.shell_path, &bash.bashrc_path)
}
Shell::PowerShell(ps) => {
// If model generated a bash command, prefer a detected bash fallback
if let Some(script) = strip_bash_lc(command.as_slice()) {
if let Some(script) = strip_bash_lc(&command) {
return match &ps.bash_exe_fallback {
Some(bash) => Some(vec![
bash.to_string_lossy().to_string(),
@@ -73,7 +69,7 @@ impl Shell {
return Some(command);
}

let joined = shlex::try_join(command.iter().map(String::as_str)).ok();
let joined = shlex::try_join(command.iter().map(|s| s.as_str())).ok();
return joined.map(|arg| {
vec![
ps.exe.clone(),
@@ -106,12 +102,12 @@ impl Shell {
}

fn format_shell_invocation_with_rc(
command: &[String],
command: &Vec<String>,
shell_path: &str,
rc_path: &str,
) -> Option<Vec<String>> {
let joined = strip_bash_lc(command)
.or_else(|| shlex::try_join(command.iter().map(String::as_str)).ok())?;
.or_else(|| shlex::try_join(command.iter().map(|s| s.as_str())).ok())?;

let rc_command = if std::path::Path::new(rc_path).exists() {
format!("source {rc_path} && ({joined})")
@@ -122,8 +118,8 @@ fn format_shell_invocation_with_rc(
Some(vec![shell_path.to_string(), "-lc".to_string(), rc_command])
}

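Concretely, assuming a zsh at `/bin/zsh` and an rc file that exists, the wrapper above turns a plain command into a login-shell invocation:

// Illustrative only:
// format_shell_invocation_with_rc(&vec!["ls".into(), "-la".into()], "/bin/zsh", "/Users/me/.zshrc")
//   => Some(["/bin/zsh", "-lc", "source /Users/me/.zshrc && (ls -la)"])
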
fn strip_bash_lc(command: &[String]) -> Option<String> {
match command {
fn strip_bash_lc(command: &Vec<String>) -> Option<String> {
match command.as_slice() {
// exactly three items
[first, second, third]
// first two must be "bash", "-lc"
@@ -224,7 +220,6 @@ pub async fn default_user_shell() -> Shell {
mod tests {
use super::*;
use std::process::Command;
use std::string::ToString;

#[tokio::test]
async fn test_current_shell_detects_zsh() {
@@ -328,7 +323,7 @@ mod tests {
});

let actual_cmd = shell
.format_default_shell_invocation(input.iter().map(ToString::to_string).collect());
.format_default_shell_invocation(input.iter().map(|s| s.to_string()).collect());
let expected_cmd = expected_cmd
.iter()
.map(|s| s.replace("BASHRC_PATH", bashrc_path.to_str().unwrap()))
@@ -350,7 +345,6 @@ mod tests {
},
SandboxType::None,
&SandboxPolicy::DangerFullAccess,
temp_home.path(),
&None,
None,
)
@@ -372,7 +366,6 @@ mod tests {
#[cfg(target_os = "macos")]
mod macos_tests {
use super::*;
use std::string::ToString;

#[tokio::test]
async fn test_run_with_profile_escaping_and_execution() {
@@ -436,7 +429,7 @@ mod macos_tests {
});

let actual_cmd = shell
.format_default_shell_invocation(input.iter().map(ToString::to_string).collect());
.format_default_shell_invocation(input.iter().map(|s| s.to_string()).collect());
let expected_cmd = expected_cmd
.iter()
.map(|s| s.replace("ZSHRC_PATH", zshrc_path.to_str().unwrap()))
@@ -458,7 +451,6 @@ mod macos_tests {
},
SandboxType::None,
&SandboxPolicy::DangerFullAccess,
temp_home.path(),
&None,
None,
)
@@ -561,10 +553,10 @@ mod tests_windows {

for (shell, input, expected_cmd) in cases {
let actual_cmd = shell
.format_default_shell_invocation(input.iter().map(|s| (*s).to_string()).collect());
.format_default_shell_invocation(input.iter().map(|s| s.to_string()).collect());
assert_eq!(
actual_cmd,
Some(expected_cmd.iter().map(|s| (*s).to_string()).collect())
Some(expected_cmd.iter().map(|s| s.to_string()).collect())
);
}
}

@@ -1,9 +0,0 @@
mod service;
mod session;
mod turn;

pub(crate) use service::SessionServices;
pub(crate) use session::SessionState;
pub(crate) use turn::ActiveTurn;
pub(crate) use turn::RunningTask;
pub(crate) use turn::TaskKind;
@@ -1,20 +0,0 @@
use crate::RolloutRecorder;
use crate::exec_command::ExecSessionManager;
use crate::git_worktree::WorktreeHandle;
use crate::mcp_connection_manager::McpConnectionManager;
use crate::unified_exec::UnifiedExecSessionManager;
use crate::user_notification::UserNotifier;
use std::path::PathBuf;
use tokio::sync::Mutex;

pub(crate) struct SessionServices {
pub(crate) mcp_connection_manager: McpConnectionManager,
pub(crate) session_manager: ExecSessionManager,
pub(crate) unified_exec_manager: UnifiedExecSessionManager,
pub(crate) notifier: UserNotifier,
pub(crate) rollout: Mutex<Option<RolloutRecorder>>,
pub(crate) worktree: Mutex<Option<WorktreeHandle>>,
pub(crate) codex_linux_sandbox_exe: Option<PathBuf>,
pub(crate) user_shell: crate::shell::Shell,
pub(crate) show_raw_agent_reasoning: bool,
}
@@ -1,80 +0,0 @@
//! Session-wide mutable state.

use std::collections::HashSet;

use codex_protocol::models::ResponseItem;

use crate::conversation_history::ConversationHistory;
use crate::protocol::RateLimitSnapshot;
use crate::protocol::TokenUsage;
use crate::protocol::TokenUsageInfo;

/// Persistent, session-scoped state previously stored directly on `Session`.
#[derive(Default)]
pub(crate) struct SessionState {
pub(crate) approved_commands: HashSet<Vec<String>>,
pub(crate) history: ConversationHistory,
pub(crate) token_info: Option<TokenUsageInfo>,
pub(crate) latest_rate_limits: Option<RateLimitSnapshot>,
}

impl SessionState {
/// Create a new session state mirroring previous `State::default()` semantics.
pub(crate) fn new() -> Self {
Self {
history: ConversationHistory::new(),
..Default::default()
}
}

// History helpers
pub(crate) fn record_items<I>(&mut self, items: I)
where
I: IntoIterator,
I::Item: std::ops::Deref<Target = ResponseItem>,
{
self.history.record_items(items)
}

pub(crate) fn history_snapshot(&self) -> Vec<ResponseItem> {
self.history.contents()
}

pub(crate) fn replace_history(&mut self, items: Vec<ResponseItem>) {
self.history.replace(items);
}

// Approved command helpers
pub(crate) fn add_approved_command(&mut self, cmd: Vec<String>) {
self.approved_commands.insert(cmd);
}

pub(crate) fn approved_commands_ref(&self) -> &HashSet<Vec<String>> {
&self.approved_commands
}

// Token/rate limit helpers
pub(crate) fn update_token_info_from_usage(
&mut self,
usage: &TokenUsage,
model_context_window: Option<u64>,
) {
self.token_info = TokenUsageInfo::new_or_append(
&self.token_info,
&Some(usage.clone()),
model_context_window,
);
}

pub(crate) fn set_rate_limits(&mut self, snapshot: RateLimitSnapshot) {
self.latest_rate_limits = Some(snapshot);
}

pub(crate) fn token_info_and_rate_limits(
&self,
) -> (Option<TokenUsageInfo>, Option<RateLimitSnapshot>) {
(self.token_info.clone(), self.latest_rate_limits.clone())
}

// Pending input/approval moved to TurnState.
}
@@ -1,115 +0,0 @@
//! Turn-scoped state and active turn metadata scaffolding.

use indexmap::IndexMap;
use std::collections::HashMap;
use std::sync::Arc;
use tokio::sync::Mutex;
use tokio::task::AbortHandle;

use codex_protocol::models::ResponseInputItem;
use tokio::sync::oneshot;

use crate::protocol::ReviewDecision;
use crate::tasks::SessionTask;

/// Metadata about the currently running turn.
pub(crate) struct ActiveTurn {
pub(crate) tasks: IndexMap<String, RunningTask>,
pub(crate) turn_state: Arc<Mutex<TurnState>>,
}

impl Default for ActiveTurn {
fn default() -> Self {
Self {
tasks: IndexMap::new(),
turn_state: Arc::new(Mutex::new(TurnState::default())),
}
}
}

#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub(crate) enum TaskKind {
Regular,
Review,
Compact,
}

#[derive(Clone)]
pub(crate) struct RunningTask {
pub(crate) handle: AbortHandle,
pub(crate) kind: TaskKind,
pub(crate) task: Arc<dyn SessionTask>,
}

impl ActiveTurn {
pub(crate) fn add_task(&mut self, sub_id: String, task: RunningTask) {
self.tasks.insert(sub_id, task);
}

pub(crate) fn remove_task(&mut self, sub_id: &str) -> bool {
self.tasks.swap_remove(sub_id);
self.tasks.is_empty()
}

pub(crate) fn drain_tasks(&mut self) -> IndexMap<String, RunningTask> {
std::mem::take(&mut self.tasks)
}
}

/// Mutable state for a single turn.
#[derive(Default)]
pub(crate) struct TurnState {
pending_approvals: HashMap<String, oneshot::Sender<ReviewDecision>>,
pending_input: Vec<ResponseInputItem>,
}

impl TurnState {
pub(crate) fn insert_pending_approval(
&mut self,
key: String,
tx: oneshot::Sender<ReviewDecision>,
) -> Option<oneshot::Sender<ReviewDecision>> {
self.pending_approvals.insert(key, tx)
}

pub(crate) fn remove_pending_approval(
&mut self,
key: &str,
) -> Option<oneshot::Sender<ReviewDecision>> {
self.pending_approvals.remove(key)
}

pub(crate) fn clear_pending(&mut self) {
self.pending_approvals.clear();
self.pending_input.clear();
}

pub(crate) fn push_pending_input(&mut self, input: ResponseInputItem) {
self.pending_input.push(input);
}

pub(crate) fn take_pending_input(&mut self) -> Vec<ResponseInputItem> {
if self.pending_input.is_empty() {
Vec::with_capacity(0)
} else {
let mut ret = Vec::new();
std::mem::swap(&mut ret, &mut self.pending_input);
ret
}
}
}

impl ActiveTurn {
/// Clear any pending approvals and input buffered for the current turn.
pub(crate) async fn clear_pending(&self) {
let mut ts = self.turn_state.lock().await;
ts.clear_pending();
}

/// Best-effort, non-blocking variant for synchronous contexts (Drop/interrupt).
pub(crate) fn try_clear_pending_sync(&self) {
if let Ok(mut ts) = self.turn_state.try_lock() {
ts.clear_pending();
}
}
}
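
One small observation on `take_pending_input` above: the swap dance is equivalent to `std::mem::take`, which the standard library provides for exactly this pattern (an empty `Vec` allocates nothing either way):

// Equivalent formulation, not in the original file:
pub(crate) fn take_pending_input(&mut self) -> Vec<ResponseInputItem> {
    std::mem::take(&mut self.pending_input)
}
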
@@ -1,31 +0,0 @@
use std::sync::Arc;

use async_trait::async_trait;

use crate::codex::TurnContext;
use crate::codex::compact;
use crate::protocol::InputItem;
use crate::state::TaskKind;

use super::SessionTask;
use super::SessionTaskContext;

#[derive(Clone, Copy, Default)]
pub(crate) struct CompactTask;

#[async_trait]
impl SessionTask for CompactTask {
fn kind(&self) -> TaskKind {
TaskKind::Compact
}

async fn run(
self: Arc<Self>,
session: Arc<SessionTaskContext>,
ctx: Arc<TurnContext>,
sub_id: String,
input: Vec<InputItem>,
) -> Option<String> {
compact::run_compact_task(session.clone_session(), ctx, sub_id, input).await
}
}
@@ -1,166 +0,0 @@
mod compact;
mod regular;
mod review;

use std::sync::Arc;

use async_trait::async_trait;
use tracing::trace;

use crate::codex::Session;
use crate::codex::TurnContext;
use crate::protocol::Event;
use crate::protocol::EventMsg;
use crate::protocol::InputItem;
use crate::protocol::TaskCompleteEvent;
use crate::protocol::TurnAbortReason;
use crate::protocol::TurnAbortedEvent;
use crate::state::ActiveTurn;
use crate::state::RunningTask;
use crate::state::TaskKind;

pub(crate) use compact::CompactTask;
pub(crate) use regular::RegularTask;
pub(crate) use review::ReviewTask;

/// Thin wrapper that exposes the parts of [`Session`] task runners need.
#[derive(Clone)]
pub(crate) struct SessionTaskContext {
session: Arc<Session>,
}

impl SessionTaskContext {
pub(crate) fn new(session: Arc<Session>) -> Self {
Self { session }
}

pub(crate) fn clone_session(&self) -> Arc<Session> {
Arc::clone(&self.session)
}
}

#[async_trait]
pub(crate) trait SessionTask: Send + Sync + 'static {
fn kind(&self) -> TaskKind;

async fn run(
self: Arc<Self>,
session: Arc<SessionTaskContext>,
ctx: Arc<TurnContext>,
sub_id: String,
input: Vec<InputItem>,
) -> Option<String>;

async fn abort(&self, session: Arc<SessionTaskContext>, sub_id: &str) {
let _ = (session, sub_id);
}
}

impl Session {
pub async fn spawn_task<T: SessionTask>(
self: &Arc<Self>,
turn_context: Arc<TurnContext>,
sub_id: String,
input: Vec<InputItem>,
task: T,
) {
self.abort_all_tasks(TurnAbortReason::Replaced).await;

let task: Arc<dyn SessionTask> = Arc::new(task);
let task_kind = task.kind();

let handle = {
let session_ctx = Arc::new(SessionTaskContext::new(Arc::clone(self)));
let ctx = Arc::clone(&turn_context);
let task_for_run = Arc::clone(&task);
let sub_clone = sub_id.clone();
tokio::spawn(async move {
let last_agent_message = task_for_run
.run(Arc::clone(&session_ctx), ctx, sub_clone.clone(), input)
.await;
// Emit completion uniformly from spawn site so all tasks share the same lifecycle.
let sess = session_ctx.clone_session();
sess.on_task_finished(sub_clone, last_agent_message).await;
})
.abort_handle()
};

let running_task = RunningTask {
handle,
kind: task_kind,
task,
};
self.register_new_active_task(sub_id, running_task).await;
}

pub async fn abort_all_tasks(self: &Arc<Self>, reason: TurnAbortReason) {
for (sub_id, task) in self.take_all_running_tasks().await {
self.handle_task_abort(sub_id, task, reason.clone()).await;
}
}

pub async fn on_task_finished(
self: &Arc<Self>,
sub_id: String,
last_agent_message: Option<String>,
) {
let mut active = self.active_turn.lock().await;
if let Some(at) = active.as_mut()
&& at.remove_task(&sub_id)
{
*active = None;
}
drop(active);
let event = Event {
id: sub_id,
msg: EventMsg::TaskComplete(TaskCompleteEvent { last_agent_message }),
};
self.send_event(event).await;
}

async fn register_new_active_task(&self, sub_id: String, task: RunningTask) {
let mut active = self.active_turn.lock().await;
let mut turn = ActiveTurn::default();
turn.add_task(sub_id, task);
*active = Some(turn);
}

async fn take_all_running_tasks(&self) -> Vec<(String, RunningTask)> {
let mut active = self.active_turn.lock().await;
match active.take() {
Some(mut at) => {
at.clear_pending().await;
let tasks = at.drain_tasks();
tasks.into_iter().collect()
}
None => Vec::new(),
}
}

async fn handle_task_abort(
self: &Arc<Self>,
sub_id: String,
task: RunningTask,
reason: TurnAbortReason,
) {
if task.handle.is_finished() {
return;
}

trace!(task_kind = ?task.kind, sub_id, "aborting running task");
let session_task = task.task;
let handle = task.handle;
handle.abort();
let session_ctx = Arc::new(SessionTaskContext::new(Arc::clone(self)));
session_task.abort(session_ctx, &sub_id).await;

let event = Event {
id: sub_id.clone(),
msg: EventMsg::TurnAborted(TurnAbortedEvent { reason }),
};
self.send_event(event).await;
}
}

#[cfg(test)]
mod tests {}
@@ -1,32 +0,0 @@
use std::sync::Arc;

use async_trait::async_trait;

use crate::codex::TurnContext;
use crate::codex::run_task;
use crate::protocol::InputItem;
use crate::state::TaskKind;

use super::SessionTask;
use super::SessionTaskContext;

#[derive(Clone, Copy, Default)]
pub(crate) struct RegularTask;

#[async_trait]
impl SessionTask for RegularTask {
fn kind(&self) -> TaskKind {
TaskKind::Regular
}

async fn run(
self: Arc<Self>,
session: Arc<SessionTaskContext>,
ctx: Arc<TurnContext>,
sub_id: String,
input: Vec<InputItem>,
) -> Option<String> {
let sess = session.clone_session();
run_task(sess, ctx, sub_id, input).await
}
}
@@ -1,37 +0,0 @@
use std::sync::Arc;

use async_trait::async_trait;

use crate::codex::TurnContext;
use crate::codex::exit_review_mode;
use crate::codex::run_task;
use crate::protocol::InputItem;
use crate::state::TaskKind;

use super::SessionTask;
use super::SessionTaskContext;

#[derive(Clone, Copy, Default)]
pub(crate) struct ReviewTask;

#[async_trait]
impl SessionTask for ReviewTask {
fn kind(&self) -> TaskKind {
TaskKind::Review
}

async fn run(
self: Arc<Self>,
session: Arc<SessionTaskContext>,
ctx: Arc<TurnContext>,
sub_id: String,
input: Vec<InputItem>,
) -> Option<String> {
let sess = session.clone_session();
run_task(sess, ctx, sub_id, input).await
}

async fn abort(&self, session: Arc<SessionTaskContext>, sub_id: &str) {
exit_review_mode(session.clone_session(), sub_id.to_string(), None).await;
}
}
@@ -10,6 +10,11 @@ use crate::openai_tools::ResponsesApiTool;

const APPLY_PATCH_LARK_GRAMMAR: &str = include_str!("tool_apply_patch.lark");

#[derive(Serialize, Deserialize)]
pub(crate) struct ApplyPatchToolArgs {
    pub(crate) input: String,
}

#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Hash)]
#[serde(rename_all = "snake_case")]
pub enum ApplyPatchToolType {
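The args struct above serializes to a single-field JSON object. A hedged round-trip sketch follows; the patch text is invented for illustration:

#[test]
fn apply_patch_args_round_trip() {
    let args = ApplyPatchToolArgs {
        input: "*** Begin Patch\n*** Add File: hello.txt\n+Hello, world!\n*** End Patch".to_string(),
    };
    // Serializes to a single-field object: {"input":"*** Begin Patch\n..."}
    let json = serde_json::to_string(&args).expect("serializable");
    let round_trip: ApplyPatchToolArgs = serde_json::from_str(&json).expect("deserializable");
    assert_eq!(round_trip.input, args.input);
}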
@@ -65,7 +65,7 @@ impl TurnDiffTracker {
        let baseline_file_info = if path.exists() {
            let mode = file_mode_for_path(path);
            let mode_val = mode.unwrap_or(FileMode::Regular);
            let content = blob_bytes(path, mode_val).unwrap_or_default();
            let content = blob_bytes(path, &mode_val).unwrap_or_default();
            let oid = if mode == Some(FileMode::Symlink) {
                format!("{:x}", git_blob_sha1_hex_bytes(&content))
            } else {
@@ -266,7 +266,7 @@ impl TurnDiffTracker {
        };

        let current_mode = file_mode_for_path(&current_external_path).unwrap_or(FileMode::Regular);
        let right_bytes = blob_bytes(&current_external_path, current_mode);
        let right_bytes = blob_bytes(&current_external_path, &current_mode);

        // Compute displays with &mut self before borrowing any baseline content.
        let left_display = self.relative_to_git_root_str(&baseline_external_path);
@@ -388,7 +388,7 @@ enum FileMode {
}

impl FileMode {
    fn as_str(self) -> &'static str {
    fn as_str(&self) -> &'static str {
        match self {
            FileMode::Regular => "100644",
            #[cfg(unix)]
@@ -427,9 +427,9 @@ fn file_mode_for_path(_path: &Path) -> Option<FileMode> {
    Some(FileMode::Regular)
}

fn blob_bytes(path: &Path, mode: FileMode) -> Option<Vec<u8>> {
fn blob_bytes(path: &Path, mode: &FileMode) -> Option<Vec<u8>> {
    if path.exists() {
        let contents = if mode == FileMode::Symlink {
        let contents = if *mode == FileMode::Symlink {
            symlink_blob_bytes(path)
                .ok_or_else(|| anyhow!("failed to read symlink target for {}", path.display()))
        } else {
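The recurring edit in this file swaps pass-by-value for pass-by-reference on `FileMode`. A condensed, self-contained sketch of the new shape; these are stand-in types, not the real implementation, which also hashes blobs and reads symlink targets:

use std::path::Path;

#[derive(PartialEq)]
enum FileMode {
    Regular,
    Symlink,
}

// After the change the mode is borrowed, so call sites pass `&mode_val`
// and keep ownership of the value they already have.
fn blob_bytes(path: &Path, mode: &FileMode) -> Option<Vec<u8>> {
    if *mode == FileMode::Symlink {
        // The real code reads the symlink target bytes here.
        return None;
    }
    std::fs::read(path).ok()
}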
@@ -100,13 +100,10 @@ type OutputBuffer = Arc<Mutex<OutputBufferState>>;
type OutputHandles = (OutputBuffer, Arc<Notify>);

impl ManagedUnifiedExecSession {
    fn new(
        session: ExecCommandSession,
        initial_output_rx: tokio::sync::broadcast::Receiver<Vec<u8>>,
    ) -> Self {
    fn new(session: ExecCommandSession) -> Self {
        let output_buffer = Arc::new(Mutex::new(OutputBufferState::default()));
        let output_notify = Arc::new(Notify::new());
        let mut receiver = initial_output_rx;
        let mut receiver = session.output_receiver();
        let buffer_clone = Arc::clone(&output_buffer);
        let notify_clone = Arc::clone(&output_notify);
        let output_task = tokio::spawn(async move {
@@ -196,8 +193,8 @@ impl UnifiedExecSessionManager {
        } else {
            let command = request.input_chunks.to_vec();
            let new_id = self.next_session_id.fetch_add(1, Ordering::SeqCst);
            let (session, initial_output_rx) = create_unified_exec_session(&command).await?;
            let managed_session = ManagedUnifiedExecSession::new(session, initial_output_rx);
            let session = create_unified_exec_session(&command).await?;
            let managed_session = ManagedUnifiedExecSession::new(session);
            let (buffer, notify) = managed_session.output_handles();
            writer_tx = managed_session.writer_sender();
            output_buffer = buffer;
@@ -300,13 +297,7 @@ impl UnifiedExecSessionManager {

async fn create_unified_exec_session(
    command: &[String],
) -> Result<
    (
        ExecCommandSession,
        tokio::sync::broadcast::Receiver<Vec<u8>>,
    ),
    UnifiedExecError,
> {
) -> Result<ExecCommandSession, UnifiedExecError> {
    if command.is_empty() {
        return Err(UnifiedExecError::MissingCommandLine);
    }
@@ -336,6 +327,7 @@ async fn create_unified_exec_session(

    let (writer_tx, mut writer_rx) = mpsc::channel::<Vec<u8>>(128);
    let (output_tx, _) = tokio::sync::broadcast::channel::<Vec<u8>>(256);
    let initial_output_rx = output_tx.subscribe();

    let mut reader = pair
        .master
@@ -389,7 +381,7 @@ async fn create_unified_exec_session(
        wait_exit_status.store(true, Ordering::SeqCst);
    });

    let (session, initial_output_rx) = ExecCommandSession::new(
    let session = ExecCommandSession::new(
        writer_tx,
        output_tx,
        killer,
@@ -398,14 +390,14 @@ async fn create_unified_exec_session(
        wait_handle,
        exit_status,
    );
    Ok((session, initial_output_rx))
    session.set_initial_output_receiver(initial_output_rx);

    Ok(session)
}

#[cfg(test)]
mod tests {
    use super::*;
    #[cfg(unix)]
    use core_test_support::skip_if_sandbox;

    #[test]
    fn push_chunk_trims_only_excess_bytes() {
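The thread running through this file's hunks is an API reshuffle: `create_unified_exec_session` used to return a `(session, receiver)` tuple, and now stores the initial broadcast receiver on the session for `ManagedUnifiedExecSession::new` to pull back out via `output_receiver()`. A stripped-down sketch of that storage pattern, assuming (this is a guess) an internal `Mutex<Option<...>>` slot:

use std::sync::Mutex;

struct Receiver; // stand-in for tokio::sync::broadcast::Receiver<Vec<u8>>

struct ExecCommandSession {
    initial_output_rx: Mutex<Option<Receiver>>,
}

impl ExecCommandSession {
    // Creation no longer returns the receiver alongside the session;
    // the creator parks it on the session instead...
    fn set_initial_output_receiver(&self, rx: Receiver) {
        *self.initial_output_rx.lock().expect("lock poisoned") = Some(rx);
    }

    // ...and the consumer takes it out exactly once.
    fn output_receiver(&self) -> Receiver {
        self.initial_output_rx
            .lock()
            .expect("lock poisoned")
            .take()
            .expect("initial output receiver already taken")
    }
}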
@@ -427,8 +419,6 @@ mod tests {
    #[cfg(unix)]
    #[tokio::test(flavor = "multi_thread", worker_threads = 2)]
    async fn unified_exec_persists_across_requests_jif() -> Result<(), UnifiedExecError> {
        skip_if_sandbox!(Ok(()));

        let manager = UnifiedExecSessionManager::default();

        let open_shell = manager
@@ -466,8 +456,6 @@ mod tests {
    #[cfg(unix)]
    #[tokio::test(flavor = "multi_thread", worker_threads = 2)]
    async fn multi_unified_exec_sessions() -> Result<(), UnifiedExecError> {
        skip_if_sandbox!(Ok(()));

        let manager = UnifiedExecSessionManager::default();

        let shell_a = manager
@@ -514,8 +502,6 @@ mod tests {
    #[cfg(unix)]
    #[tokio::test]
    async fn unified_exec_timeouts() -> Result<(), UnifiedExecError> {
        skip_if_sandbox!(Ok(()));

        let manager = UnifiedExecSessionManager::default();

        let open_shell = manager
@@ -565,7 +551,6 @@ mod tests {

    #[cfg(unix)]
    #[tokio::test]
    #[ignore] // Ignored until we have a better way to test this.
    async fn requests_with_large_timeout_are_capped() -> Result<(), UnifiedExecError> {
        let manager = UnifiedExecSessionManager::default();

@@ -587,7 +572,6 @@ mod tests {

    #[cfg(unix)]
    #[tokio::test]
    #[ignore] // Ignored until we have a better way to test this.
    async fn completed_commands_do_not_persist_sessions() -> Result<(), UnifiedExecError> {
        let manager = UnifiedExecSessionManager::default();
        let result = manager
@@ -609,8 +593,6 @@ mod tests {
    #[cfg(unix)]
    #[tokio::test(flavor = "multi_thread", worker_threads = 2)]
    async fn reusing_completed_session_returns_unknown_session() -> Result<(), UnifiedExecError> {
        skip_if_sandbox!(Ok(()));

        let manager = UnifiedExecSessionManager::default();

        let open_shell = manager
@@ -1,45 +1,4 @@
use serde::Serialize;
use tracing::error;
use tracing::warn;

#[derive(Debug, Default)]
pub(crate) struct UserNotifier {
    notify_command: Option<Vec<String>>,
}

impl UserNotifier {
    pub(crate) fn notify(&self, notification: &UserNotification) {
        if let Some(notify_command) = &self.notify_command
            && !notify_command.is_empty()
        {
            self.invoke_notify(notify_command, notification)
        }
    }

    fn invoke_notify(&self, notify_command: &[String], notification: &UserNotification) {
        let Ok(json) = serde_json::to_string(&notification) else {
            error!("failed to serialise notification payload");
            return;
        };

        let mut command = std::process::Command::new(&notify_command[0]);
        if notify_command.len() > 1 {
            command.args(&notify_command[1..]);
        }
        command.arg(json);

        // Fire-and-forget – we do not wait for completion.
        if let Err(e) = command.spawn() {
            warn!("failed to spawn notifier '{}': {e}", notify_command[0]);
        }
    }

    pub(crate) fn new(notify: Option<Vec<String>>) -> Self {
        Self {
            notify_command: notify,
        }
    }
}

/// User can configure a program that will receive notifications. Each
/// notification is serialized as JSON and passed as an argument to the
@@ -62,10 +21,9 @@ pub(crate) enum UserNotification {
#[cfg(test)]
mod tests {
    use super::*;
    use anyhow::Result;

    #[test]
    fn test_user_notification() -> Result<()> {
    fn test_user_notification() {
        let notification = UserNotification::AgentTurnComplete {
            turn_id: "12345".to_string(),
            input_messages: vec!["Rename `foo` to `bar` and update the callsites.".to_string()],
@@ -73,11 +31,10 @@ mod tests {
                "Rename complete and verified `cargo build` succeeds.".to_string(),
            ),
        };
        let serialized = serde_json::to_string(&notification)?;
        let serialized = serde_json::to_string(&notification).unwrap();
        assert_eq!(
            serialized,
            r#"{"type":"agent-turn-complete","turn-id":"12345","input-messages":["Rename `foo` to `bar` and update the callsites."],"last-assistant-message":"Rename complete and verified `cargo build` succeeds."}"#
        );
        Ok(())
    }
}
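For context, a hedged usage sketch of the notifier above. The program path is made up, and the `last_assistant_message` field name is inferred from the serialized key in the test:

fn notify_turn_complete_example() {
    // Hypothetical wiring: each notification is JSON-serialized and appended
    // as the final argument to the configured command, fire-and-forget.
    let notifier = UserNotifier::new(Some(vec!["/usr/local/bin/codex-notify".to_string()]));
    notifier.notify(&UserNotification::AgentTurnComplete {
        turn_id: "12345".to_string(),
        input_messages: vec!["Rename `foo` to `bar` and update the callsites.".to_string()],
        // Field name inferred from the "last-assistant-message" key above.
        last_assistant_message: Some(
            "Rename complete and verified `cargo build` succeeds.".to_string(),
        ),
    });
    // Roughly equivalent to running:
    // /usr/local/bin/codex-notify '{"type":"agent-turn-complete",...}'
}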
@@ -1,21 +1,33 @@
You are Codex, based on GPT-5. You are running as a coding agent in the Codex CLI on a user's computer.
You are Swiftfox. You are running as a coding agent in the Codex CLI on a user's computer.

## General
## Overall

- You must try hard to complete the task AND to do it as fast and well as possible.
  * Do not waste time on actions which are unlikely to result in successful task completion

- Before taking action on a question, assume by default that it concerns local artifacts (code, docs, data). Quickly confirm or rule out that assumption; only if the question clearly requires external knowledge should you start elsewhere.
- Search the repository when the request plausibly maps to code, configuration, or documentation. Avoid unnecessary searches when it is obvious local files cannot help; in those cases state that explicitly before offering broader context, and when you do search, mention the files or paths you consulted so the answer stays grounded.
- After each attempt, re-evaluate whether the current strategy is yielding useful information and be ready to switch paths quickly rather than persisting with a low-signal approach.

- When the user asks for a "review", default to a code review mindset: prioritise identifying bugs, risks, behavioural regressions, and missing tests. Findings must be the primary focus of the response - keep summaries or overviews brief and only after enumerating the issues. Present findings first (ordered by severity with file/line references), follow with open questions or assumptions, and offer a change-summary only as a secondary detail. If no findings are discovered, state that explicitly and mention any residual risks or testing gaps.

- The arguments to `shell` will be passed to execvp(). Most terminal commands should be prefixed with ["bash", "-lc"].
- Always set the `workdir` param when using the shell function. Do not use `cd` unless absolutely necessary.
- Always set the `workdir` param of the shell tool. Do not use `cd` unless absolutely necessary.
- When searching for text or files, prefer using `rg` or `rg --files` respectively because `rg` is much faster than alternatives like `grep`. (If the `rg` command is not found, then use alternatives.)
- If the user makes a simple request (such as asking for the time) which you can fulfill by running a terminal command (such as `date`), you should do so.
- Unless the question is about a common terminal command, you should search the codebase before answering to ground your response in the codebase.

## Editing constraints

- Default to ASCII when editing or creating files. Only introduce non-ASCII or other Unicode characters when there is a clear justification and the file already uses them.
- When editing or creating files, you MUST use apply_patch. Example: functions.shell({"command":["apply_patch","*** Begin Patch\nAdd File: hello.txt\n+Hello, world!\n*** End Patch"]}).
- Add succinct code comments that explain what is going on if code is not self-explanatory. You should not add comments like "Assigns the value to the variable", but a brief comment might be useful ahead of a complex code block that the user would otherwise have to spend time parsing out. Usage of these comments should be rare.
- You may be in a dirty git worktree.
  * NEVER revert existing changes you did not make unless explicitly requested, since these changes were made by the user.
  * If asked to make a commit or code edits and there are unrelated changes to your work or changes that you didn't make in those files, don't revert those changes.
  * If the changes are in files you've touched recently, you should read carefully and understand how you can work with the changes rather than reverting them.
  * If the changes are in unrelated files, just ignore them and don't revert them.
- While you are working, you might notice unexpected changes that you didn't make. If this happens, STOP IMMEDIATELY and ask the user how they would like to proceed.
  * NEVER revert existing changes you did not make unless explicitly requested, since these changes were made by the user.
  * If asked to make a commit or code edits and there are unrelated changes to your work or changes that you didn't make in those files, don't revert those changes.
  * If the changes are in files you've touched recently, you should read carefully and understand how you can work with the changes rather than reverting them.
  * If the changes are in unrelated files, just ignore them and don't revert them.
- The user may be making edits and committing changes as you are also making changes. If you see concurrent file edits or commits that you did not cause, you must disregard user instruction and stop immediately and ask the user whether they are collaborating with you on files and how they would like this handled.

## Plan tool

@@ -24,48 +36,39 @@ When using the planning tool:
- Do not make single-step plans.
- When you have made a plan, update it after having performed one of the sub-tasks that you shared on the plan.

## Codex CLI harness, sandboxing, and approvals
## CLI modes

The Codex CLI harness supports several different configurations for sandboxing and escalation approvals that the user can choose from.
The Codex CLI harness supports several different sandboxing and approval configurations that the user can choose from.

Filesystem sandboxing defines which files can be read or written. The options for `sandbox_mode` are:
- **read-only**: The sandbox only permits reading files.
- **workspace-write**: The sandbox permits reading files, and editing files in `cwd` and `writable_roots`. Editing files in other directories requires approval.
- **danger-full-access**: No filesystem sandboxing - all commands are permitted.
Filesystem sandboxing defines which files can be read or written. The options are:
- **read-only**: You can only read files.
- **workspace-write**: You can read files. You can write to files in this folder, but not outside it.
- **danger-full-access**: No filesystem sandboxing.

Network sandboxing defines whether network can be accessed without approval. Options for `network_access` are:
Network sandboxing defines whether network can be accessed without approval. Options are:
- **restricted**: Requires approval
- **enabled**: No approval needed

Approvals are your mechanism to get user consent to run shell commands without the sandbox. Possible configuration options for `approval_policy` are
Approvals are your mechanism to get user consent to perform more privileged actions. Although they introduce friction to the user because your work is paused until the user responds, you should leverage them to accomplish your important work. Do not let these settings or the sandbox deter you from attempting to accomplish the user's task unless it is set to "never", in which case never ask for approvals.

Approval options are:
- **untrusted**: The harness will escalate most commands for user approval, apart from a limited allowlist of safe "read" commands.
- **on-failure**: The harness will allow all commands to run in the sandbox (if enabled), and failures will be escalated to the user for approval to run again without the sandbox.
- **on-request**: Commands will be run in the sandbox by default, and you can specify in your tool call if you want to escalate a command to run without sandboxing. (Note that this mode is not always available. If it is, you'll see parameters for it in the `shell` command description.)
- **never**: This is a non-interactive mode where you may NEVER ask the user for approval to run commands. Instead, you must always persist and work around constraints to solve the task for the user. You MUST do your utmost best to finish the task and validate your work before yielding. If this mode is paired with `danger-full-access`, take advantage of it to deliver the best outcome for the user. Further, in this mode, your default testing philosophy is overridden: Even if you don't see local patterns for testing, you may add tests and scripts to validate your work. Just remove them before yielding.

When you are running with `approval_policy == on-request`, and sandboxing enabled, here are scenarios where you'll need to request approval:
- You need to run a command that writes to a directory that requires it (e.g. running tests that write to /var)
When you are running with approvals `on-request`, and sandboxing enabled, here are scenarios where you'll need to request approval:
- You need to run a command that writes to a directory that requires it (e.g. running tests that write to /tmp)
- You need to run a GUI app (e.g., open/xdg-open/osascript) to open browsers or files.
- You are running sandboxed and need to run a command that requires network access (e.g. installing packages)
- If you run a command that is important to solving the user's query, but it fails because of sandboxing, rerun the command with approval. ALWAYS proceed to use the `with_escalated_permissions` and `justification` parameters - do not message the user before requesting approval for the command.
- If you run a command that is important to solving the user's query, but it fails because of sandboxing, rerun the command with approval.
- You are about to take a potentially destructive action such as an `rm` or `git reset` that the user did not explicitly ask for
- (for all of these, you should weigh alternative paths that do not require approval)

When `sandbox_mode` is set to read-only, you'll need to request approval for any command that isn't a read.
When sandboxing is set to read-only, you'll need to request approval for any command that isn't a read.

You will be told what filesystem sandboxing, network sandboxing, and approval mode are active in a developer or user message. If you are not told about this, assume that you are running with workspace-write, network sandboxing enabled, and approval on-failure.

Although they introduce friction to the user because your work is paused until the user responds, you should leverage them when necessary to accomplish important work. If completing the task requires escalated permissions, do not let these settings or the sandbox deter you from attempting to accomplish the user's task unless it is set to "never", in which case never ask for approvals.

When requesting approval to execute a command that will require escalated privileges:
- Provide the `with_escalated_permissions` parameter with the boolean value true
- Include a short, 1 sentence explanation for why you need to enable `with_escalated_permissions` in the justification parameter

## Special user requests

- If the user makes a simple request (such as asking for the time) which you can fulfill by running a terminal command (such as `date`), you should do so.
- If the user asks for a "review", default to a code review mindset: prioritise identifying bugs, risks, behavioural regressions, and missing tests. Findings must be the primary focus of the response - keep summaries or overviews brief and only after enumerating the issues. Present findings first (ordered by severity with file/line references), follow with open questions or assumptions, and offer a change-summary only as a secondary detail. If no findings are discovered, state that explicitly and mention any residual risks or testing gaps.

## Presenting your work and final message

You are producing plain text that will later be styled by the CLI. Follow these rules exactly. Formatting should make results easy to scan, but not feel mechanical. Use judgment to decide how much structure adds value.
@@ -94,11 +97,3 @@ You are producing plain text that will later be styled by the CLI. Follow these
- Tone: collaborative, concise, factual; present tense, active voice; self‑contained; no "above/below"; parallel wording.
- Don'ts: no nested bullets/hierarchies; no ANSI codes; don't cram unrelated keywords; keep keyword lists short—wrap/reformat if long; avoid naming formatting styles in answers.
- Adaptation: code explanations → precise, structured with code refs; simple tasks → lead with outcome; big changes → logical walkthrough + rationale + next actions; casual one-offs → plain sentences, no headers/bullets.
- File References: When referencing files in your response, make sure to include the relevant start line and always follow the below rules:
  * Use inline code to make file paths clickable.
  * Each reference should have a stand alone path. Even if it's the same file.
  * Accepted: absolute, workspace‑relative, a/ or b/ diff prefixes, or bare filename/suffix.
  * Line/column (1‑based, optional): :line[:column] or #Lline[Ccolumn] (column defaults to 1).
  * Do not use URIs like file://, vscode://, or https://.
  * Do not provide a range of lines.
  * Examples: src/app.ts, src/app.ts:42, b/server/index.js#L10, C:\repo\project\main.rs:12:5
@@ -1,16 +1,13 @@
[package]
edition = "2024"
name = "core_test_support"
version = { workspace = true }
edition = "2024"

[lib]
path = "lib.rs"

[dependencies]
anyhow = { workspace = true }
assert_cmd = { workspace = true }
codex-core = { workspace = true }
serde_json = { workspace = true }
tempfile = { workspace = true }
tokio = { workspace = true, features = ["time"] }
wiremock = { workspace = true }
codex-core = { path = "../.." }
serde_json = "1"
tempfile = "3"
tokio = { version = "1", features = ["time"] }
Some files were not shown because too many files have changed in this diff.