Mirror of https://github.com/openai/codex.git (synced 2026-02-01 22:47:52 +00:00)

Compare commits: pr2239 ... dh--apply- (191 commits)
Commits in this range (SHA1, newest first):

76a1495d00 7f7d1e30f3 568d6f819f 251c4c2ba9 a6c346b9e1 e307040f10 7d67e54628 295ca27e98
7b20db942a ee2ccb5cb6 8b49346657 e49116a4c5 517ffd00c6 4157788310 32bbbbad61 c6a52d611c
363636f5eb 957d44918d eca97d8559 09819d9b47 e3b03eaccb 311ad0ce26 5fa7d46ddf d994019f3f
6de9541f0a 85099017fd a5b2ebb49b 697c7cf4bf 34ac698bef 097782c775 8ba8089592 57c498159a
bbf42f4e12 6f0b499594 236c4f76a6 dc42ec0eb4 cdc77c10fb c5d21a4564 59f6b1654f 80b00a193e
76dc3f6054 e4c275d615 9f71dcbf57 750ca9e21d 5fac7b2566 24c7be7da0 4b4aa2a774 16d16a4ddc
9604671678 db934e438e 5f6e1af1a5 8ad56be06e d2b2a6d13a 74683bab91 dacff9675a 697b4ce100
9193eb6b53 e95cad1946 2ec5a28528 050b9baeb6 5ab30c73f3 250ae37c84 c579ae41ae 0d12380c3b
1a1516a80b 61bbabe7d9 8481eb4c6e 0ad4e11c84 ee8c4ad23a 202af12926 ce434b1219 d1f1e36836
eaae56a1b0 77148a5c61 17c98a7fd3 bc298e47ca 0d6678936f e58125e6c1 50c48e88f5 aafa00dbe0
1f5638b0f3 783654e218 e91c3d6d1c 8f544153a7 9d3124c6b7 7b4313bf31 16f11a89d8 e7e5fe91c8
096bca2fa2 97f995a749 f49c934cd0 2aad3a13b8 146985f3ff d5b42ba1ac 7f21634165 5b1989f4d7
d58df28286 38b84ffd43 6e8c055fd5 37e5b087a7 52f0b95102 f9d3dde478 db30a6f5d8 ecb388045c
fc6cfd5ecc c283f9f6ce c9963b52e9 a4f76bd75a 712bfa04ac da69d50c60 be6a4faa45 5bce369c4d
a269754668 b581498882 71cae06e66 350b00d54b 1930ee720a 7a80d3c96c d3078b9adc 379106d3eb
b31c5033a9 1ad8ae2579 c1156a878b dcfdd2faf5 d262244725 7c26c8e091 eda50d8372 17aa394ae7
13ed67cfc1 45d6c74682 265fd89e31 6730592433 26c8373821 6df8e35314 917e29803b 5552688621
76df07350a d0b907d399 a075424437 8bdb4521c9 dd63d61a59 c26d42ab69 e9b597cfa3 afc377bae5
333803ed04 235987843c 6a0f709cff 2ecca79663 a8c7f5391c 992e81d9b5 7038827bf4 20cd61e2a4
fd2b059504 c25f3ea53e 8f11652458 b62c2d9552 475ba13479 544980c008 b42e679227 585f7b0679
cdd33b2c04 cf7a7e63a3 f968a1327a 539f4b290e 085f166707 6d0eb9128e e8ffecd632 f1be7978cf
a62510e0ae e7bad650ff de2c6a2ce7 3a0656df63 bb9ce3cb78 cbf972007a 41eb59a07d 37fc4185ef
d4533a0bb3 99a242ef41 08ed618f72 30ee24521b cb312dfdb4 0159bc7bdb e6dc5a6df5 c991c6ef85
6340acd885 97a27ffc77 12cf0dd868 6c254ca3e7 eaa3969e68 90d892f4fd cb78f2333e
@@ -1,6 +1,6 @@
 [codespell]
 # Ref: https://github.com/codespell-project/codespell#using-a-config-file
-skip = .git*,vendor,*-lock.yaml,*.lock,.codespellrc,*test.ts
+skip = .git*,vendor,*-lock.yaml,*.lock,.codespellrc,*test.ts,*.jsonl
 check-hidden = true
 ignore-regex = ^\s*"image/\S+": ".*|\b(afterAll)\b
 ignore-words-list = ratatui,ser
31 .github/ISSUE_TEMPLATE/4-feature-request.yml (vendored, new file)
@@ -0,0 +1,31 @@
name: 🎁 Feature Request
description: Propose a new feature for Codex
labels:
  - enhancement
  - needs triage
body:
  - type: markdown
    attributes:
      value: |
        Is Codex missing a feature that you'd like to see? Feel free to propose it here.

        Before you submit a feature:
        1. Search existing issues for similar features. If you find one, 👍 it rather than opening a new one.
        2. The Codex team will try to balance the varying needs of the community when prioritizing or rejecting new features. Not all features will be accepted. See [Contributing](https://github.com/openai/codex#contributing) for more details.

  - type: textarea
    id: feature
    attributes:
      label: What feature would you like to see?
    validations:
      required: true
  - type: textarea
    id: author
    attributes:
      label: Are you interested in implementing this feature?
      description: Please wait for acknowledgement before implementing or opening a PR.
  - type: textarea
    id: notes
    attributes:
      label: Additional information
      description: Is there anything else you think we should know?
6 .github/actions/codex/bun.lock (vendored)
@@ -9,7 +9,7 @@
   },
   "devDependencies": {
     "@types/bun": "^1.2.20",
-    "@types/node": "^24.2.1",
+    "@types/node": "^24.3.0",
     "prettier": "^3.6.2",
     "typescript": "^5.9.2",
   },
@@ -50,7 +50,7 @@
 "@types/bun": ["@types/bun@1.2.20", "", { "dependencies": { "bun-types": "1.2.20" } }, "sha512-dX3RGzQ8+KgmMw7CsW4xT5ITBSCrSbfHc36SNT31EOUg/LA9JWq0VDdEXDRSe1InVWpd2yLUM1FUF/kEOyTzYA=="],

-"@types/node": ["@types/node@24.2.1", "", { "dependencies": { "undici-types": "~7.10.0" } }, "sha512-DRh5K+ka5eJic8CjH7td8QpYEV6Zo10gfRkjHCO3weqZHWDtAaSTFtl4+VMqOJ4N5jcuhZ9/l+yy8rVgw7BQeQ=="],
+"@types/node": ["@types/node@24.3.0", "", { "dependencies": { "undici-types": "~7.10.0" } }, "sha512-aPTXCrfwnDLj4VvXrm+UUCQjNEvJgNA8s5F1cvwQU+3KNltTOkBm1j30uNLyqqPNe7gE3KFzImYoZEfLhp4Yow=="],

 "@types/react": ["@types/react@19.1.8", "", { "dependencies": { "csstype": "^3.0.2" } }, "sha512-AwAfQ2Wa5bCx9WP8nZL2uMZWod7J7/JSplxbTmBQ5ms6QpqNYm672H0Vu9ZVKVngQ+ii4R/byguVEUZQyeg44g=="],

@@ -82,6 +82,8 @@
 "@octokit/plugin-rest-endpoint-methods/@octokit/types": ["@octokit/types@12.6.0", "", { "dependencies": { "@octokit/openapi-types": "^20.0.0" } }, "sha512-1rhSOfRa6H9w4YwK0yrf5faDaDTb+yLyBUKOCV4xtCDB5VmIPqd/v9yr9o6SAzOAlRxMiRiCic6JVM1/kunVkw=="],

+"bun-types/@types/node": ["@types/node@24.2.1", "", { "dependencies": { "undici-types": "~7.10.0" } }, "sha512-DRh5K+ka5eJic8CjH7td8QpYEV6Zo10gfRkjHCO3weqZHWDtAaSTFtl4+VMqOJ4N5jcuhZ9/l+yy8rVgw7BQeQ=="],
+
 "@octokit/plugin-paginate-rest/@octokit/types/@octokit/openapi-types": ["@octokit/openapi-types@20.0.0", "", {}, "sha512-EtqRBEjp1dL/15V7WiX5LJMIxxkdiGJnabzYx5Apx4FkQIFgAfKumXeYAqqJCj1s+BMX4cPFIFC4OLCR6stlnA=="],

 "@octokit/plugin-rest-endpoint-methods/@octokit/types/@octokit/openapi-types": ["@octokit/openapi-types@20.0.0", "", {}, "sha512-EtqRBEjp1dL/15V7WiX5LJMIxxkdiGJnabzYx5Apx4FkQIFgAfKumXeYAqqJCj1s+BMX4cPFIFC4OLCR6stlnA=="],
2 .github/actions/codex/package.json (vendored)
@@ -14,7 +14,7 @@
   },
   "devDependencies": {
     "@types/bun": "^1.2.20",
-    "@types/node": "^24.2.1",
+    "@types/node": "^24.3.0",
     "prettier": "^3.6.2",
     "typescript": "^5.9.2"
   }
4 .github/dependabot.yaml (vendored)
@@ -24,3 +24,7 @@ updates:
     directory: /
     schedule:
      interval: weekly
+  - package-ecosystem: rust-toolchain
+    directory: codex-rs
+    schedule:
+      interval: weekly
4 .github/dotslash-config.json (vendored)
@@ -17,6 +17,10 @@
     "linux-aarch64": {
       "regex": "^codex-aarch64-unknown-linux-musl\\.zst$",
       "path": "codex"
     },
+    "windows-x86_64": {
+      "regex": "^codex-x86_64-pc-windows-msvc\\.exe\\.zst$",
+      "path": "codex.exe"
+    }
   }
 }
6 .github/pull_request_template.md (vendored, new file)
@@ -0,0 +1,6 @@
# External (non-OpenAI) Pull Request Requirements

Before opening this Pull Request, please read the "Contributing" section of the README or your PR may be closed:
https://github.com/openai/codex#contributing

If your PR conforms to our contribution guidelines, replace this text with a detailed and high quality description of your changes.
2 .github/workflows/ci.yml (vendored)
@@ -12,7 +12,7 @@ jobs:
       NODE_OPTIONS: --max-old-space-size=4096
     steps:
       - name: Checkout repository
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5

       - name: Setup Node.js
         uses: actions/setup-node@v4
2 .github/workflows/codespell.yml (vendored)
@@ -18,7 +18,7 @@ jobs:

     steps:
       - name: Checkout
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
       - name: Annotate locations with typos
        uses: codespell-project/codespell-problem-matcher@b80729f885d32f78a716c2f107b4db1025001c42 # v1
       - name: Codespell
6 .github/workflows/codex.yml (vendored)
@@ -37,9 +37,9 @@ jobs:
       # Codex is not going to run.

       - name: Checkout repository
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5

-      - uses: dtolnay/rust-toolchain@1.88
+      - uses: dtolnay/rust-toolchain@1.89
         with:
           targets: x86_64-unknown-linux-gnu
           components: clippy
@@ -52,7 +52,7 @@ jobs:
             ~/.cargo/registry/cache/
             ~/.cargo/git/db/
             ${{ github.workspace }}/codex-rs/target/
-          key: cargo-ubuntu-24.04-x86_64-unknown-linux-gnu-${{ hashFiles('**/Cargo.lock') }}
+          key: cargo-ubuntu-24.04-x86_64-unknown-linux-gnu-dev-${{ hashFiles('**/Cargo.lock') }}

       # Note it is possible that the `verify` step internal to Run Codex will
       # fail, in which case the work to setup the repo was worthless :(
135 .github/workflows/rust-ci.yml (vendored)
@@ -1,42 +1,76 @@
 name: rust-ci
 on:
-  pull_request:
-    branches:
-      - main
-    paths:
-      - "codex-rs/**"
-      - ".github/**"
+  pull_request: {}
   push:
     branches:
       - main

   workflow_dispatch:

-# For CI, we build in debug (`--profile dev`) rather than release mode so we
-# get signal faster.
+# CI builds in debug (dev) for faster signal.

 jobs:
-  # CI that don't need specific targets
+  # --- Detect what changed (always runs) -------------------------------------
+  changed:
+    name: Detect changed areas
+    runs-on: ubuntu-24.04
+    outputs:
+      codex: ${{ steps.detect.outputs.codex }}
+      workflows: ${{ steps.detect.outputs.workflows }}
+    steps:
+      - uses: actions/checkout@v5
+        with:
+          fetch-depth: 0
+      - name: Detect changed paths (no external action)
+        id: detect
+        shell: bash
+        run: |
+          set -euo pipefail
+
+          if [[ "${{ github.event_name }}" == "pull_request" ]]; then
+            BASE_SHA='${{ github.event.pull_request.base.sha }}'
+            echo "Base SHA: $BASE_SHA"
+            # List files changed between base and current HEAD (merge-base aware)
+            mapfile -t files < <(git diff --name-only --no-renames "$BASE_SHA"...HEAD)
+          else
+            # On push / manual runs, default to running everything
+            files=("codex-rs/force" ".github/force")
+          fi
+
+          codex=false
+          workflows=false
+          for f in "${files[@]}"; do
+            [[ $f == codex-rs/* ]] && codex=true
+            [[ $f == .github/* ]] && workflows=true
+          done
+
+          echo "codex=$codex" >> "$GITHUB_OUTPUT"
+          echo "workflows=$workflows" >> "$GITHUB_OUTPUT"
+
+  # --- CI that doesn't need specific targets ---------------------------------
   general:
     name: Format / etc
     runs-on: ubuntu-24.04
+    needs: changed
+    if: ${{ needs.changed.outputs.codex == 'true' || needs.changed.outputs.workflows == 'true' || github.event_name == 'push' }}
     defaults:
       run:
         working-directory: codex-rs

     steps:
-      - uses: actions/checkout@v4
-      - uses: dtolnay/rust-toolchain@1.88
+      - uses: actions/checkout@v5
+      - uses: dtolnay/rust-toolchain@1.89
         with:
           components: rustfmt
       - name: cargo fmt
         run: cargo fmt -- --config imports_granularity=Item --check

-  # CI to validate on different os/targets
+  # --- CI to validate on different os/targets --------------------------------
   lint_build_test:
-    name: ${{ matrix.runner }} - ${{ matrix.target }}
+    name: ${{ matrix.runner }} - ${{ matrix.target }}${{ matrix.profile == 'release' && ' (release)' || '' }}
     runs-on: ${{ matrix.runner }}
     timeout-minutes: 30
+    needs: changed
+    # Keep job-level if to avoid spinning up runners when not needed
+    if: ${{ needs.changed.outputs.codex == 'true' || needs.changed.outputs.workflows == 'true' || github.event_name == 'push' }}
     defaults:
       run:
         working-directory: codex-rs
@@ -44,27 +78,41 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
         # Note: While Codex CLI does not support Windows today, we include
         # Windows in CI to ensure the code at least builds there.
         include:
           - runner: macos-14
             target: aarch64-apple-darwin
+            profile: dev
           - runner: macos-14
             target: x86_64-apple-darwin
+            profile: dev
           - runner: ubuntu-24.04
             target: x86_64-unknown-linux-musl
+            profile: dev
           - runner: ubuntu-24.04
             target: x86_64-unknown-linux-gnu
+            profile: dev
           - runner: ubuntu-24.04-arm
             target: aarch64-unknown-linux-musl
+            profile: dev
           - runner: ubuntu-24.04-arm
             target: aarch64-unknown-linux-gnu
+            profile: dev
           - runner: windows-latest
             target: x86_64-pc-windows-msvc
+            profile: dev
+
+          # Also run representative release builds on Mac and Linux because
+          # there could be release-only build errors we want to catch.
+          - runner: macos-14
+            target: aarch64-apple-darwin
+            profile: release
+          - runner: ubuntu-24.04
+            target: x86_64-unknown-linux-musl
+            profile: release

     steps:
-      - uses: actions/checkout@v4
-      - uses: dtolnay/rust-toolchain@1.88
+      - uses: actions/checkout@v5
+      - uses: dtolnay/rust-toolchain@1.89
         with:
           targets: ${{ matrix.target }}
           components: clippy
@@ -77,33 +125,36 @@ jobs:
             ~/.cargo/registry/cache/
             ~/.cargo/git/db/
             ${{ github.workspace }}/codex-rs/target/
-          key: cargo-${{ matrix.runner }}-${{ matrix.target }}-${{ hashFiles('**/Cargo.lock') }}
+          key: cargo-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-${{ hashFiles('**/Cargo.lock') }}

       - if: ${{ matrix.target == 'x86_64-unknown-linux-musl' || matrix.target == 'aarch64-unknown-linux-musl'}}
         name: Install musl build tools
         run: |
-          sudo apt install -y musl-tools pkg-config
+          sudo apt install -y musl-tools pkg-config && sudo rm -rf /var/lib/apt/lists/*

       - name: cargo clippy
         id: clippy
         continue-on-error: true
         run: cargo clippy --target ${{ matrix.target }} --all-features --tests -- -D warnings

       # Running `cargo build` from the workspace root builds the workspace using
       # the union of all features from third-party crates. This can mask errors
       # where individual crates have underspecified features. To avoid this, we
-      # run `cargo build` for each crate individually, though because this is
+      # run `cargo check` for each crate individually, though because this is
       # slower, we only do this for the x86_64-unknown-linux-gnu target.
-      - name: cargo build individual crates
-        id: build
-        if: ${{ matrix.target == 'x86_64-unknown-linux-gnu' }}
+      - name: cargo check individual crates
+        id: cargo_check_all_crates
+        if: ${{ matrix.target == 'x86_64-unknown-linux-gnu' && matrix.profile != 'release' }}
         continue-on-error: true
-        run: find . -name Cargo.toml -mindepth 2 -maxdepth 2 -print0 | xargs -0 -n1 -I{} bash -c 'cd "$(dirname "{}")" && cargo build'
+        run: |
+          find . -name Cargo.toml -mindepth 2 -maxdepth 2 -print0 \
+            | xargs -0 -n1 -I{} bash -c 'cd "$(dirname "{}")" && cargo check --profile ${{ matrix.profile }}'

       - name: cargo test
         id: test
+        # `cargo test` takes too long for release builds to run them on every PR
+        if: ${{ matrix.profile != 'release' }}
         continue-on-error: true
-        run: cargo test --all-features --target ${{ matrix.target }}
+        run: cargo test --all-features --target ${{ matrix.target }} --profile ${{ matrix.profile }}
         env:
           RUST_BACKTRACE: 1

@@ -111,8 +162,32 @@ jobs:
       - name: verify all steps passed
         if: |
           steps.clippy.outcome == 'failure' ||
-          steps.build.outcome == 'failure' ||
+          steps.cargo_check_all_crates.outcome == 'failure' ||
           steps.test.outcome == 'failure'
         run: |
-          echo "One or more checks failed (clippy, build, or test). See logs for details."
+          echo "One or more checks failed (clippy, cargo_check_all_crates, or test). See logs for details."
           exit 1

+  # --- Gatherer job that you mark as the ONLY required status -----------------
+  results:
+    name: CI results (required)
+    needs: [changed, general, lint_build_test]
+    if: always()
+    runs-on: ubuntu-24.04
+    steps:
+      - name: Summarize
+        shell: bash
+        run: |
+          echo "general: ${{ needs.general.result }}"
+          echo "matrix : ${{ needs.lint_build_test.result }}"
+
+          # If nothing relevant changed (PR touching only root README, etc.),
+          # declare success regardless of other jobs.
+          if [[ '${{ needs.changed.outputs.codex }}' != 'true' && '${{ needs.changed.outputs.workflows }}' != 'true' && '${{ github.event_name }}' != 'push' ]]; then
+            echo 'No relevant changes -> CI not required.'
+            exit 0
+          fi
+
+          # Otherwise require the jobs to have succeeded
+          [[ '${{ needs.general.result }}' == 'success' ]] || { echo 'general failed'; exit 1; }
+          [[ '${{ needs.lint_build_test.result }}' == 'success' ]] || { echo 'matrix failed'; exit 1; }
24 .github/workflows/rust-release.yml (vendored)
@@ -19,7 +19,7 @@ jobs:
   tag-check:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v5

       - name: Validate tag matches Cargo.toml version
         shell: bash
@@ -74,8 +74,8 @@ jobs:
             target: x86_64-pc-windows-msvc

     steps:
-      - uses: actions/checkout@v4
-      - uses: dtolnay/rust-toolchain@1.88
+      - uses: actions/checkout@v5
+      - uses: dtolnay/rust-toolchain@1.89
         with:
           targets: ${{ matrix.target }}

@@ -87,7 +87,7 @@ jobs:
             ~/.cargo/registry/cache/
             ~/.cargo/git/db/
             ${{ github.workspace }}/codex-rs/target/
-          key: cargo-release-${{ matrix.runner }}-${{ matrix.target }}-${{ hashFiles('**/Cargo.lock') }}
+          key: cargo-release-${{ matrix.runner }}-${{ matrix.target }}-release-${{ hashFiles('**/Cargo.lock') }}

       - if: ${{ matrix.target == 'x86_64-unknown-linux-musl' || matrix.target == 'aarch64-unknown-linux-musl'}}
         name: Install musl build tools
@@ -117,10 +117,11 @@ jobs:
           dest="dist/${{ matrix.target }}"

           # For compatibility with environments that lack the `zstd` tool we
-          # additionally create a `.tar.gz` alongside every single binary that
-          # we publish. The end result is:
+          # additionally create a `.tar.gz` for all platforms and `.zip` for
+          # Windows alongside every single binary that we publish. The end result is:
           #   codex-<target>.zst (existing)
           #   codex-<target>.tar.gz (new)
+          #   codex-<target>.zip (only for Windows)

           # 1. Produce a .tar.gz for every file in the directory *before* we
           #    run `zstd --rm`, because that flag deletes the original files.
@@ -128,13 +129,20 @@ jobs:
             base="$(basename "$f")"
             # Skip files that are already archives (shouldn't happen, but be
             # safe).
-            if [[ "$base" == *.tar.gz ]]; then
+            if [[ "$base" == *.tar.gz || "$base" == *.zip ]]; then
               continue
             fi

+            # Create per-binary tar.gz
             tar -C "$dest" -czf "$dest/${base}.tar.gz" "$base"

+            # Create zip archive for Windows binaries
+            # Must run from inside the dest dir so 7z won't
+            # embed the directory path inside the zip.
+            if [[ "${{ matrix.runner }}" == windows* ]]; then
+              (cd "$dest" && 7z a "${base}.zip" "$base")
+            fi
+
             # Also create .zst (existing behaviour) *and* remove the original
             # uncompressed binary to keep the directory small.
             zstd -T0 -19 --rm "$dest/$base"
@@ -155,7 +163,7 @@ jobs:

     steps:
       - name: Checkout repository
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5

       - uses: actions/download-artifact@v4
         with:
@@ -1,3 +1,7 @@
 /codex-cli/dist
 /codex-cli/node_modules
 pnpm-lock.yaml
+
+prompt.md
+*_prompt.md
+*_instructions.md
36 .vscode/launch.json (vendored)
@@ -1,18 +1,22 @@
 {
-  "version": "0.2.0",
-  "configurations": [
-    {
-      "type": "lldb",
-      "request": "launch",
-      "name": "Cargo launch",
-      "cargo": {
-        "cwd": "${workspaceFolder}/codex-rs",
-        "args": [
-          "build",
-          "--bin=codex-tui"
-        ]
-      },
-      "args": []
-    }
-  ]
+  "version": "0.2.0",
+  "configurations": [
+    {
+      "type": "lldb",
+      "request": "launch",
+      "name": "Cargo launch",
+      "cargo": {
+        "cwd": "${workspaceFolder}/codex-rs",
+        "args": ["build", "--bin=codex-tui"]
+      },
+      "args": []
+    },
+    {
+      "type": "lldb",
+      "request": "attach",
+      "name": "Attach to running codex CLI",
+      "pid": "${command:pickProcess}",
+      "sourceLanguages": ["rust"]
+    }
+  ]
 }
35 AGENTS.md
@@ -2,12 +2,41 @@

 In the codex-rs folder where the rust code lives:

-- Crate names are prefixed with `codex-`. For examole, the `core` folder's crate is named `codex-core`
+- Crate names are prefixed with `codex-`. For example, the `core` folder's crate is named `codex-core`
 - When using format! and you can inline variables into {}, always do that.
 - Never add or modify any code related to `CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR` or `CODEX_SANDBOX_ENV_VAR`.
 - You operate in a sandbox where `CODEX_SANDBOX_NETWORK_DISABLED=1` will be set whenever you use the `shell` tool. Any existing code that uses `CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR` was authored with this fact in mind. It is often used to early exit out of tests that the author knew you would not be able to run given your sandbox limitations.
 - Similarly, when you spawn a process using Seatbelt (`/usr/bin/sandbox-exec`), `CODEX_SANDBOX=seatbelt` will be set on the child process. Integration tests that want to run Seatbelt themselves cannot be run under Seatbelt, so checks for `CODEX_SANDBOX=seatbelt` are also often used to early exit out of tests, as appropriate.

-Before creating a pull request with changes to `codex-rs`, run `just fmt` (in `codex-rs` directory) to format the code and `just fix` (in `codex-rs` directory) to fix any linter issues in the code, ensure the test suite passes by running `cargo test --all-features` in the `codex-rs` directory.
+Before finalizing a change to `codex-rs`, run `just fmt` (in `codex-rs` directory) to format the code and `just fix -p <project>` (in `codex-rs` directory) to fix any linter issues in the code. Additionally, run the tests:
+
+1. Run the test for the specific project that was changed. For example, if changes were made in `codex-rs/tui`, run `cargo test -p codex-tui`.
+2. Once those pass, if any changes were made in common, core, or protocol, run the complete test suite with `cargo test --all-features`.
+
+When making individual changes prefer running tests on individual files or projects first.
+
 ## TUI style conventions

 See `codex-rs/tui/styles.md`.

+## TUI code conventions
+
+- Use concise styling helpers from ratatui’s Stylize trait.
+- Basic spans: use "text".into()
+- Styled spans: use "text".red(), "text".green(), "text".magenta(), "text".dim(), etc.
+- Prefer these over constructing styles with `Span::styled` and `Style` directly.
+- Example: patch summary file lines
+  - Desired: vec![" └ ".into(), "M".red(), " ".dim(), "tui/src/app.rs".dim()]
+
+## Snapshot tests
+
+This repo uses snapshot tests (via `insta`), especially in `codex-rs/tui`, to validate rendered output. When UI or text output changes intentionally, update the snapshots as follows:
+
+- Run tests to generate any updated snapshots:
+  - `cargo test -p codex-tui`
+- Check what’s pending:
+  - `cargo insta pending-snapshots -p codex-tui`
+- Review changes by reading the generated `*.snap.new` files directly in the repo, or preview a specific file:
+  - `cargo insta show -p codex-tui path/to/file.snap.new`
+- Only if you intend to accept all new snapshots in this crate, run:
+  - `cargo insta accept -p codex-tui`
+
+If you don’t have the tool:
+
+- `cargo install cargo-insta`
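To make the `format!` and Stylize conventions above concrete, here is a minimal Rust sketch; the helper function and file path are invented for illustration, and it assumes ratatui's `Stylize` and `Span` are in scope as they are throughout `codex-rs/tui`:

```rust
use ratatui::style::Stylize;
use ratatui::text::Span;

// Hypothetical helper rendering one patch-summary line per the conventions.
fn summary_line(path: &str) -> Vec<Span<'static>> {
    // format!: inline the variable directly into `{}` rather than passing it
    // as a trailing argument.
    let styled_path = format!("tui/src/{path}");
    // Stylize helpers instead of Span::styled + Style.
    vec![" └ ".into(), "M".red(), " ".dim(), styled_path.dim()]
}
```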
50 README.md
@@ -22,6 +22,7 @@
 - [Authenticate locally and copy your credentials to the "headless" machine](#authenticate-locally-and-copy-your-credentials-to-the-headless-machine)
 - [Connecting through VPS or remote](#connecting-through-vps-or-remote)
 - [Usage-based billing alternative: Use an OpenAI API key](#usage-based-billing-alternative-use-an-openai-api-key)
+- [Forcing a specific auth method (advanced)](#forcing-a-specific-auth-method-advanced)
 - [Choosing Codex's level of autonomy](#choosing-codexs-level-of-autonomy)
 - [**1. Read/write**](#1-readwrite)
 - [**2. Read-only**](#2-read-only)
@@ -165,6 +166,35 @@ Notes:
 - This command only sets the key for your current terminal session, which we recommend. To set it for all future sessions, you can also add the `export` line to your shell's configuration file (e.g., `~/.zshrc`).
 - If you have signed in with ChatGPT, Codex will default to using your ChatGPT credits. If you wish to use your API key, use the `/logout` command to clear your ChatGPT authentication.

+#### Forcing a specific auth method (advanced)
+
+You can explicitly choose which authentication Codex should prefer when both are available.
+
+- To always use your API key (even when ChatGPT auth exists), set:
+
+  ```toml
+  # ~/.codex/config.toml
+  preferred_auth_method = "apikey"
+  ```
+
+  Or override ad-hoc via CLI:
+
+  ```bash
+  codex --config preferred_auth_method="apikey"
+  ```
+
+- To prefer ChatGPT auth (default), set:
+
+  ```toml
+  # ~/.codex/config.toml
+  preferred_auth_method = "chatgpt"
+  ```
+
+Notes:
+
+- When `preferred_auth_method = "apikey"` and an API key is available, the login screen is skipped.
+- When `preferred_auth_method = "chatgpt"` (default), Codex prefers ChatGPT auth if present; if only an API key is present, it will use the API key. Certain account types may also require API-key mode.

 ### Choosing Codex's level of autonomy

 We always recommend running Codex in its default sandbox that gives you strong guardrails around what the agent can do. The default sandbox prevents it from editing files outside its workspace, or from accessing the network.

@@ -353,6 +383,13 @@ base_url = "http://my-ollama.example.com:11434/v1"

 ### Platform sandboxing details

+By default, Codex CLI runs code and shell commands inside a restricted sandbox to protect your system.
+
+> [!IMPORTANT]
+> Not all tool calls are sandboxed. Specifically, **trusted Model Context Protocol (MCP) tool calls** are executed outside of the sandbox.
+> This is intentional: MCP tools are explicitly configured and trusted by you, and they often need to connect to **external applications or services** (e.g. issue trackers, databases, messaging systems).
+> Running them outside the sandbox allows Codex to integrate with these external systems without being blocked by sandbox restrictions.
+
 The mechanism Codex uses to implement the sandbox policy depends on your OS:

 - **macOS 12+** uses **Apple Seatbelt** and runs commands using `sandbox-exec` with a profile (`-p`) that corresponds to the `--sandbox` that was specified.

@@ -566,9 +603,13 @@ We're excited to launch a **$1 million initiative** supporting open source proje

 ## Contributing

-This project is under active development and the code will likely change pretty significantly. We'll update this message once that's complete!
+This project is under active development and the code will likely change pretty significantly.

-More broadly we welcome contributions - whether you are opening your very first pull request or you're a seasoned maintainer. At the same time we care about reliability and long-term maintainability, so the bar for merging code is intentionally **high**. The guidelines below spell out what "high-quality" means in practice and should make the whole process transparent and friendly.
+**At the moment, we only plan to prioritize reviewing external contributions for bugs or security fixes.**
+
+If you want to add a new feature or change the behavior of an existing one, please open an issue proposing the feature and get approval from an OpenAI team member before spending time building it.
+
+**New contributions that don't go through this process may be closed** if they aren't aligned with our current roadmap or conflict with other priorities/upcoming features.

 ### Development workflow

@@ -593,8 +634,9 @@ More broadly we welcome contributions - whether you are opening your very first
 ### Review process

 1. One maintainer will be assigned as a primary reviewer.
-2. We may ask for changes - please do not take this personally. We value the work, we just also value consistency and long-term maintainability.
-3. When there is consensus that the PR meets the bar, a maintainer will squash-and-merge.
+2. If your PR adds a new feature that was not previously discussed and approved, we may choose to close your PR (see [Contributing](#contributing)).
+3. We may ask for changes - please do not take this personally. We value the work, but we also value consistency and long-term maintainability.
+5. When there is consensus that the PR meets the bar, a maintainer will squash-and-merge.

 ### Community values
@@ -43,7 +43,7 @@ switch (platform) {
     targetTriple = "x86_64-pc-windows-msvc.exe";
     break;
   case "arm64":
-  // We do not build this today, fall through...
+  // We do not build this today, fall through...
   default:
     break;
 }
@@ -65,9 +65,43 @@ const binaryPath = path.join(__dirname, "..", "bin", `codex-${targetTriple}`);
 // receives a fatal signal, both processes exit in a predictable manner.
 const { spawn } = await import("child_process");

+async function tryImport(moduleName) {
+  try {
+    // eslint-disable-next-line node/no-unsupported-features/es-syntax
+    return await import(moduleName);
+  } catch (err) {
+    return null;
+  }
+}
+
+async function resolveRgDir() {
+  const ripgrep = await tryImport("@vscode/ripgrep");
+  if (!ripgrep?.rgPath) {
+    return null;
+  }
+  return path.dirname(ripgrep.rgPath);
+}
+
+function getUpdatedPath(newDirs) {
+  const pathSep = process.platform === "win32" ? ";" : ":";
+  const existingPath = process.env.PATH || "";
+  const updatedPath = [
+    ...newDirs,
+    ...existingPath.split(pathSep).filter(Boolean),
+  ].join(pathSep);
+  return updatedPath;
+}
+
+const additionalDirs = [];
+const rgDir = await resolveRgDir();
+if (rgDir) {
+  additionalDirs.push(rgDir);
+}
+const updatedPath = getUpdatedPath(additionalDirs);
+
 const child = spawn(binaryPath, process.argv.slice(2), {
   stdio: "inherit",
-  env: { ...process.env, CODEX_MANAGED_BY_NPM: "1" },
+  env: { ...process.env, PATH: updatedPath, CODEX_MANAGED_BY_NPM: "1" },
 });

 child.on("error", (err) => {
@@ -120,4 +154,3 @@ if (childResult.type === "signal") {
 } else {
   process.exit(childResult.exitCode);
 }
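The change above prepends the bundled ripgrep directory to `PATH` so the spawned Codex binary resolves `rg` without a global install. For comparison, the same PATH-prepending idea sketched in Rust; this is a hypothetical helper, not code from this repo:

```rust
// Builds a PATH value with `new_dirs` ahead of the current entries, using
// the platform's separator, so bundled tools win lookup.
fn updated_path(new_dirs: &[&str]) -> String {
    let sep = if cfg!(windows) { ";" } else { ":" };
    let existing = std::env::var("PATH").unwrap_or_default();
    new_dirs
        .iter()
        .copied()
        .chain(existing.split(sep).filter(|p| !p.is_empty()))
        .collect::<Vec<_>>()
        .join(sep)
}

fn main() {
    // e.g. put a bundled ripgrep directory first on PATH for child processes.
    let path = updated_path(&["/opt/ripgrep/bin"]);
    println!("{path}");
}
```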
119 codex-cli/package-lock.json (generated, new file)
@@ -0,0 +1,119 @@
{
  "name": "@openai/codex",
  "version": "0.0.0-dev",
  "lockfileVersion": 3,
  "requires": true,
  "packages": {
    "": {
      "name": "@openai/codex",
      "version": "0.0.0-dev",
      "license": "Apache-2.0",
      "dependencies": {
        "@vscode/ripgrep": "^1.15.14"
      },
      "bin": {
        "codex": "bin/codex.js"
      },
      "engines": {
        "node": ">=20"
      }
    },
    "node_modules/@vscode/ripgrep": {
      "version": "1.15.14",
      "resolved": "https://registry.npmjs.org/@vscode/ripgrep/-/ripgrep-1.15.14.tgz",
      "integrity": "sha512-/G1UJPYlm+trBWQ6cMO3sv6b8D1+G16WaJH1/DSqw32JOVlzgZbLkDxRyzIpTpv30AcYGMkCf5tUqGlW6HbDWw==",
      "hasInstallScript": true,
      "license": "MIT",
      "dependencies": {
        "https-proxy-agent": "^7.0.2",
        "proxy-from-env": "^1.1.0",
        "yauzl": "^2.9.2"
      }
    },
    "node_modules/agent-base": {
      "version": "7.1.4",
      "resolved": "https://registry.npmjs.org/agent-base/-/agent-base-7.1.4.tgz",
      "integrity": "sha512-MnA+YT8fwfJPgBx3m60MNqakm30XOkyIoH1y6huTQvC0PwZG7ki8NacLBcrPbNoo8vEZy7Jpuk7+jMO+CUovTQ==",
      "license": "MIT",
      "engines": {
        "node": ">= 14"
      }
    },
    "node_modules/buffer-crc32": {
      "version": "0.2.13",
      "resolved": "https://registry.npmjs.org/buffer-crc32/-/buffer-crc32-0.2.13.tgz",
      "integrity": "sha512-VO9Ht/+p3SN7SKWqcrgEzjGbRSJYTx+Q1pTQC0wrWqHx0vpJraQ6GtHx8tvcg1rlK1byhU5gccxgOgj7B0TDkQ==",
      "license": "MIT",
      "engines": {
        "node": "*"
      }
    },
    "node_modules/debug": {
      "version": "4.4.1",
      "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.1.tgz",
      "integrity": "sha512-KcKCqiftBJcZr++7ykoDIEwSa3XWowTfNPo92BYxjXiyYEVrUQh2aLyhxBCwww+heortUFxEJYcRzosstTEBYQ==",
      "license": "MIT",
      "dependencies": {
        "ms": "^2.1.3"
      },
      "engines": {
        "node": ">=6.0"
      },
      "peerDependenciesMeta": {
        "supports-color": {
          "optional": true
        }
      }
    },
    "node_modules/fd-slicer": {
      "version": "1.1.0",
      "resolved": "https://registry.npmjs.org/fd-slicer/-/fd-slicer-1.1.0.tgz",
      "integrity": "sha512-cE1qsB/VwyQozZ+q1dGxR8LBYNZeofhEdUNGSMbQD3Gw2lAzX9Zb3uIU6Ebc/Fmyjo9AWWfnn0AUCHqtevs/8g==",
      "license": "MIT",
      "dependencies": {
        "pend": "~1.2.0"
      }
    },
    "node_modules/https-proxy-agent": {
      "version": "7.0.6",
      "resolved": "https://registry.npmjs.org/https-proxy-agent/-/https-proxy-agent-7.0.6.tgz",
      "integrity": "sha512-vK9P5/iUfdl95AI+JVyUuIcVtd4ofvtrOr3HNtM2yxC9bnMbEdp3x01OhQNnjb8IJYi38VlTE3mBXwcfvywuSw==",
      "license": "MIT",
      "dependencies": {
        "agent-base": "^7.1.2",
        "debug": "4"
      },
      "engines": {
        "node": ">= 14"
      }
    },
    "node_modules/ms": {
      "version": "2.1.3",
      "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz",
      "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==",
      "license": "MIT"
    },
    "node_modules/pend": {
      "version": "1.2.0",
      "resolved": "https://registry.npmjs.org/pend/-/pend-1.2.0.tgz",
      "integrity": "sha512-F3asv42UuXchdzt+xXqfW1OGlVBe+mxa2mqI0pg5yAHZPvFmY3Y6drSf/GQ1A86WgWEN9Kzh/WrgKa6iGcHXLg==",
      "license": "MIT"
    },
    "node_modules/proxy-from-env": {
      "version": "1.1.0",
      "resolved": "https://registry.npmjs.org/proxy-from-env/-/proxy-from-env-1.1.0.tgz",
      "integrity": "sha512-D+zkORCbA9f1tdWRK0RaCR3GPv50cMxcrz4X8k5LTSUD1Dkw47mKJEZQNunItRTkWwgtaUSo1RVFRIG9ZXiFYg==",
      "license": "MIT"
    },
    "node_modules/yauzl": {
      "version": "2.10.0",
      "resolved": "https://registry.npmjs.org/yauzl/-/yauzl-2.10.0.tgz",
      "integrity": "sha512-p4a9I6X6nu6IhoGmBqAcbJy1mlC4j27vEPZX9F4L4/vZT3Lyq1VkFHw/V/PUcB9Buo+DG3iHkT0x3Qya58zc3g==",
      "license": "MIT",
      "dependencies": {
        "buffer-crc32": "~0.2.3",
        "fd-slicer": "~1.1.0"
      }
    }
  }
}
@@ -16,5 +16,11 @@
   "repository": {
     "type": "git",
     "url": "git+https://github.com/openai/codex.git"
   },
+  "dependencies": {
+    "@vscode/ripgrep": "^1.15.14"
+  },
   "devDependencies": {
     "prettier": "^3.3.3"
   }
 }
631 codex-rs/Cargo.lock (generated): file diff suppressed because it is too large.
@@ -15,6 +15,8 @@ members = [
     "mcp-server",
     "mcp-types",
     "ollama",
+    "protocol",
+    "protocol-ts",
     "tui",
 ]
 resolver = "2"
@@ -43,6 +43,12 @@ To run Codex non-interactively, run `codex exec PROMPT` (you can also pass the p

 Typing `@` triggers a fuzzy-filename search over the workspace root. Use up/down to select among the results and Tab or Enter to replace the `@` with the selected path. You can use Esc to cancel the search.

+### Esc–Esc to edit a previous message
+
+When the chat composer is empty, press Esc to prime “backtrack” mode. Press Esc again to open a transcript preview highlighting the last user message; press Esc repeatedly to step to older user messages. Press Enter to confirm and Codex will fork the conversation from that point, trim the visible transcript accordingly, and pre‑fill the composer with the selected user message so you can edit and resubmit it.
+
+In the transcript preview, the footer shows an `Esc edit prev` hint while editing is active.
+
 ### `--cd`/`-C` flag

 Sometimes it is not convenient to `cd` to the directory you want Codex to use as the "working root" before running Codex. Fortunately, `codex` supports a `--cd` option so you can specify whatever folder you want. You can confirm that Codex is honoring `--cd` by double-checking the **workdir** it reports in the TUI at the start of a new session.
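At its core, the `@` fuzzy-filename search described above is an in-order subsequence match over candidate paths. A hedged Rust sketch of that idea follows; it is not the TUI's actual matcher, which also scores and ranks results:

```rust
// Returns true when every character of `query` appears in `path` in order
// (case-insensitive). Real fuzzy matchers additionally rank the candidates.
fn fuzzy_match(query: &str, path: &str) -> bool {
    let mut pending = query.chars().map(|c| c.to_ascii_lowercase());
    let mut next = pending.next();
    for c in path.chars().map(|c| c.to_ascii_lowercase()) {
        if Some(c) == next {
            next = pending.next();
        }
    }
    // All query characters were consumed in order.
    next.is_none()
}

fn main() {
    assert!(fuzzy_match("apy", "src/app.py"));
    assert!(!fuzzy_match("xyz", "src/app.py"));
}
```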
@@ -7,6 +7,10 @@ version = { workspace = true }
 name = "codex_apply_patch"
 path = "src/lib.rs"

+[[bin]]
+name = "apply_patch"
+path = "src/main.rs"
+
 [lints]
 workspace = true

@@ -18,5 +22,6 @@ tree-sitter = "0.25.8"
 tree-sitter-bash = "0.25.0"

 [dev-dependencies]
+assert_cmd = "2"
 pretty_assertions = "1.4.1"
 tempfile = "3.13.0"
@@ -1,17 +1,23 @@
-To edit files, ALWAYS use the `shell` tool with `apply_patch` CLI. `apply_patch` effectively allows you to execute a diff/patch against a file, but the format of the diff specification is unique to this task, so pay careful attention to these instructions. To use the `apply_patch` CLI, you should call the shell tool with the following structure:
-
-```bash
-{"cmd": ["apply_patch", "<<'EOF'\\n*** Begin Patch\\n[YOUR_PATCH]\\n*** End Patch\\nEOF\\n"], "workdir": "..."}
-```
-
-Where [YOUR_PATCH] is the actual content of your patch, specified in the following V4A diff format.
-
-*** [ACTION] File: [path/to/file] -> ACTION can be one of Add, Update, or Delete.
-For each snippet of code that needs to be changed, repeat the following:
-[context_before] -> See below for further instructions on context.
-- [old_code] -> Precede the old code with a minus sign.
-+ [new_code] -> Precede the new, replacement code with a plus sign.
-[context_after] -> See below for further instructions on context.
-You MUST include a header to specify the action you are taking.
+## `apply_patch`
+
+Use the `apply_patch` shell command to edit files.
+Your patch language is a stripped‑down, file‑oriented diff format designed to be easy to parse and safe to apply. You can think of it as a high‑level envelope:
+
+*** Begin Patch
+[ one or more file sections ]
+*** End Patch
+
+Within that envelope, you get a sequence of file operations.
+Each operation starts with one of three headers:
+
+*** Add File: <path> - create a new file. Every following line is a + line (the initial contents).
+*** Delete File: <path> - remove an existing file. Nothing follows.
+*** Update File: <path> - patch an existing file in place (optionally with a rename).
+
+May be immediately followed by *** Move to: <new path> if you want to rename the file.
+Then one or more “hunks”, each introduced by @@ (optionally followed by a hunk header).
+Within a hunk each line starts with:

 For instructions on [context_before] and [context_after]:
 - By default, show 3 lines of code immediately above and 3 lines immediately below each change. If a change is within 3 lines of a previous change, do NOT duplicate the first change’s [context_after] lines in the second change’s [context_before] lines.
@@ -25,16 +31,45 @@ For instructions on [context_before] and [context_after]:
 - If a code block is repeated so many times in a class or function such that even a single `@@` statement and 3 lines of context cannot uniquely identify the snippet of code, you can use multiple `@@` statements to jump to the right context. For instance:

 @@ class BaseClass
 @@ def method():
 [3 lines of pre-context]
 - [old_code]
 + [new_code]
 [3 lines of post-context]

-Note, then, that we do not use line numbers in this diff format, as the context is enough to uniquely identify code. An example of a message that you might pass as "input" to this function, in order to apply a patch, is shown below.
+The full grammar definition is below:
+Patch := Begin { FileOp } End
+Begin := "*** Begin Patch" NEWLINE
+End := "*** End Patch" NEWLINE
+FileOp := AddFile | DeleteFile | UpdateFile
+AddFile := "*** Add File: " path NEWLINE { "+" line NEWLINE }
+DeleteFile := "*** Delete File: " path NEWLINE
+UpdateFile := "*** Update File: " path NEWLINE [ MoveTo ] { Hunk }
+MoveTo := "*** Move to: " newPath NEWLINE
+Hunk := "@@" [ header ] NEWLINE { HunkLine } [ "*** End of File" NEWLINE ]
+HunkLine := (" " | "-" | "+") text NEWLINE
+
+A full patch can combine several operations:
+
+*** Begin Patch
+*** Add File: hello.txt
++Hello world
+*** Update File: src/app.py
+*** Move to: src/main.py
+@@ def greet():
+-print("Hi")
++print("Hello, world!")
+*** Delete File: obsolete.txt
+*** End Patch
+
+It is important to remember:
+
+- You must include a header with your intended action (Add/Delete/Update)
+- You must prefix new lines with `+` even when creating a new file
+- File references can only be relative, NEVER ABSOLUTE.
-
-```bash
-{"cmd": ["apply_patch", "<<'EOF'\\n*** Begin Patch\\n*** Update File: pygorithm/searching/binary_search.py\\n@@ class BaseClass\\n@@ def search():\\n- pass\\n+ raise NotImplementedError()\\n@@ class Subclass\\n@@ def search():\\n- pass\\n+ raise NotImplementedError()\\n*** End Patch\\nEOF\\n"], "workdir": "..."}
-```
-
-File references can only be relative, NEVER ABSOLUTE. After the apply_patch command is run, it will always say "Done!", regardless of whether the patch was successfully applied or not. However, you can determine if there are issue and errors by looking at any warnings or logging lines printed BEFORE the "Done!" is output.
+
+You can invoke apply_patch like:
+
+```
+shell {"command":["apply_patch","*** Begin Patch\n*** Add File: hello.txt\n+Hello, world!\n*** End Patch\n"]}
+```
@@ -1,5 +1,6 @@
 mod parser;
 mod seek_sequence;
+mod standalone_executable;

 use std::collections::HashMap;
 use std::path::Path;
@@ -19,9 +20,13 @@ use tree_sitter::LanguageError;
 use tree_sitter::Parser;
 use tree_sitter_bash::LANGUAGE as BASH;

+pub use standalone_executable::main;
+
 /// Detailed instructions for gpt-4.1 on how to use the `apply_patch` tool.
 pub const APPLY_PATCH_TOOL_INSTRUCTIONS: &str = include_str!("../apply_patch_tool_instructions.md");

+const APPLY_PATCH_COMMANDS: [&str; 2] = ["apply_patch", "applypatch"];
+
 #[derive(Debug, Error, PartialEq)]
 pub enum ApplyPatchError {
     #[error(transparent)]
@@ -82,7 +87,6 @@ pub struct ApplyPatchArgs {
 }

 pub fn maybe_parse_apply_patch(argv: &[String]) -> MaybeApplyPatch {
-    const APPLY_PATCH_COMMANDS: [&str; 2] = ["apply_patch", "applypatch"];
     match argv {
         [cmd, body] if APPLY_PATCH_COMMANDS.contains(&cmd.as_str()) => match parse_patch(body) {
             Ok(source) => MaybeApplyPatch::Body(source),
@@ -91,7 +95,9 @@ pub fn maybe_parse_apply_patch(argv: &[String]) -> MaybeApplyPatch {
         [bash, flag, script]
             if bash == "bash"
                 && flag == "-lc"
-                && script.trim_start().starts_with("apply_patch") =>
+                && APPLY_PATCH_COMMANDS
+                    .iter()
+                    .any(|cmd| script.trim_start().starts_with(cmd)) =>
         {
             match extract_heredoc_body_from_apply_patch_command(script) {
                 Ok(body) => match parse_patch(&body) {
@@ -166,7 +172,7 @@ impl ApplyPatchAction {
             panic!("path must be absolute");
         }

-        #[allow(clippy::expect_used)]
+        #[expect(clippy::expect_used)]
         let filename = path
             .file_name()
             .expect("path should not be empty")
@@ -179,7 +185,7 @@ impl ApplyPatchAction {
             *** End Patch"#,
         );
         let changes = HashMap::from([(path.to_path_buf(), ApplyPatchFileChange::Add { content })]);
-        #[allow(clippy::expect_used)]
+        #[expect(clippy::expect_used)]
         Self {
             changes,
             cwd: path
@@ -262,7 +268,10 @@ pub fn maybe_parse_apply_patch_verified(argv: &[String], cwd: &Path) -> MaybeApp
 fn extract_heredoc_body_from_apply_patch_command(
     src: &str,
 ) -> std::result::Result<String, ExtractHeredocError> {
-    if !src.trim_start().starts_with("apply_patch") {
+    if !APPLY_PATCH_COMMANDS
+        .iter()
+        .any(|cmd| src.trim_start().starts_with(cmd))
+    {
         return Err(ExtractHeredocError::CommandDidNotStartWithApplyPatch);
     }

@@ -415,12 +424,12 @@ fn apply_hunks_to_files(hunks: &[Hunk]) -> anyhow::Result<AffectedPaths> {
     for hunk in hunks {
         match hunk {
             Hunk::AddFile { path, contents } => {
-                if let Some(parent) = path.parent() {
-                    if !parent.as_os_str().is_empty() {
-                        std::fs::create_dir_all(parent).with_context(|| {
-                            format!("Failed to create parent directories for {}", path.display())
-                        })?;
-                    }
+                if let Some(parent) = path.parent()
+                    && !parent.as_os_str().is_empty()
+                {
+                    std::fs::create_dir_all(parent).with_context(|| {
+                        format!("Failed to create parent directories for {}", path.display())
+                    })?;
                 }
                 std::fs::write(path, contents)
                     .with_context(|| format!("Failed to write file {}", path.display()))?;
@@ -439,15 +448,12 @@ fn apply_hunks_to_files(hunks: &[Hunk]) -> anyhow::Result<AffectedPaths> {
                 let AppliedPatch { new_contents, .. } =
                     derive_new_contents_from_chunks(path, chunks)?;
                 if let Some(dest) = move_path {
-                    if let Some(parent) = dest.parent() {
-                        if !parent.as_os_str().is_empty() {
-                            std::fs::create_dir_all(parent).with_context(|| {
-                                format!(
-                                    "Failed to create parent directories for {}",
-                                    dest.display()
-                                )
-                            })?;
-                        }
+                    if let Some(parent) = dest.parent()
+                        && !parent.as_os_str().is_empty()
+                    {
+                        std::fs::create_dir_all(parent).with_context(|| {
+                            format!("Failed to create parent directories for {}", dest.display())
+                        })?;
                     }
                     std::fs::write(dest, new_contents)
                         .with_context(|| format!("Failed to write file {}", dest.display()))?;
@@ -529,9 +535,12 @@ fn compute_replacements(
         // If a chunk has a `change_context`, we use seek_sequence to find it, then
         // adjust our `line_index` to continue from there.
         if let Some(ctx_line) = &chunk.change_context {
-            if let Some(idx) =
-                seek_sequence::seek_sequence(original_lines, &[ctx_line.clone()], line_index, false)
-            {
+            if let Some(idx) = seek_sequence::seek_sequence(
+                original_lines,
+                std::slice::from_ref(ctx_line),
+                line_index,
+                false,
+            ) {
                 line_index = idx + 1;
             } else {
                 return Err(ApplyPatchError::ComputeReplacements(format!(
@@ -682,8 +691,6 @@ pub fn print_summary(

 #[cfg(test)]
 mod tests {
-    #![allow(clippy::unwrap_used)]
-
     use super::*;
     use pretty_assertions::assert_eq;
     use std::fs;
@@ -775,6 +782,33 @@ PATCH"#,
         }
     }

+    #[test]
+    fn test_heredoc_applypatch() {
+        let args = strs_to_strings(&[
+            "bash",
+            "-lc",
+            r#"applypatch <<'PATCH'
+*** Begin Patch
+*** Add File: foo
++hi
+*** End Patch
+PATCH"#,
+        ]);
+
+        match maybe_parse_apply_patch(&args) {
+            MaybeApplyPatch::Body(ApplyPatchArgs { hunks, patch: _ }) => {
+                assert_eq!(
+                    hunks,
+                    vec![Hunk::AddFile {
+                        path: PathBuf::from("foo"),
+                        contents: "hi\n".to_string()
+                    }]
+                );
+            }
+            result => panic!("expected MaybeApplyPatch::Body got {result:?}"),
+        }
+    }
+
     #[test]
     fn test_add_file_hunk_creates_file_with_contents() {
         let dir = tempdir().unwrap();
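For orientation, here is a hedged usage sketch of the public entry point changed above. It assumes `maybe_parse_apply_patch`, `MaybeApplyPatch`, and `ApplyPatchArgs` are importable from `codex_apply_patch` as the `pub` signatures suggest, and mirrors the `applypatch` spelling exercised by `test_heredoc_applypatch`:

```rust
use codex_apply_patch::ApplyPatchArgs;
use codex_apply_patch::MaybeApplyPatch;
use codex_apply_patch::maybe_parse_apply_patch;

fn main() {
    // Both spellings in APPLY_PATCH_COMMANDS are accepted in the
    // direct [cmd, body] argv form.
    let argv = vec![
        "applypatch".to_string(),
        "*** Begin Patch\n*** Add File: hello.txt\n+Hello, world!\n*** End Patch\n".to_string(),
    ];
    match maybe_parse_apply_patch(&argv) {
        MaybeApplyPatch::Body(ApplyPatchArgs { hunks, .. }) => {
            println!("parsed {} hunk(s)", hunks.len());
        }
        other => eprintln!("not an apply_patch invocation: {other:?}"),
    }
}
```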
3 codex-rs/apply-patch/src/main.rs (new file)
@@ -0,0 +1,3 @@
pub fn main() -> ! {
    codex_apply_patch::main()
}
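The `-> !` ("never") return type advertises that this binary's `main` does not return: `codex_apply_patch::main()`, the re-export added in lib.rs above, is expected to end the process itself. A minimal, purely illustrative sketch of the same pattern:

```rust
// A diverging function: `!` promises the caller it never returns.
fn run() -> ! {
    // ... do the real work here, then exit explicitly.
    std::process::exit(0)
}

fn main() {
    run()
}
```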
@@ -427,7 +427,6 @@ fn parse_update_file_chunk(
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[allow(clippy::unwrap_used)]
|
||||
fn test_parse_patch() {
|
||||
assert_eq!(
|
||||
parse_patch_text("bad", ParseMode::Strict),
|
||||
@@ -733,3 +732,350 @@ fn test_update_file_chunk() {
|
||||
))
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
fn test_update_file_with_multiple_chunks() {
    // Two chunks in a single Update File hunk, separated by a blank line.
    // First chunk has an explicit context, second chunk adds a line only.
    let patch = r#"*** Begin Patch
*** Update File: src/foo.txt
@@ context_one
 ctx
-old1
+new1
 tail

@@ context_two
+added_only
*** End Patch"#;

    let result = parse_patch_text(patch, ParseMode::Strict).unwrap();
    assert_eq!(
        result.hunks,
        vec![UpdateFile {
            path: PathBuf::from("src/foo.txt"),
            move_path: None,
            chunks: vec![
                UpdateFileChunk {
                    change_context: Some("context_one".to_string()),
                    old_lines: vec![
                        "ctx".to_string(),
                        "old1".to_string(),
                        "tail".to_string(),
                        "".to_string()
                    ],
                    new_lines: vec![
                        "ctx".to_string(),
                        "new1".to_string(),
                        "tail".to_string(),
                        "".to_string()
                    ],
                    is_end_of_file: false,
                },
                UpdateFileChunk {
                    change_context: Some("context_two".to_string()),
                    old_lines: vec![],
                    new_lines: vec!["added_only".to_string()],
                    is_end_of_file: false,
                },
            ],
        }]
    );
}

#[test]
fn test_update_file_second_chunk_missing_context_errors() {
    // First chunk omits @@ (allowed). Then a non-diff line triggers a second chunk
    // parse without @@, which must error.
    let patch = r#"*** Begin Patch
*** Update File: foo.txt
 context_line
+added
X
*** End Patch"#;

    match parse_patch_text(patch, ParseMode::Strict) {
        Err(InvalidHunkError {
            message,
            line_number,
        }) => {
            assert!(message.starts_with("Expected update hunk to start with a @@ context marker"));
            // Error should point to the start of the second chunk, which is line 5.
            assert_eq!(line_number, 5);
        }
        other => panic!("expected InvalidHunkError, got {other:?}"),
    }
}

#[test]
fn test_patch_across_multiple_files_with_eof_and_multichunks() {
    let patch = r#"*** Begin Patch
*** Update File: a.txt
@@
+lineA
*** End of File
*** Update File: b.txt
@@ ctx
 shared
-old
+new
 tail

@@
+only_add
*** Add File: c.txt
+contents
*** Delete File: d.txt
*** End Patch"#;

    let result = parse_patch_text(patch, ParseMode::Strict).unwrap();
    assert_eq!(
        result.hunks,
        vec![
            UpdateFile {
                path: PathBuf::from("a.txt"),
                move_path: None,
                chunks: vec![UpdateFileChunk {
                    change_context: None,
                    old_lines: vec![],
                    new_lines: vec!["lineA".to_string()],
                    is_end_of_file: true,
                }],
            },
            UpdateFile {
                path: PathBuf::from("b.txt"),
                move_path: None,
                chunks: vec![
                    UpdateFileChunk {
                        change_context: Some("ctx".to_string()),
                        old_lines: vec![
                            "shared".to_string(),
                            "old".to_string(),
                            "tail".to_string(),
                            "".to_string(),
                        ],
                        new_lines: vec![
                            "shared".to_string(),
                            "new".to_string(),
                            "tail".to_string(),
                            "".to_string(),
                        ],
                        is_end_of_file: false,
                    },
                    UpdateFileChunk {
                        change_context: None,
                        old_lines: vec![],
                        new_lines: vec!["only_add".to_string()],
                        is_end_of_file: false,
                    },
                ],
            },
            AddFile {
                path: PathBuf::from("c.txt"),
                contents: "contents\n".to_string(),
            },
            DeleteFile {
                path: PathBuf::from("d.txt")
            },
        ]
    );
}

#[test]
fn test_add_file_with_no_content() {
    let patch = "*** Begin Patch\n\
                 *** Add File: empty.txt\n\
                 *** End Patch";
    let result = parse_patch_text(patch, ParseMode::Strict).unwrap();
    assert_eq!(
        result.hunks,
        vec![AddFile {
            path: PathBuf::from("empty.txt"),
            contents: String::new(),
        }]
    );
}

#[test]
fn test_update_with_move_but_no_chunks_errors() {
    let patch = "*** Begin Patch\n\
                 *** Update File: file.txt\n\
                 *** Move to: new_file.txt\n\
                 *** End Patch";
    assert_eq!(
        parse_patch_text(patch, ParseMode::Strict),
        Err(InvalidHunkError {
            message: "Update file hunk for path 'file.txt' is empty".to_string(),
            line_number: 2,
        })
    );
}

#[test]
fn test_update_first_chunk_without_context_then_second_with_context() {
    let patch = r#"*** Begin Patch
*** Update File: src/sample.txt
 context_line
+added
@@ ctx2
+added2
*** End Patch"#;

    let result = parse_patch_text(patch, ParseMode::Strict).unwrap();
    assert_eq!(
        result.hunks,
        vec![UpdateFile {
            path: PathBuf::from("src/sample.txt"),
            move_path: None,
            chunks: vec![
                UpdateFileChunk {
                    change_context: None,
                    old_lines: vec!["context_line".to_string()],
                    new_lines: vec!["context_line".to_string(), "added".to_string()],
                    is_end_of_file: false,
                },
                UpdateFileChunk {
                    change_context: Some("ctx2".to_string()),
                    old_lines: vec![],
                    new_lines: vec!["added2".to_string()],
                    is_end_of_file: false,
                },
            ],
        }]
    );
}

#[test]
fn test_update_chunks_separated_by_whitespace_lines() {
    // Separator lines containing only whitespace should be ignored between chunks.
    let patch = r#"*** Begin Patch
*** Update File: src/ws.txt
@@ c1
 ctx
+add
  
 
@@ c2
+tail
*** End Patch"#;

    let result = parse_patch_text(patch, ParseMode::Strict).unwrap();
    assert_eq!(
        result.hunks,
        vec![UpdateFile {
            path: PathBuf::from("src/ws.txt"),
            move_path: None,
            chunks: vec![
                UpdateFileChunk {
                    change_context: Some("c1".to_string()),
                    old_lines: vec!["ctx".to_string(), " ".to_string(), "".to_string()],
                    new_lines: vec![
                        "ctx".to_string(),
                        "add".to_string(),
                        " ".to_string(),
                        "".to_string(),
                    ],
                    is_end_of_file: false,
                },
                UpdateFileChunk {
                    change_context: Some("c2".to_string()),
                    old_lines: vec![],
                    new_lines: vec!["tail".to_string()],
                    is_end_of_file: false,
                },
            ],
        }]
    );
}

#[test]
fn test_update_second_chunk_header_missing_space_after_atat_errors() {
    let patch = r#"*** Begin Patch
*** Update File: f.txt
@@ ok
+one

@@ctx
+two
*** End Patch"#;

    match parse_patch_text(patch, ParseMode::Strict) {
        Err(InvalidHunkError {
            message,
            line_number,
        }) => {
            assert!(message.starts_with("Expected update hunk to start with a @@ context marker"));
            assert_eq!(line_number, 6);
        }
        other => panic!("expected InvalidHunkError, got {other:?}"),
    }
}

#[test]
fn test_update_leading_space_before_atat_treated_as_context_line() {
    let patch = r#"*** Begin Patch
*** Update File: file.txt
 @@ header
+add
*** End Patch"#;

    let result = parse_patch_text(patch, ParseMode::Strict).unwrap();
    assert_eq!(
        result.hunks,
        vec![UpdateFile {
            path: PathBuf::from("file.txt"),
            move_path: None,
            chunks: vec![UpdateFileChunk {
                change_context: None,
                old_lines: vec!["@@ header".to_string()],
                new_lines: vec!["@@ header".to_string(), "add".to_string()],
                is_end_of_file: false,
            }],
        }]
    );
}

#[test]
fn test_update_first_chunk_without_context_and_eof_marker() {
    let patch = r#"*** Begin Patch
*** Update File: z.txt
+added
*** End of File
*** End Patch"#;

    let result = parse_patch_text(patch, ParseMode::Strict).unwrap();
    assert_eq!(
        result.hunks,
        vec![UpdateFile {
            path: PathBuf::from("z.txt"),
            move_path: None,
            chunks: vec![UpdateFileChunk {
                change_context: None,
                old_lines: vec![],
                new_lines: vec!["added".to_string()],
                is_end_of_file: true,
            }],
        }]
    );
}

#[test]
fn test_update_second_move_to_after_chunk_is_invalid_hunk_header() {
    let patch = r#"*** Begin Patch
*** Update File: file.txt
@@
+line
*** Move to: another.txt
*** End Patch"#;

    match parse_patch_text(patch, ParseMode::Strict) {
        Err(InvalidHunkError {
            message,
            line_number,
        }) => {
            assert!(message.starts_with("'*** Move to: another.txt' is not a valid hunk header."));
            assert_eq!(line_number, 5);
        }
        other => panic!("expected InvalidHunkError, got {other:?}"),
    }
}

59 codex-rs/apply-patch/src/standalone_executable.rs Normal file
@@ -0,0 +1,59 @@
use std::io::Read;
use std::io::Write;

pub fn main() -> ! {
    let exit_code = run_main();
    std::process::exit(exit_code);
}

/// We would prefer to return `std::process::ExitCode`, but its `exit_process()`
/// method is still a nightly API and we want main() to return !.
pub fn run_main() -> i32 {
    // Expect either one argument (the full apply_patch payload) or read it from stdin.
    let mut args = std::env::args_os();
    let _argv0 = args.next();

    let patch_arg = match args.next() {
        Some(arg) => match arg.into_string() {
            Ok(s) => s,
            Err(_) => {
                eprintln!("Error: apply_patch requires a UTF-8 PATCH argument.");
                return 1;
            }
        },
        None => {
            // No argument provided; attempt to read the patch from stdin.
            let mut buf = String::new();
            match std::io::stdin().read_to_string(&mut buf) {
                Ok(_) => {
                    if buf.is_empty() {
                        eprintln!("Usage: apply_patch 'PATCH'\n echo 'PATCH' | apply-patch");
                        return 2;
                    }
                    buf
                }
                Err(err) => {
                    eprintln!("Error: Failed to read PATCH from stdin.\n{err}");
                    return 1;
                }
            }
        }
    };

    // Refuse extra args to avoid ambiguity.
    if args.next().is_some() {
        eprintln!("Error: apply_patch accepts exactly one argument.");
        return 2;
    }

    let mut stdout = std::io::stdout();
    let mut stderr = std::io::stderr();
    match crate::apply_patch(&patch_arg, &mut stdout, &mut stderr) {
        Ok(()) => {
            // Flush to ensure output ordering when used in pipelines.
            let _ = stdout.flush();
            0
        }
        Err(_) => 1,
    }
}
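The same entry point can also be exercised in-process. A minimal sketch, assuming (as the `crate::apply_patch(&patch_arg, &mut stdout, &mut stderr)` call above suggests) that the crate exports an `apply_patch` function accepting any `io::Write` sinks:

```rust
use codex_apply_patch::apply_patch;

fn main() {
    let patch = "*** Begin Patch\n*** Add File: hello.txt\n+Hello world\n*** End Patch";
    // Capture output in memory instead of wiring up process stdio as run_main() does.
    let mut out: Vec<u8> = Vec::new();
    let mut err: Vec<u8> = Vec::new();
    match apply_patch(patch, &mut out, &mut err) {
        Ok(()) => print!("{}", String::from_utf8_lossy(&out)),
        Err(_) => eprint!("{}", String::from_utf8_lossy(&err)),
    }
}
```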
3 codex-rs/apply-patch/tests/all.rs Normal file
@@ -0,0 +1,3 @@
// Single integration test binary that aggregates all test modules.
// The submodules live in `tests/suite/`.
mod suite;
90 codex-rs/apply-patch/tests/suite/cli.rs Normal file
@@ -0,0 +1,90 @@
use assert_cmd::prelude::*;
use std::fs;
use std::process::Command;
use tempfile::tempdir;

#[test]
fn test_apply_patch_cli_add_and_update() -> anyhow::Result<()> {
    let tmp = tempdir()?;
    let file = "cli_test.txt";
    let absolute_path = tmp.path().join(file);

    // 1) Add a file
    let add_patch = format!(
        r#"*** Begin Patch
*** Add File: {file}
+hello
*** End Patch"#
    );
    Command::cargo_bin("apply_patch")
        .expect("should find apply_patch binary")
        .arg(add_patch)
        .current_dir(tmp.path())
        .assert()
        .success()
        .stdout(format!("Success. Updated the following files:\nA {file}\n"));
    assert_eq!(fs::read_to_string(&absolute_path)?, "hello\n");

    // 2) Update the file
    let update_patch = format!(
        r#"*** Begin Patch
*** Update File: {file}
@@
-hello
+world
*** End Patch"#
    );
    Command::cargo_bin("apply_patch")
        .expect("should find apply_patch binary")
        .arg(update_patch)
        .current_dir(tmp.path())
        .assert()
        .success()
        .stdout(format!("Success. Updated the following files:\nM {file}\n"));
    assert_eq!(fs::read_to_string(&absolute_path)?, "world\n");

    Ok(())
}

#[test]
fn test_apply_patch_cli_stdin_add_and_update() -> anyhow::Result<()> {
    let tmp = tempdir()?;
    let file = "cli_test_stdin.txt";
    let absolute_path = tmp.path().join(file);

    // 1) Add a file via stdin
    let add_patch = format!(
        r#"*** Begin Patch
*** Add File: {file}
+hello
*** End Patch"#
    );
    let mut cmd =
        assert_cmd::Command::cargo_bin("apply_patch").expect("should find apply_patch binary");
    cmd.current_dir(tmp.path());
    cmd.write_stdin(add_patch)
        .assert()
        .success()
        .stdout(format!("Success. Updated the following files:\nA {file}\n"));
    assert_eq!(fs::read_to_string(&absolute_path)?, "hello\n");

    // 2) Update the file via stdin
    let update_patch = format!(
        r#"*** Begin Patch
*** Update File: {file}
@@
-hello
+world
*** End Patch"#
    );
    let mut cmd =
        assert_cmd::Command::cargo_bin("apply_patch").expect("should find apply_patch binary");
    cmd.current_dir(tmp.path());
    cmd.write_stdin(update_patch)
        .assert()
        .success()
        .stdout(format!("Success. Updated the following files:\nM {file}\n"));
    assert_eq!(fs::read_to_string(&absolute_path)?, "world\n");

    Ok(())
}
1 codex-rs/apply-patch/tests/suite/mod.rs Normal file
@@ -0,0 +1 @@
mod cli;
@@ -16,4 +16,5 @@ codex-apply-patch = { path = "../apply-patch" }
codex-core = { path = "../core" }
codex-linux-sandbox = { path = "../linux-sandbox" }
dotenvy = "0.15.7"
tempfile = "3"
tokio = { version = "1", features = ["rt-multi-thread"] }

@@ -3,6 +3,13 @@ use std::path::Path;
use std::path::PathBuf;

use codex_core::CODEX_APPLY_PATCH_ARG1;
#[cfg(unix)]
use std::os::unix::fs::symlink;
use tempfile::TempDir;

const LINUX_SANDBOX_ARG0: &str = "codex-linux-sandbox";
const APPLY_PATCH_ARG0: &str = "apply_patch";
const MISSPELLED_APPLY_PATCH_ARG0: &str = "applypatch";

/// While we want to deploy the Codex CLI as a single executable for simplicity,
/// we also want to expose some of its functionality as distinct CLIs, so we use
@@ -39,9 +46,11 @@ where
        .and_then(|s| s.to_str())
        .unwrap_or("");

    if exe_name == "codex-linux-sandbox" {
    if exe_name == LINUX_SANDBOX_ARG0 {
        // Safety: [`run_main`] never returns.
        codex_linux_sandbox::run_main();
    } else if exe_name == APPLY_PATCH_ARG0 || exe_name == MISSPELLED_APPLY_PATCH_ARG0 {
        codex_apply_patch::main();
    }

    let argv1 = args.next().unwrap_or_default();
@@ -68,6 +77,19 @@ where
    // before creating any threads/the Tokio runtime.
    load_dotenv();

    // Retain the TempDir so it exists for the lifetime of the invocation of
    // this executable. Admittedly, we could invoke `keep()` on it, but it
    // would be nice to avoid leaving temporary directories behind, if possible.
    let _path_entry = match prepend_path_entry_for_apply_patch() {
        Ok(path_entry) => Some(path_entry),
        Err(err) => {
            // It is possible that Codex will proceed successfully even if
            // updating the PATH fails, so warn the user and move on.
            eprintln!("WARNING: proceeding, even though we could not update PATH: {err}");
            None
        }
    };

    // Regular invocation – create a Tokio runtime and execute the provided
    // async entry-point.
    let runtime = tokio::runtime::Runtime::new()?;
@@ -82,10 +104,98 @@ where
    })
}

const ILLEGAL_ENV_VAR_PREFIX: &str = "CODEX_";

/// Load env vars from ~/.codex/.env and `$(pwd)/.env`.
///
/// Security: Do not allow `.env` files to create or modify any variables
/// with names starting with `CODEX_`.
fn load_dotenv() {
    if let Ok(codex_home) = codex_core::config::find_codex_home() {
        dotenvy::from_path(codex_home.join(".env")).ok();
    if let Ok(codex_home) = codex_core::config::find_codex_home()
        && let Ok(iter) = dotenvy::from_path_iter(codex_home.join(".env"))
    {
        set_filtered(iter);
    }

    if let Ok(iter) = dotenvy::dotenv_iter() {
        set_filtered(iter);
    }
    dotenvy::dotenv().ok();
}

/// Helper to set vars from a dotenvy iterator while filtering out `CODEX_` keys.
fn set_filtered<I>(iter: I)
where
    I: IntoIterator<Item = Result<(String, String), dotenvy::Error>>,
{
    for (key, value) in iter.into_iter().flatten() {
        if !key.to_ascii_uppercase().starts_with(ILLEGAL_ENV_VAR_PREFIX) {
            // It is safe to call set_var() because our process is
            // single-threaded at this point in its execution.
            unsafe { std::env::set_var(&key, &value) };
        }
    }
}
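To make the `CODEX_` filter concrete, a small illustration with invented `.env` entries (hypothetical values, expressed as the iterator items dotenvy would yield):

```rust
// Hypothetical .env contents; only the non-CODEX_ key is exported.
let entries: Vec<Result<(String, String), dotenvy::Error>> = vec![
    Ok(("OPENAI_API_KEY".to_string(), "sk-example".to_string())),
    Ok(("CODEX_HOME".to_string(), "/tmp/evil".to_string())),
];
// Exports OPENAI_API_KEY; the CODEX_-prefixed entry is silently dropped.
set_filtered(entries);
```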

/// Creates a temporary directory with either:
///
/// - UNIX: `apply_patch` symlink to the current executable
/// - WINDOWS: `apply_patch.bat` batch script to invoke the current executable
///   with the "secret" --codex-run-as-apply-patch flag.
///
/// This temporary directory is prepended to the PATH environment variable so
/// that `apply_patch` can be on the PATH without requiring the user to
/// install a separate `apply_patch` executable, simplifying the deployment of
/// Codex CLI.
///
/// IMPORTANT: This function modifies the PATH environment variable, so it MUST
/// be called before multiple threads are spawned.
fn prepend_path_entry_for_apply_patch() -> std::io::Result<TempDir> {
    let temp_dir = TempDir::new()?;
    let path = temp_dir.path();

    for filename in &[APPLY_PATCH_ARG0, MISSPELLED_APPLY_PATCH_ARG0] {
        let exe = std::env::current_exe()?;

        #[cfg(unix)]
        {
            let link = path.join(filename);
            symlink(&exe, &link)?;
        }

        #[cfg(windows)]
        {
            let batch_script = path.join(format!("{filename}.bat"));
            std::fs::write(
                &batch_script,
                format!(
                    r#"@echo off
"{}" {CODEX_APPLY_PATCH_ARG1} %*
"#,
                    exe.display()
                ),
            )?;
        }
    }

    #[cfg(unix)]
    const PATH_SEPARATOR: &str = ":";

    #[cfg(windows)]
    const PATH_SEPARATOR: &str = ";";

    let path_element = path.display();
    let updated_path_env_var = match std::env::var("PATH") {
        Ok(existing_path) => {
            format!("{path_element}{PATH_SEPARATOR}{existing_path}")
        }
        Err(_) => {
            format!("{path_element}")
        }
    };

    unsafe {
        std::env::set_var("PATH", updated_path_env_var);
    }

    Ok(temp_dir)
}

@@ -1,4 +1,5 @@
use codex_core::config::Config;
use codex_core::user_agent::get_codex_user_agent;

use crate::chatgpt_token::get_chatgpt_token_data;
use crate::chatgpt_token::init_chatgpt_token_from_auth;
@@ -30,7 +31,7 @@ pub(crate) async fn chatgpt_get_request<T: DeserializeOwned>(
        .bearer_auth(&token.access_token)
        .header("chatgpt-account-id", account_id?)
        .header("Content-Type", "application/json")
        .header("User-Agent", "codex-cli")
        .header("User-Agent", get_codex_user_agent(None))
        .send()
        .await
        .context("Failed to send request")?;

@@ -1,3 +1,4 @@
use codex_login::AuthMode;
use codex_login::CodexAuth;
use std::path::Path;
use std::sync::LazyLock;
@@ -19,7 +20,7 @@ pub fn set_chatgpt_token_data(value: TokenData) {

/// Initialize the ChatGPT token from auth.json file
pub async fn init_chatgpt_token_from_auth(codex_home: &Path) -> std::io::Result<()> {
    let auth = CodexAuth::from_codex_home(codex_home)?;
    let auth = CodexAuth::from_codex_home(codex_home, AuthMode::ChatGPT)?;
    if let Some(auth) = auth {
        let token_data = auth.get_token_data().await?;
        set_chatgpt_token_data(token_data);

3 codex-rs/chatgpt/tests/all.rs Normal file
@@ -0,0 +1,3 @@
// Single integration test binary that aggregates all test modules.
// The submodules live in `tests/suite/`.
mod suite;
@@ -1,5 +1,3 @@
#![expect(clippy::expect_used)]

use codex_chatgpt::apply_command::apply_diff_from_task;
use codex_chatgpt::get_task::GetTaskResponse;
use std::path::Path;

2 codex-rs/chatgpt/tests/suite/mod.rs Normal file
@@ -0,0 +1,2 @@
// Aggregates all former standalone integration tests as modules.
mod apply_command_e2e;
@@ -25,6 +25,7 @@ codex-core = { path = "../core" }
codex-exec = { path = "../exec" }
codex-login = { path = "../login" }
codex-mcp-server = { path = "../mcp-server" }
codex-protocol = { path = "../protocol" }
codex-tui = { path = "../tui" }
serde_json = "1"
tokio = { version = "1", features = [
@@ -36,3 +37,4 @@ tokio = { version = "1", features = [
] }
tracing = "0.1.41"
tracing-subscriber = "0.3.19"
codex-protocol-ts = { path = "../protocol-ts" }

@@ -3,11 +3,11 @@ use std::path::PathBuf;
use codex_common::CliConfigOverrides;
use codex_core::config::Config;
use codex_core::config::ConfigOverrides;
use codex_core::config_types::SandboxMode;
use codex_core::exec::spawn_command_under_linux_sandbox;
use codex_core::exec_env::create_env;
use codex_core::landlock::spawn_command_under_linux_sandbox;
use codex_core::seatbelt::spawn_command_under_seatbelt;
use codex_core::spawn::StdioPolicy;
use codex_protocol::config_types::SandboxMode;

use crate::LandlockCommand;
use crate::SeatbeltCommand;

@@ -1,20 +1,33 @@
use std::env;

use codex_common::CliConfigOverrides;
use codex_core::config::Config;
use codex_core::config::ConfigOverrides;
use codex_login::AuthMode;
use codex_login::CLIENT_ID;
use codex_login::CodexAuth;
use codex_login::OPENAI_API_KEY_ENV_VAR;
use codex_login::ServerOptions;
use codex_login::login_with_api_key;
use codex_login::login_with_chatgpt;
use codex_login::logout;
use codex_login::run_login_server;
use std::env;
use std::path::PathBuf;

pub async fn login_with_chatgpt(codex_home: PathBuf) -> std::io::Result<()> {
    let opts = ServerOptions::new(codex_home, CLIENT_ID.to_string());
    let server = run_login_server(opts)?;

    eprintln!(
        "Starting local login server on http://localhost:{}.\nIf your browser did not open, navigate to this URL to authenticate:\n\n{}",
        server.actual_port, server.auth_url,
    );

    server.block_until_done().await
}

pub async fn run_login_with_chatgpt(cli_config_overrides: CliConfigOverrides) -> ! {
    let config = load_config_or_exit(cli_config_overrides);

    let capture_output = false;
    match login_with_chatgpt(&config.codex_home, capture_output).await {
    match login_with_chatgpt(config.codex_home).await {
        Ok(_) => {
            eprintln!("Successfully logged in");
            std::process::exit(0);
@@ -47,18 +60,18 @@ pub async fn run_login_with_api_key(
pub async fn run_login_status(cli_config_overrides: CliConfigOverrides) -> ! {
    let config = load_config_or_exit(cli_config_overrides);

    match CodexAuth::from_codex_home(&config.codex_home) {
    match CodexAuth::from_codex_home(&config.codex_home, config.preferred_auth_method) {
        Ok(Some(auth)) => match auth.mode {
            AuthMode::ApiKey => match auth.get_token().await {
                Ok(api_key) => {
                    eprintln!("Logged in using an API key - {}", safe_format_key(&api_key));

                    if let Ok(env_api_key) = env::var(OPENAI_API_KEY_ENV_VAR) {
                        if env_api_key == api_key {
                            eprintln!(
                                " API loaded from OPENAI_API_KEY environment variable or .env file"
                            );
                        }
                    if let Ok(env_api_key) = env::var(OPENAI_API_KEY_ENV_VAR)
                        && env_api_key == api_key
                    {
                        eprintln!(
                            " API loaded from OPENAI_API_KEY environment variable or .env file"
                        );
                    }
                    std::process::exit(0);
                }

@@ -27,7 +27,11 @@ use crate::proto::ProtoCli;
    author,
    version,
    // If a sub‑command is given, ignore requirements of the default args.
    subcommand_negates_reqs = true
    subcommand_negates_reqs = true,
    // The executable is sometimes invoked via a platform‑specific name like
    // `codex-x86_64-unknown-linux-musl`, but the help output should always use
    // the generic `codex` command name that users run.
    bin_name = "codex"
)]
struct MultitoolCli {
    #[clap(flatten)]
@@ -68,6 +72,10 @@ enum Subcommand {
    /// Apply the latest diff produced by Codex agent as a `git apply` to your local working tree.
    #[clap(visible_alias = "a")]
    Apply(ApplyCommand),

    /// Internal: generate TypeScript protocol bindings.
    #[clap(hide = true)]
    GenerateTs(GenerateTsCommand),
}

#[derive(Debug, Parser)]
@@ -116,6 +124,17 @@ struct LogoutCommand {
    config_overrides: CliConfigOverrides,
}

#[derive(Debug, Parser)]
struct GenerateTsCommand {
    /// Output directory where .ts files will be written
    #[arg(short = 'o', long = "out", value_name = "DIR")]
    out_dir: PathBuf,

    /// Optional path to the Prettier executable to format generated files
    #[arg(short = 'p', long = "prettier", value_name = "PRETTIER_BIN")]
    prettier: Option<PathBuf>,
}

fn main() -> anyhow::Result<()> {
    arg0_dispatch_or_else(|codex_linux_sandbox_exe| async move {
        cli_main(codex_linux_sandbox_exe).await?;
@@ -140,7 +159,7 @@ async fn cli_main(codex_linux_sandbox_exe: Option<PathBuf>) -> anyhow::Result<()>
            codex_exec::run_main(exec_cli, codex_linux_sandbox_exe).await?;
        }
        Some(Subcommand::Mcp) => {
            codex_mcp_server::run_main(codex_linux_sandbox_exe).await?;
            codex_mcp_server::run_main(codex_linux_sandbox_exe, cli.config_overrides).await?;
        }
        Some(Subcommand::Login(mut login_cli)) => {
            prepend_config_flags(&mut login_cli.config_overrides, cli.config_overrides);
@@ -190,6 +209,9 @@ async fn cli_main(codex_linux_sandbox_exe: Option<PathBuf>) -> anyhow::Result<()>
            prepend_config_flags(&mut apply_cli.config_overrides, cli.config_overrides);
            run_apply_command(apply_cli, None).await?;
        }
        Some(Subcommand::GenerateTs(gen_cli)) => {
            codex_protocol_ts::generate_ts(&gen_cli.out_dir, gen_cli.prettier.as_deref())?;
        }
    }

    Ok(())

@@ -1,15 +1,15 @@
use std::io::IsTerminal;
use std::sync::Arc;

use clap::Parser;
use codex_common::CliConfigOverrides;
use codex_core::Codex;
use codex_core::CodexSpawnOk;
use codex_core::ConversationManager;
use codex_core::NewConversation;
use codex_core::config::Config;
use codex_core::config::ConfigOverrides;
use codex_core::protocol::Event;
use codex_core::protocol::EventMsg;
use codex_core::protocol::Submission;
use codex_core::util::notify_on_sigint;
use codex_login::CodexAuth;
use codex_login::AuthManager;
use tokio::io::AsyncBufReadExt;
use tokio::io::BufReader;
use tracing::error;
@@ -36,22 +36,41 @@ pub async fn run_main(opts: ProtoCli) -> anyhow::Result<()> {
        .map_err(anyhow::Error::msg)?;

    let config = Config::load_with_cli_overrides(overrides_vec, ConfigOverrides::default())?;
    let auth = CodexAuth::from_codex_home(&config.codex_home)?;
    let ctrl_c = notify_on_sigint();
    let CodexSpawnOk { codex, .. } = Codex::spawn(config, auth, ctrl_c.clone()).await?;
    let codex = Arc::new(codex);
    // Use conversation_manager API to start a conversation
    let conversation_manager = ConversationManager::new(AuthManager::shared(
        config.codex_home.clone(),
        config.preferred_auth_method,
    ));
    let NewConversation {
        conversation_id: _,
        conversation,
        session_configured,
    } = conversation_manager.new_conversation(config).await?;

    // Simulate streaming the session_configured event.
    let synthetic_event = Event {
        // Fake id value.
        id: "".to_string(),
        msg: EventMsg::SessionConfigured(session_configured),
    };
    let session_configured_event = match serde_json::to_string(&synthetic_event) {
        Ok(s) => s,
        Err(e) => {
            error!("Failed to serialize session_configured: {e}");
            return Err(anyhow::Error::from(e));
        }
    };
    println!("{session_configured_event}");

    // Task that reads JSON lines from stdin and forwards to Submission Queue
    let sq_fut = {
        let codex = codex.clone();
        let ctrl_c = ctrl_c.clone();
        let conversation = conversation.clone();
        async move {
            let stdin = BufReader::new(tokio::io::stdin());
            let mut lines = stdin.lines();
            loop {
                let result = tokio::select! {
                    _ = ctrl_c.notified() => {
                        info!("Interrupted, exiting");
                    _ = tokio::signal::ctrl_c() => {
                        break
                    },
                    res = lines.next_line() => res,
@@ -65,7 +84,7 @@ pub async fn run_main(opts: ProtoCli) -> anyhow::Result<()> {
                }
                match serde_json::from_str::<Submission>(line) {
                    Ok(sub) => {
                        if let Err(e) = codex.submit_with_id(sub).await {
                        if let Err(e) = conversation.submit_with_id(sub).await {
                            error!("{e:#}");
                            break;
                        }
@@ -88,8 +107,8 @@ pub async fn run_main(opts: ProtoCli) -> anyhow::Result<()> {
    let eq_fut = async move {
        loop {
            let event = tokio::select! {
                _ = ctrl_c.notified() => break,
                event = codex.next_event() => event,
                _ = tokio::signal::ctrl_c() => break,
                event = conversation.next_event() => event,
            };
            match event {
                Ok(event) => {

9 codex-rs/clippy.toml Normal file
@@ -0,0 +1,9 @@
allow-expect-in-tests = true
allow-unwrap-in-tests = true
disallowed-methods = [
    { path = "ratatui::style::Color::Rgb", reason = "Use ANSI colors, which work better in various terminal themes." },
    { path = "ratatui::style::Color::Indexed", reason = "Use ANSI colors, which work better in various terminal themes." },
    { path = "ratatui::style::Stylize::white", reason = "Avoid hardcoding white; prefer default fg or dim/bold. Exception: Disable this rule if rendering over a hardcoded ANSI background." },
    { path = "ratatui::style::Stylize::black", reason = "Avoid hardcoding black; prefer default fg or dim/bold. Exception: Disable this rule if rendering over a hardcoded ANSI background." },
    { path = "ratatui::style::Stylize::yellow", reason = "Avoid yellow; prefer other colors in `tui/styles.md`." },
]
@@ -9,6 +9,7 @@ workspace = true
[dependencies]
clap = { version = "4", features = ["derive", "wrap_help"], optional = true }
codex-core = { path = "../core" }
codex-protocol = { path = "../protocol" }
serde = { version = "1", optional = true }
toml = { version = "0.9", optional = true }

46 codex-rs/common/src/approval_presets.rs Normal file
@@ -0,0 +1,46 @@
use codex_core::protocol::AskForApproval;
use codex_core::protocol::SandboxPolicy;

/// A simple preset pairing an approval policy with a sandbox policy.
#[derive(Debug, Clone)]
pub struct ApprovalPreset {
    /// Stable identifier for the preset.
    pub id: &'static str,
    /// Display label shown in UIs.
    pub label: &'static str,
    /// Short human description shown next to the label in UIs.
    pub description: &'static str,
    /// Approval policy to apply.
    pub approval: AskForApproval,
    /// Sandbox policy to apply.
    pub sandbox: SandboxPolicy,
}

/// Built-in list of approval presets that pair approval and sandbox policy.
///
/// Keep this UI-agnostic so it can be reused by both TUI and MCP server.
pub fn builtin_approval_presets() -> Vec<ApprovalPreset> {
    vec![
        ApprovalPreset {
            id: "read-only",
            label: "Read Only",
            description: "Codex can read files and answer questions. Codex requires approval to make edits, run commands, or access network",
            approval: AskForApproval::OnRequest,
            sandbox: SandboxPolicy::ReadOnly,
        },
        ApprovalPreset {
            id: "auto",
            label: "Auto",
            description: "Codex can read files, make edits, and run commands in the workspace. Codex requires approval to work outside the workspace or access network",
            approval: AskForApproval::OnRequest,
            sandbox: SandboxPolicy::new_workspace_write_policy(),
        },
        ApprovalPreset {
            id: "full-access",
            label: "Full Access",
            description: "Codex can read files, make edits, and run commands with network access, without approval. Exercise caution",
            approval: AskForApproval::Never,
            sandbox: SandboxPolicy::DangerFullAccess,
        },
    ]
}
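A sketch of how a UI might consume these presets; looking up the stable `id` defined above keeps the approval and sandbox policies applied as a pair:

```rust
// Resolve the preset a user picked in a selection popup.
let preset = builtin_approval_presets()
    .into_iter()
    .find(|p| p.id == "auto")
    .expect("'auto' is one of the built-in preset ids");
// preset.approval and preset.sandbox are then applied together.
println!("{}: {}", preset.label, preset.description);
```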
@@ -142,7 +142,6 @@ fn parse_toml_value(raw: &str) -> Result<Value, toml::de::Error> {
}

#[cfg(all(test, feature = "cli"))]
#[allow(clippy::expect_used, clippy::unwrap_used)]
mod tests {
    use super::*;

@@ -29,3 +29,8 @@ mod config_summary;
pub use config_summary::create_config_summary_entries;
// Shared fuzzy matcher (used by TUI selection popups and other UI filtering)
pub mod fuzzy_match;
// Shared model presets used by TUI and MCP server
pub mod model_presets;
// Shared approval presets (AskForApproval + Sandbox) used by TUI and MCP server
// Not to be confused with AskForApproval, which we should probably rename to EscalationPolicy.
pub mod approval_presets;

54 codex-rs/common/src/model_presets.rs Normal file
@@ -0,0 +1,54 @@
use codex_core::protocol_config_types::ReasoningEffort;

/// A simple preset pairing a model slug with a reasoning effort.
#[derive(Debug, Clone, Copy)]
pub struct ModelPreset {
    /// Stable identifier for the preset.
    pub id: &'static str,
    /// Display label shown in UIs.
    pub label: &'static str,
    /// Short human description shown next to the label in UIs.
    pub description: &'static str,
    /// Model slug (e.g., "gpt-5").
    pub model: &'static str,
    /// Reasoning effort to apply for this preset.
    pub effort: ReasoningEffort,
}

/// Built-in list of model presets that pair a model with a reasoning effort.
///
/// Keep this UI-agnostic so it can be reused by both TUI and MCP server.
pub fn builtin_model_presets() -> &'static [ModelPreset] {
    // Order reflects effort from minimal to high.
    const PRESETS: &[ModelPreset] = &[
        ModelPreset {
            id: "gpt-5-minimal",
            label: "gpt-5 minimal",
            description: "— fastest responses with limited reasoning; ideal for coding, instructions, or lightweight tasks",
            model: "gpt-5",
            effort: ReasoningEffort::Minimal,
        },
        ModelPreset {
            id: "gpt-5-low",
            label: "gpt-5 low",
            description: "— balances speed with some reasoning; useful for straightforward queries and short explanations",
            model: "gpt-5",
            effort: ReasoningEffort::Low,
        },
        ModelPreset {
            id: "gpt-5-medium",
            label: "gpt-5 medium",
            description: "— default setting; provides a solid balance of reasoning depth and latency for general-purpose tasks",
            model: "gpt-5",
            effort: ReasoningEffort::Medium,
        },
        ModelPreset {
            id: "gpt-5-high",
            label: "gpt-5 high",
            description: "— maximizes reasoning depth for complex or ambiguous problems",
            model: "gpt-5",
            effort: ReasoningEffort::High,
        },
    ];
    PRESETS
}
@@ -7,7 +7,7 @@
//! `config.toml`.

use clap::ValueEnum;
use codex_core::config_types::SandboxMode;
use codex_protocol::config_types::SandboxMode;

#[derive(Clone, Copy, Debug, ValueEnum)]
#[value(rename_all = "kebab-case")]

@@ -149,6 +149,7 @@ approval_policy = "untrusted"
```

If you want to be notified whenever a command fails, use "on-failure":

```toml
# If the command fails when run in the sandbox, Codex asks for permission to
# retry the command outside the sandbox.
@@ -156,12 +157,14 @@ approval_policy = "on-failure"
```

If you want the model to run until it decides that it needs to ask you for escalated permissions, use "on-request":

```toml
# The model decides when to escalate
approval_policy = "on-request"
```

Alternatively, you can have the model run until it is done, and never ask to run a command with escalated permissions:

```toml
# User is never prompted: if the command fails, Codex will automatically try
# something out. Note the `exec` subcommand always uses this mode.
@@ -217,17 +220,14 @@ Users can specify config values at multiple levels. Order of precedence is as follows:

## model_reasoning_effort

If the model name starts with `"o"` (as in `"o3"` or `"o4-mini"`) or `"codex"`, reasoning is enabled by default when using the Responses API. As explained in the [OpenAI Platform documentation](https://platform.openai.com/docs/guides/reasoning?api-mode=responses#get-started-with-reasoning), this can be set to:
If the selected model is known to support reasoning (for example: `o3`, `o4-mini`, `codex-*`, `gpt-5`), reasoning is enabled by default when using the Responses API. As explained in the [OpenAI Platform documentation](https://platform.openai.com/docs/guides/reasoning?api-mode=responses#get-started-with-reasoning), this can be set to:

- `"minimal"`
- `"low"`
- `"medium"` (default)
- `"high"`

To disable reasoning, set `model_reasoning_effort` to `"none"` in your config:

```toml
model_reasoning_effort = "none" # disable reasoning
```

Note: to minimize reasoning, choose `"minimal"`.

## model_reasoning_summary

@@ -243,6 +243,25 @@ To disable reasoning summaries, set `model_reasoning_summary` to `"none"` in your config:
model_reasoning_summary = "none" # disable reasoning summaries
```

## model_verbosity

Controls output length/detail on GPT‑5 family models when using the Responses API. Supported values:

- `"low"`
- `"medium"` (default when omitted)
- `"high"`

When set, Codex includes a `text` object in the request payload with the configured verbosity, for example: `"text": { "verbosity": "low" }`.

Example:

```toml
model = "gpt-5"
model_verbosity = "low"
```

Note: This applies only to providers using the Responses API. Chat Completions providers are unaffected.

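For a rough sense of what this looks like on the wire, a sketch of the request body; only the `text` object is documented above, and the surrounding fields are illustrative:

```rust
use serde_json::json;

// Responses API payload carrying the configured verbosity; `model` and
// `input` are placeholder fields for illustration only.
let body = json!({
    "model": "gpt-5",
    "input": "Summarize the diff",
    "text": { "verbosity": "low" },
});
assert_eq!(body["text"]["verbosity"], "low");
```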
## model_supports_reasoning_summaries

By default, `reasoning` is only set on requests to OpenAI models that are known to support it. To force `reasoning` to be set on requests to the current model, set the following in `config.toml`:
@@ -281,6 +300,9 @@ sandbox_mode = "workspace-write"
exclude_tmpdir_env_var = false
exclude_slash_tmp = false

# Optional list of _additional_ writable roots beyond $TMPDIR and /tmp.
writable_roots = ["/Users/YOU/.pyenv/shims"]

# Allow the command being run inside the sandbox to make outbound network
# requests. Disabled by default.
network_access = false
@@ -297,6 +319,16 @@ This is reasonable to use if Codex is running in an environment that provides it

Though using this option may also be necessary if you try to use Codex in environments where its native sandboxing mechanisms are unsupported, such as older Linux kernels or on Windows.

## Approval presets

Codex provides three main Approval Presets:

- Read Only: Codex can read files and answer questions; edits, running commands, and network access require approval.
- Auto: Codex can read files, make edits, and run commands in the workspace without approval; asks for approval outside the workspace or for network access.
- Full Access: Full disk and network access without prompts; extremely risky.

You can further customize how Codex runs at the command line using the `--ask-for-approval` and `--sandbox` options.

## mcp_servers

Defines the list of MCP servers that Codex can consult for tool use. Currently, only servers that are launched by executing a program that communicates over stdio are supported. For servers that use the SSE transport, consider an adapter like [mcp-proxy](https://github.com/sparfenyuk/mcp-proxy).
@@ -498,10 +530,12 @@ hide_agent_reasoning = true # defaults to false
Surfaces the model’s raw chain-of-thought ("raw reasoning content") when available.

Notes:

- Only takes effect if the selected model/provider actually emits raw reasoning content. Many models do not. When unsupported, this option has no visible effect.
- Raw reasoning may include intermediate thoughts or sensitive context. Enable only if acceptable for your workflow.

Example:

```toml
show_raw_agent_reasoning = true # defaults to false
```

@@ -6,6 +6,7 @@ version = { workspace = true }
[lib]
name = "codex_core"
path = "src/lib.rs"
doctest = false

[lints]
workspace = true
@@ -19,19 +20,22 @@ chrono = { version = "0.4", features = ["serde"] }
codex-apply-patch = { path = "../apply-patch" }
codex-login = { path = "../login" }
codex-mcp-client = { path = "../mcp-client" }
codex-protocol = { path = "../protocol" }
dirs = "6"
env-flags = "0.1.1"
eventsource-stream = "0.2.3"
fs2 = "0.4.3"
futures = "0.3"
libc = "0.2.174"
libc = "0.2.175"
mcp-types = { path = "../mcp-types" }
mime_guess = "2.0"
os_info = "3.12.0"
portable-pty = "0.9.0"
rand = "0.9"
regex-lite = "0.1.6"
reqwest = { version = "0.12", features = ["json", "stream"] }
serde = { version = "1", features = ["derive"] }
serde_json = "1"
serde_bytes = "0.11"
serde_json = "1"
sha1 = "0.10.6"
shlex = "1.3.0"
similar = "2.7.0"
@@ -48,12 +52,12 @@ tokio = { version = "1", features = [
] }
tokio-util = "0.7.16"
toml = "0.9.5"
toml_edit = "0.23.3"
toml_edit = "0.23.4"
tracing = { version = "0.1.41", features = ["log"] }
tree-sitter = "0.25.8"
tree-sitter-bash = "0.25.0"
uuid = { version = "1", features = ["serde", "v4"] }
whoami = "1.6.0"
whoami = "1.6.1"
wildmatch = "2.4.0"

@@ -69,6 +73,9 @@ openssl-sys = { version = "*", features = ["vendored"] }
[target.aarch64-unknown-linux-musl.dependencies]
openssl-sys = { version = "*", features = ["vendored"] }

[target.'cfg(target_os = "windows")'.dependencies]
which = "6"

[dev-dependencies]
assert_cmd = "2"
core_test_support = { path = "tests/common" }

@@ -1,6 +1,7 @@
|
||||
You are a coding agent running in the Codex CLI, a terminal-based coding assistant. Codex CLI is an open source project led by OpenAI. You are expected to be precise, safe, and helpful.
|
||||
|
||||
Your capabilities:
|
||||
|
||||
- Receive user prompts and other context provided by the harness, such as files in the workspace.
|
||||
- Communicate with the user by streaming thinking & responses, and by making & updating plans.
|
||||
- Emit function calls to run terminal commands and apply patches. Depending on how this specific run is configured, you can request that these function calls be escalated to the user for approval before running. More on this in the "Sandbox and approvals" section.
|
||||
@@ -20,11 +21,13 @@ Your default personality and tone is concise, direct, and friendly. You communic
|
||||
Before making tool calls, send a brief preamble to the user explaining what you’re about to do. When sending preamble messages, follow these principles and examples:
|
||||
|
||||
- **Logically group related actions**: if you’re about to run several related commands, describe them together in one preamble rather than sending a separate note for each.
|
||||
- **Keep it concise**: be no more than 1-2 sentences (8–12 words for quick updates).
|
||||
- **Keep it concise**: be no more than 1-2 sentences, focused on immediate, tangible next steps. (8–12 words for quick updates).
|
||||
- **Build on prior context**: if this is not your first tool call, use the preamble message to connect the dots with what’s been done so far and create a sense of momentum and clarity for the user to understand your next actions.
|
||||
- **Keep your tone light, friendly and curious**: add small touches of personality in preambles feel collaborative and engaging.
|
||||
- **Exception**: Avoid adding a preamble for every trivial read (e.g., `cat` a single file) unless it’s part of a larger grouped action.
|
||||
|
||||
**Examples:**
|
||||
|
||||
- “I’ve explored the repo; now checking the API route definitions.”
|
||||
- “Next, I’ll patch the config and update the related tests.”
|
||||
- “I’m about to scaffold the CLI commands and helper functions.”
|
||||
@@ -34,15 +37,18 @@ Before making tool calls, send a brief preamble to the user explaining what you
|
||||
- “Alright, build pipeline order is interesting. Checking how it reports failures.”
|
||||
- “Spotted a clever caching util; now hunting where it gets used.”
|
||||
|
||||
**Avoiding a preamble for every trivial read (e.g., `cat` a single file) unless it’s part of a larger grouped action.
|
||||
- Jumping straight into tool calls without explaining what’s about to happen.
|
||||
- Writing overly long or speculative preambles — focus on immediate, tangible next steps.
|
||||
|
||||
## Planning
|
||||
|
||||
You have access to an `update_plan` tool which tracks steps and progress and renders them to the user. Using the tool helps demonstrate that you've understood the task and convey how you're approaching it. Plans can help to make complex, ambiguous, or multi-phase work clearer and more collaborative for the user. A good plan should break the task into meaningful, logically ordered steps that are easy to verify as you go. Note that plans are not for padding out simple work with filler steps or stating the obvious. Do not repeat the full contents of the plan after an `update_plan` call — the harness already displays it. Instead, summarize the change made and highlight any important context or next step.
|
||||
You have access to an `update_plan` tool which tracks steps and progress and renders them to the user. Using the tool helps demonstrate that you've understood the task and convey how you're approaching it. Plans can help to make complex, ambiguous, or multi-phase work clearer and more collaborative for the user. A good plan should break the task into meaningful, logically ordered steps that are easy to verify as you go.
|
||||
|
||||
Note that plans are not for padding out simple work with filler steps or stating the obvious. The content of your plan should not involve doing anything that you aren't capable of doing (i.e. don't try to test things that you can't test). Do not use plans for simple or single-step queries that you can just do or answer immediately.
|
||||
|
||||
Do not repeat the full contents of the plan after an `update_plan` call — the harness already displays it. Instead, summarize the change made and highlight any important context or next step.
|
||||
|
||||
Before running a command, consider whether or not you have completed the previous step, and make sure to mark it as completed before moving on to the next step. It may be the case that you complete all steps in your plan after a single pass of implementation. If this is the case, you can simply mark all the planned steps as completed. Sometimes, you may need to change plans in the middle of a task: call `update_plan` with the updated plan and make sure to provide an `explanation` of the rationale when doing so.
|
||||
|
||||
Use a plan when:
|
||||
|
||||
- The task is non-trivial and will require multiple actions over a long time horizon.
|
||||
- There are logical phases or dependencies where sequencing matters.
|
||||
- The work has ambiguity that benefits from outlining high-level goals.
|
||||
@@ -51,14 +57,6 @@ Use a plan when:
|
||||
- The user has asked you to use the plan tool (aka "TODOs")
|
||||
- You generate additional steps while working, and plan to do them before yielding to the user
|
||||
|
||||
Skip a plan when:
|
||||
- The task is simple and direct.
|
||||
- Breaking it down would only produce literal or trivial steps.
|
||||
|
||||
Planning steps are called "steps" in the tool, but really they're more like tasks or TODOs. As such they should be very concise descriptions of non-obvious work that an engineer might do like "Write the API spec", then "Update the backend", then "Implement the frontend". On the other hand, it's obvious that you'll usually have to "Explore the codebase" or "Implement the changes", so those are not worth tracking in your plan.
|
||||
|
||||
It may be the case that you complete all steps in your plan after a single pass of implementation. If this is the case, you can simply mark all the planned steps as completed. The content of your plan should not involve doing anything that you aren't capable of doing (i.e. don't try to test things that you can't test). Do not use plans for simple or single-step queries that you can just do or answer immediately.
|
||||
|
||||
### Examples
|
||||
|
||||
**High-quality plans**
|
||||
@@ -115,10 +113,11 @@ If you need to write a plan, only write high quality plans, not low quality ones
|
||||
You are a coding agent. Please keep going until the query is completely resolved, before ending your turn and yielding back to the user. Only terminate your turn when you are sure that the problem is solved. Autonomously resolve the query to the best of your ability, using the tools available to you, before coming back to the user. Do NOT guess or make up an answer.
|
||||
|
||||
You MUST adhere to the following criteria when solving queries:
|
||||
|
||||
- Working on the repo(s) in the current environment is allowed, even if they are proprietary.
|
||||
- Analyzing code for vulnerabilities is allowed.
|
||||
- Showing user code and tool call details is allowed.
|
||||
- Use the `apply_patch` tool to edit files (NEVER try `applypatch` or `apply-patch`, only `apply_patch`): {"command":["apply_patch","*** Begin Patch\\n*** Update File: path/to/file.py\\n@@ def example():\\n- pass\\n+ return 123\\n*** End Patch"]}
|
||||
- Use the `apply_patch` tool to edit files (NEVER try `applypatch` or `apply-patch`, only `apply_patch`): {"command":["apply_patch","*** Begin Patch\\n*** Update File: path/to/file.py\\n@@ def example():\\n- pass\\n+ return 123\\n*** End Patch"]}
|
||||
|
||||
If completing the user's task requires writing or modifying files, your code and final answer should follow these coding guidelines, though user instructions (i.e. AGENTS.md) may override these guidelines:
|
||||
|
||||
@@ -148,21 +147,25 @@ For all of testing, running, building, and formatting, do not attempt to fix unr
|
||||
The Codex CLI harness supports several different sandboxing, and approval configurations that the user can choose from.
|
||||
|
||||
Filesystem sandboxing prevents you from editing files without user approval. The options are:
|
||||
- *read-only*: You can only read files.
|
||||
- *workspace-write*: You can read files. You can write to files in your workspace folder, but not outside it.
|
||||
- *danger-full-access*: No filesystem sandboxing.
|
||||
|
||||
- **read-only**: You can only read files.
|
||||
- **workspace-write**: You can read files. You can write to files in your workspace folder, but not outside it.
|
||||
- **danger-full-access**: No filesystem sandboxing.
|
||||
|
||||
Network sandboxing prevents you from accessing network without approval. Options are
|
||||
- *ON*
|
||||
- *OFF*
|
||||
|
||||
- **restricted**
|
||||
- **enabled**
|
||||
|
||||
Approvals are your mechanism to get user consent to perform more privileged actions. Although they introduce friction to the user because your work is paused until the user responds, you should leverage them to accomplish your important work. Do not let these settings or the sandbox deter you from attempting to accomplish the user's task. Approval options are
|
||||
- *untrusted*: The harness will escalate most commands for user approval, apart from a limited allowlist of safe "read" commands.
|
||||
- *on-failure*: The harness will allow all commands to run in the sandbox (if enabled), and failures will be escalated to the user for approval to run again without the sandbox.
|
||||
- *on-request*: Commands will be run in the sandbox by default, and you can specify in your tool call if you want to escalate a command to run without sandboxing. (Note that this mode is not always available. If it is, you'll see parameters for it in the `shell` command description.)
|
||||
- *never*: This is a non-interactive mode where you may NEVER ask the user for approval to run commands. Instead, you must always persist and work around constraints to solve the task for the user. You MUST do your utmost best to finish the task and validate your work before yielding. If this mode is pared with `danger-full-access`, take advantage of it to deliver the best outcome for the user. Further, in this mode, your default testing philosophy is overridden: Even if you don't see local patterns for testing, you may add tests and scripts to validate your work. Just remove them before yielding.
|
||||
|
||||
- **untrusted**: The harness will escalate most commands for user approval, apart from a limited allowlist of safe "read" commands.
|
||||
- **on-failure**: The harness will allow all commands to run in the sandbox (if enabled), and failures will be escalated to the user for approval to run again without the sandbox.
|
||||
- **on-request**: Commands will be run in the sandbox by default, and you can specify in your tool call if you want to escalate a command to run without sandboxing. (Note that this mode is not always available. If it is, you'll see parameters for it in the `shell` command description.)
|
||||
- **never**: This is a non-interactive mode where you may NEVER ask the user for approval to run commands. Instead, you must always persist and work around constraints to solve the task for the user. You MUST do your utmost best to finish the task and validate your work before yielding. If this mode is pared with `danger-full-access`, take advantage of it to deliver the best outcome for the user. Further, in this mode, your default testing philosophy is overridden: Even if you don't see local patterns for testing, you may add tests and scripts to validate your work. Just remove them before yielding.
|
||||
|
||||
When you are running with approvals `on-request`, and sandboxing enabled, here are scenarios where you'll need to request approval:
|
||||
|
||||
- You need to run a command that writes to a directory that requires it (e.g. running tests that write to /tmp)
|
||||
- You need to run a GUI app (e.g., open/xdg-open/osascript) to open browsers or files.
|
||||
- You are running sandboxed and need to run a command that requires network access (e.g. installing packages)
|
||||
@@ -207,6 +210,7 @@ Brevity is very important as a default. You should be very concise (i.e. no more
You are producing plain text that will later be styled by the CLI. Follow these rules exactly. Formatting should make results easy to scan, but not feel mechanical. Use judgment to decide how much structure adds value.

**Section Headers**

- Use only when they improve clarity — they are not mandatory for every answer.
- Choose descriptive names that fit the content.
- Keep headers short (1–3 words) and in `**Title Case**`. Always start headers with `**` and end with `**`.
@@ -214,6 +218,7 @@ You are producing plain text that will later be styled by the CLI. Follow these
- Section headers should only be used where they genuinely improve scannability; avoid fragmenting the answer.

**Bullets**

- Use `-` followed by a space for every bullet.
- Bold the keyword, then colon + concise description.
- Merge related points when possible; avoid a bullet for every trivial detail.
@@ -222,11 +227,13 @@ You are producing plain text that will later be styled by the CLI. Follow these
- Use consistent keyword phrasing and formatting across sections.

**Monospace**

- Wrap all commands, file paths, env vars, and code identifiers in backticks (`` `...` ``).
- Apply to inline examples and to bullet keywords if the keyword itself is a literal file/command.
- Never mix monospace and bold markers; choose one based on whether it’s a keyword (`**`) or inline code/path (`` ` ``).

**Structure**

- Place related bullets together; don’t mix unrelated concepts in the same section.
- Order sections from general → specific → supporting info.
- For subsections (e.g., “Binaries” under “Rust Workspace”), introduce with a bolded keyword bullet, then list items under it.
@@ -235,6 +242,7 @@ You are producing plain text that will later be styled by the CLI. Follow these
- Simple results → minimal headers, possibly just a short list or paragraph.

**Tone**

- Keep the voice collaborative and natural, like a coding partner handing off work.
- Be concise and factual — no filler or conversational commentary, and avoid unnecessary repetition.
- Use present tense and active voice (e.g., “Runs tests” not “This will run tests”).
@@ -242,6 +250,7 @@ You are producing plain text that will later be styled by the CLI. Follow these
- Use parallel structure in lists for consistency.

**Don’t**

- Don’t use literal words “bold” or “monospace” in the content.
- Don’t nest bullets or create deep hierarchies.
- Don’t output ANSI escape codes directly — the CLI renderer applies them.
@@ -252,68 +261,14 @@ Generally, ensure your final answers adapt their shape and depth to the request.

For casual greetings, acknowledgements, or other one-off conversational messages that are not delivering substantive information or structured results, respond naturally without section headers or bullet formatting.
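A purely illustrative sketch of a final answer shaped by these rules (the file and command names are made up):

```
**Changes**
- `src/parser.rs`: fixes the off-by-one in `parse_header`.
- `cargo test`: passes locally.

**Next Steps**
- Consider a regression test for empty input.
```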
# Tools
# Tool Guidelines

## `apply_patch`
## Shell commands

Your patch language is a stripped‑down, file‑oriented diff format designed to be easy to parse and safe to apply. You can think of it as a high‑level envelope:
When using the shell, you must adhere to the following guidelines:

*** Begin Patch
[ one or more file sections ]
*** End Patch

Within that envelope, you get a sequence of file operations.
You MUST include a header to specify the action you are taking.
Each operation starts with one of three headers:

*** Add File: <path> - create a new file. Every following line is a + line (the initial contents).
*** Delete File: <path> - remove an existing file. Nothing follows.
*** Update File: <path> - patch an existing file in place (optionally with a rename).

May be immediately followed by *** Move to: <new path> if you want to rename the file.
Then one or more “hunks”, each introduced by @@ (optionally followed by a hunk header).
Within a hunk each line starts with:

+ for inserted text,
- for removed text, or
space ( ) for context.
At the end of a truncated hunk you can emit *** End of File.

Patch := Begin { FileOp } End
Begin := "*** Begin Patch" NEWLINE
End := "*** End Patch" NEWLINE
FileOp := AddFile | DeleteFile | UpdateFile
AddFile := "*** Add File: " path NEWLINE { "+" line NEWLINE }
DeleteFile := "*** Delete File: " path NEWLINE
UpdateFile := "*** Update File: " path NEWLINE [ MoveTo ] { Hunk }
MoveTo := "*** Move to: " newPath NEWLINE
Hunk := "@@" [ header ] NEWLINE { HunkLine } [ "*** End of File" NEWLINE ]
HunkLine := (" " | "-" | "+") text NEWLINE

A full patch can combine several operations:

*** Begin Patch
*** Add File: hello.txt
+Hello world
*** Update File: src/app.py
*** Move to: src/main.py
@@ def greet():
-print("Hi")
+print("Hello, world!")
*** Delete File: obsolete.txt
*** End Patch

It is important to remember:

- You must include a header with your intended action (Add/Delete/Update)
- You must prefix new lines with `+` even when creating a new file

You can invoke apply_patch like:

```
shell {"command":["apply_patch","*** Begin Patch\n*** Add File: hello.txt\n+Hello, world!\n*** End Patch\n"]}
```
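An `Update File` operation can be invoked the same way; a minimal sketch (the path and hunk here are illustrative):

```
shell {"command":["apply_patch","*** Begin Patch\n*** Update File: src/main.py\n@@ def greet():\n-print(\"Hi\")\n+print(\"Hello, world!\")\n*** End Patch\n"]}
```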
- When searching for text or files, prefer using `rg` or `rg --files` respectively because `rg` is much faster than alternatives like `grep`. (If the `rg` command is not found, then use alternatives.)
- Read files in chunks with a max chunk size of 250 lines. Do not use python scripts to attempt to output larger chunks of a file. Command line output will be truncated after 10 kilobytes or 256 lines of output, regardless of the command used. One way to page through a file in such chunks is sketched below.
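A sketch of chunked reading with a standard tool (any equivalent paging command works; the path is made up):

```
sed -n '1,250p' src/app.py
sed -n '251,500p' src/app.py
```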

## `update_plan`

@@ -1,14 +1,14 @@
use crate::codex::Session;
use crate::models::FunctionCallOutputPayload;
use crate::models::ResponseInputItem;
use crate::codex::TurnContext;
use crate::protocol::FileChange;
use crate::protocol::ReviewDecision;
use crate::safety::SafetyCheck;
use crate::safety::assess_patch_safety;
use codex_apply_patch::ApplyPatchAction;
use codex_apply_patch::ApplyPatchFileChange;
use codex_protocol::models::FunctionCallOutputPayload;
use codex_protocol::models::ResponseInputItem;
use std::collections::HashMap;
use std::path::Path;
use std::path::PathBuf;

pub const CODEX_APPLY_PATCH_ARG1: &str = "--codex-run-as-apply-patch";
@@ -41,21 +41,16 @@ impl From<ResponseInputItem> for InternalApplyPatchInvocation {

pub(crate) async fn apply_patch(
sess: &Session,
turn_context: &TurnContext,
sub_id: &str,
call_id: &str,
action: ApplyPatchAction,
) -> InternalApplyPatchInvocation {
let writable_roots_snapshot = {
#[allow(clippy::unwrap_used)]
let guard = sess.writable_roots.lock().unwrap();
guard.clone()
};

match assess_patch_safety(
&action,
sess.approval_policy,
&writable_roots_snapshot,
&sess.cwd,
turn_context.approval_policy,
&turn_context.sandbox_policy,
&turn_context.cwd,
) {
SafetyCheck::AutoApprove { .. } => {
InternalApplyPatchInvocation::DelegateToExec(ApplyPatchExec {
@@ -128,30 +123,3 @@ pub(crate) fn convert_apply_patch_to_protocol(
}
result
}

pub(crate) fn get_writable_roots(cwd: &Path) -> Vec<PathBuf> {
let mut writable_roots = Vec::new();
if cfg!(target_os = "macos") {
// On macOS, $TMPDIR is private to the user.
writable_roots.push(std::env::temp_dir());

// Allow pyenv to update its shims directory. Without this, any tool
// that happens to be managed by `pyenv` will fail with an error like:
//
// pyenv: cannot rehash: $HOME/.pyenv/shims isn't writable
//
// which is emitted every time `pyenv` tries to run `rehash` (for
// example, after installing a new Python package that drops an entry
// point). Although the sandbox is intentionally read‑only by default,
// writing to the user's local `pyenv` directory is safe because it
// is already user‑writable and scoped to the current user account.
if let Ok(home_dir) = std::env::var("HOME") {
let pyenv_dir = PathBuf::from(home_dir).join(".pyenv");
writable_roots.push(pyenv_dir);
}
}

writable_roots.push(cwd.to_path_buf());

writable_roots
}

@@ -132,7 +132,6 @@ fn parse_plain_command_from_node(cmd: tree_sitter::Node, src: &str) -> Option<Ve

#[cfg(test)]
mod tests {
#![allow(clippy::unwrap_used)]
use super::*;

fn parse_seq(src: &str) -> Option<Vec<Vec<String>>> {

@@ -22,11 +22,11 @@ use crate::client_common::ResponseStream;
use crate::error::CodexErr;
use crate::error::Result;
use crate::model_family::ModelFamily;
use crate::models::ContentItem;
use crate::models::ReasoningItemContent;
use crate::models::ResponseItem;
use crate::openai_tools::create_tools_json_for_chat_completions_api;
use crate::util::backoff;
use codex_protocol::models::ContentItem;
use codex_protocol::models::ReasoningItemContent;
use codex_protocol::models::ResponseItem;

/// Implementation for the classic Chat Completions API.
pub(crate) async fn stream_chat_completions(
@@ -102,6 +102,33 @@ pub(crate) async fn stream_chat_completions(
"content": output.content,
}));
}
ResponseItem::CustomToolCall {
id,
call_id: _,
name,
input,
status: _,
} => {
messages.push(json!({
"role": "assistant",
"content": null,
"tool_calls": [{
"id": id,
"type": "custom",
"custom": {
"name": name,
"input": input,
}
}]
}));
}
ResponseItem::CustomToolCallOutput { call_id, output } => {
messages.push(json!({
"role": "tool",
"tool_call_id": call_id,
"content": output,
}));
}
ResponseItem::Reasoning { .. } | ResponseItem::Other => {
// Omit these items from the conversation history.
continue;
@@ -213,7 +240,9 @@ async fn process_chat_sse<S>(
let sse = match timeout(idle_timeout, stream.next()).await {
Ok(Some(Ok(ev))) => ev,
Ok(Some(Err(e))) => {
let _ = tx_event.send(Err(CodexErr::Stream(e.to_string()))).await;
let _ = tx_event
.send(Err(CodexErr::Stream(e.to_string(), None)))
.await;
return;
}
Ok(None) => {
@@ -228,7 +257,10 @@ async fn process_chat_sse<S>(
}
Err(_) => {
let _ = tx_event
.send(Err(CodexErr::Stream("idle timeout waiting for SSE".into())))
.send(Err(CodexErr::Stream(
"idle timeout waiting for SSE".into(),
None,
)))
.await;
return;
}
@@ -285,13 +317,12 @@ async fn process_chat_sse<S>(
.get("delta")
.and_then(|d| d.get("content"))
.and_then(|c| c.as_str())
&& !content.is_empty()
{
if !content.is_empty() {
assistant_text.push_str(content);
let _ = tx_event
.send(Ok(ResponseEvent::OutputTextDelta(content.to_string())))
.await;
}
assistant_text.push_str(content);
let _ = tx_event
.send(Ok(ResponseEvent::OutputTextDelta(content.to_string())))
.await;
}

// Forward any reasoning/thinking deltas if present.
@@ -328,27 +359,25 @@ async fn process_chat_sse<S>(
.get("delta")
.and_then(|d| d.get("tool_calls"))
.and_then(|tc| tc.as_array())
&& let Some(tool_call) = tool_calls.first()
{
if let Some(tool_call) = tool_calls.first() {
// Mark that we have an active function call in progress.
fn_call_state.active = true;
// Mark that we have an active function call in progress.
fn_call_state.active = true;

// Extract call_id if present.
if let Some(id) = tool_call.get("id").and_then(|v| v.as_str()) {
fn_call_state.call_id.get_or_insert_with(|| id.to_string());
// Extract call_id if present.
if let Some(id) = tool_call.get("id").and_then(|v| v.as_str()) {
fn_call_state.call_id.get_or_insert_with(|| id.to_string());
}

// Extract function details if present.
if let Some(function) = tool_call.get("function") {
if let Some(name) = function.get("name").and_then(|n| n.as_str()) {
fn_call_state.name.get_or_insert_with(|| name.to_string());
}

// Extract function details if present.
if let Some(function) = tool_call.get("function") {
if let Some(name) = function.get("name").and_then(|n| n.as_str()) {
fn_call_state.name.get_or_insert_with(|| name.to_string());
}

if let Some(args_fragment) =
function.get("arguments").and_then(|a| a.as_str())
{
fn_call_state.arguments.push_str(args_fragment);
}
if let Some(args_fragment) = function.get("arguments").and_then(|a| a.as_str())
{
fn_call_state.arguments.push_str(args_fragment);
}
}
}
@@ -480,21 +509,23 @@ where
// do NOT emit yet. Forward any other item (e.g. FunctionCall) right
// away so downstream consumers see it.

let is_assistant_delta = matches!(&item, crate::models::ResponseItem::Message { role, .. } if role == "assistant");
let is_assistant_delta = matches!(&item, codex_protocol::models::ResponseItem::Message { role, .. } if role == "assistant");

if is_assistant_delta {
// Only use the final assistant message if we have not
// seen any deltas; otherwise, deltas already built the
// cumulative text and this would duplicate it.
if this.cumulative.is_empty() {
if let crate::models::ResponseItem::Message { content, .. } = &item {
if let Some(text) = content.iter().find_map(|c| match c {
crate::models::ContentItem::OutputText { text } => Some(text),
_ => None,
}) {
this.cumulative.push_str(text);
if this.cumulative.is_empty()
&& let codex_protocol::models::ResponseItem::Message { content, .. } =
&item
&& let Some(text) = content.iter().find_map(|c| match c {
codex_protocol::models::ContentItem::OutputText { text } => {
Some(text)
}
}
_ => None,
})
{
this.cumulative.push_str(text);
}

// Swallow assistant message here; emit on Completed.
@@ -514,26 +545,27 @@ where
if !this.cumulative_reasoning.is_empty()
&& matches!(this.mode, AggregateMode::AggregatedOnly)
{
let aggregated_reasoning = crate::models::ResponseItem::Reasoning {
id: String::new(),
summary: Vec::new(),
content: Some(vec![
crate::models::ReasoningItemContent::ReasoningText {
text: std::mem::take(&mut this.cumulative_reasoning),
},
]),
encrypted_content: None,
};
let aggregated_reasoning =
codex_protocol::models::ResponseItem::Reasoning {
id: String::new(),
summary: Vec::new(),
content: Some(vec![
codex_protocol::models::ReasoningItemContent::ReasoningText {
text: std::mem::take(&mut this.cumulative_reasoning),
},
]),
encrypted_content: None,
};
this.pending
.push_back(ResponseEvent::OutputItemDone(aggregated_reasoning));
emitted_any = true;
}

if !this.cumulative.is_empty() {
let aggregated_message = crate::models::ResponseItem::Message {
let aggregated_message = codex_protocol::models::ResponseItem::Message {
id: None,
role: "assistant".to_string(),
content: vec![crate::models::ContentItem::OutputText {
content: vec![codex_protocol::models::ContentItem::OutputText {
text: std::mem::take(&mut this.cumulative),
}],
};
@@ -588,6 +620,15 @@ where
Poll::Ready(Some(Ok(ResponseEvent::ReasoningSummaryDelta(_)))) => {
continue;
}
Poll::Ready(Some(Ok(ResponseEvent::ReasoningSummaryPartAdded))) => {
continue;
}
Poll::Ready(Some(Ok(ResponseEvent::WebSearchCallBegin { .. }))) => {
return Poll::Ready(Some(Ok(ResponseEvent::WebSearchCallBegin {
call_id: String::new(),
query: None,
})));
}
}
}
}

@@ -1,12 +1,14 @@
use std::io::BufRead;
use std::path::Path;
use std::sync::OnceLock;
use std::time::Duration;

use bytes::Bytes;
use codex_login::AuthManager;
use codex_login::AuthMode;
use codex_login::CodexAuth;
use eventsource_stream::Eventsource;
use futures::prelude::*;
use regex_lite::Regex;
use reqwest::StatusCode;
use serde::Deserialize;
use serde::Serialize;
@@ -26,19 +28,22 @@ use crate::client_common::ResponseEvent;
use crate::client_common::ResponseStream;
use crate::client_common::ResponsesApiRequest;
use crate::client_common::create_reasoning_param_for_request;
use crate::client_common::create_text_param_for_request;
use crate::config::Config;
use crate::config_types::ReasoningEffort as ReasoningEffortConfig;
use crate::config_types::ReasoningSummary as ReasoningSummaryConfig;
use crate::error::CodexErr;
use crate::error::Result;
use crate::error::UsageLimitReachedError;
use crate::flags::CODEX_RS_SSE_FIXTURE;
use crate::model_family::ModelFamily;
use crate::model_provider_info::ModelProviderInfo;
use crate::model_provider_info::WireApi;
use crate::models::ResponseItem;
use crate::openai_tools::create_tools_json_for_responses_api;
use crate::protocol::TokenUsage;
use crate::user_agent::get_codex_user_agent;
use crate::util::backoff;
use codex_protocol::config_types::ReasoningEffort as ReasoningEffortConfig;
use codex_protocol::config_types::ReasoningSummary as ReasoningSummaryConfig;
use codex_protocol::models::ResponseItem;
use std::sync::Arc;

#[derive(Debug, Deserialize)]
@@ -48,13 +53,15 @@ struct ErrorResponse {

#[derive(Debug, Deserialize)]
struct Error {
r#type: String,
r#type: Option<String>,
code: Option<String>,
message: Option<String>,
}

#[derive(Clone)]
#[derive(Debug, Clone)]
pub struct ModelClient {
config: Arc<Config>,
auth: Option<CodexAuth>,
auth_manager: Option<Arc<AuthManager>>,
client: reqwest::Client,
provider: ModelProviderInfo,
session_id: Uuid,
@@ -65,7 +72,7 @@ pub struct ModelClient {
impl ModelClient {
pub fn new(
config: Arc<Config>,
auth: Option<CodexAuth>,
auth_manager: Option<Arc<AuthManager>>,
provider: ModelProviderInfo,
effort: ReasoningEffortConfig,
summary: ReasoningSummaryConfig,
@@ -73,7 +80,7 @@ impl ModelClient {
) -> Self {
Self {
config,
auth,
auth_manager,
client: reqwest::Client::new(),
provider,
session_id,
@@ -134,14 +141,29 @@ impl ModelClient {
return stream_from_fixture(path, self.provider.clone()).await;
}

let auth = self.auth.clone();
let auth_manager = self.auth_manager.clone();
let auth = auth_manager.as_ref().and_then(|m| m.auth());

let auth_mode = auth.as_ref().map(|a| a.mode);

let store = prompt.store && auth_mode != Some(AuthMode::ChatGPT);

let full_instructions = prompt.get_full_instructions(&self.config.model_family);
let tools_json = create_tools_json_for_responses_api(&prompt.tools)?;
let mut tools_json = create_tools_json_for_responses_api(&prompt.tools)?;
// ChatGPT backend expects the preview name for web search.
if auth_mode == Some(AuthMode::ChatGPT) {
for tool in &mut tools_json {
if let Some(map) = tool.as_object_mut()
&& map.get("type").and_then(|v| v.as_str()) == Some("web_search")
{
map.insert(
"type".to_string(),
serde_json::Value::String("web_search_preview".to_string()),
);
}
}
}

let reasoning = create_reasoning_param_for_request(
&self.config.model_family,
self.effort,
@@ -158,6 +180,19 @@ impl ModelClient {

let input_with_instructions = prompt.get_formatted_input();

// Only include `text.verbosity` for GPT-5 family models
let text = if self.config.model_family.family == "gpt-5" {
create_text_param_for_request(self.config.model_verbosity)
} else {
if self.config.model_verbosity.is_some() {
warn!(
"model_verbosity is set but ignored for non-gpt-5 model family: {}",
self.config.model_family.family
);
}
None
};

let payload = ResponsesApiRequest {
model: &self.config.model,
instructions: &full_instructions,
@@ -170,6 +205,7 @@ impl ModelClient {
stream: true,
include,
prompt_cache_key: Some(self.session_id.to_string()),
text,
};

let mut attempt = 0;
@@ -202,12 +238,9 @@ impl ModelClient {
req_builder = req_builder.header("chatgpt-account-id", account_id);
}

let originator = self
.config
.internal_originator
.as_deref()
.unwrap_or("codex_cli_rs");
let originator = &self.config.responses_originator_header;
req_builder = req_builder.header("originator", originator);
req_builder = req_builder.header("User-Agent", get_codex_user_agent(Some(originator)));

let res = req_builder.send().await;
if let Ok(resp) = &res {
@@ -245,6 +278,13 @@ impl ModelClient {
.and_then(|v| v.to_str().ok())
.and_then(|s| s.parse::<u64>().ok());

if status == StatusCode::UNAUTHORIZED
&& let Some(manager) = auth_manager.as_ref()
&& manager.auth().is_some()
{
let _ = manager.refresh_token().await;
}

// The OpenAI Responses endpoint returns structured JSON bodies even for 4xx/5xx
// errors. When we bubble early with only the HTTP status the caller sees an opaque
// "unexpected status 400 Bad Request" which makes debugging nearly impossible.
@@ -252,7 +292,10 @@ impl ModelClient {
// exact error message (e.g. "Unknown parameter: 'input[0].metadata'"). The body is
// small and this branch only runs on error paths so the extra allocation is
// negligible.
if !(status == StatusCode::TOO_MANY_REQUESTS || status.is_server_error()) {
if !(status == StatusCode::TOO_MANY_REQUESTS
|| status == StatusCode::UNAUTHORIZED
|| status.is_server_error())
{
// Surface the error body to callers. Use `unwrap_or_default` per Clippy.
let body = res.text().await.unwrap_or_default();
return Err(CodexErr::UnexpectedStatus(status, body));
@@ -261,14 +304,18 @@ impl ModelClient {
if status == StatusCode::TOO_MANY_REQUESTS {
let body = res.json::<ErrorResponse>().await.ok();
if let Some(ErrorResponse {
error: Error { r#type, .. },
error:
Error {
r#type: Some(error_type),
..
},
}) = body
{
if r#type == "usage_limit_reached" {
if error_type == "usage_limit_reached" {
return Err(CodexErr::UsageLimitReached(UsageLimitReachedError {
plan_type: auth.and_then(|a| a.get_plan_type()),
}));
} else if r#type == "usage_not_included" {
} else if error_type == "usage_not_included" {
return Err(CodexErr::UsageNotIncluded);
}
}
@@ -301,6 +348,30 @@ impl ModelClient {
pub fn get_provider(&self) -> ModelProviderInfo {
self.provider.clone()
}

/// Returns the currently configured model slug.
pub fn get_model(&self) -> String {
self.config.model.clone()
}

/// Returns the currently configured model family.
pub fn get_model_family(&self) -> ModelFamily {
self.config.model_family.clone()
}

/// Returns the current reasoning effort setting.
pub fn get_reasoning_effort(&self) -> ReasoningEffortConfig {
self.effort
}

/// Returns the current reasoning summary setting.
pub fn get_reasoning_summary(&self) -> ReasoningSummaryConfig {
self.summary
}

pub fn get_auth_manager(&self) -> Option<Arc<AuthManager>> {
self.auth_manager.clone()
}
}

#[derive(Debug, Deserialize, Serialize)]
@@ -364,13 +435,14 @@ async fn process_sse<S>(
// If the stream stays completely silent for an extended period treat it as disconnected.
// The response id returned from the "complete" message.
let mut response_completed: Option<ResponseCompleted> = None;
let mut response_error: Option<CodexErr> = None;

loop {
let sse = match timeout(idle_timeout, stream.next()).await {
Ok(Some(Ok(sse))) => sse,
Ok(Some(Err(e))) => {
debug!("SSE Error: {e:#}");
let event = CodexErr::Stream(e.to_string());
let event = CodexErr::Stream(e.to_string(), None);
let _ = tx_event.send(Err(event)).await;
return;
}
@@ -388,9 +460,10 @@ async fn process_sse<S>(
}
None => {
let _ = tx_event
.send(Err(CodexErr::Stream(
.send(Err(response_error.unwrap_or(CodexErr::Stream(
"stream closed before response.completed".into(),
)))
None,
))))
.await;
}
}
@@ -398,13 +471,17 @@ async fn process_sse<S>(
}
Err(_) => {
let _ = tx_event
.send(Err(CodexErr::Stream("idle timeout waiting for SSE".into())))
.send(Err(CodexErr::Stream(
"idle timeout waiting for SSE".into(),
None,
)))
.await;
return;
}
};

trace!("SSE event: {}", sse.data);
let raw = sse.data.clone();
trace!("SSE event: {}", raw);

let event: SseEvent = match serde_json::from_str(&sse.data) {
Ok(event) => event,
@@ -476,15 +553,25 @@ async fn process_sse<S>(
}
"response.failed" => {
if let Some(resp_val) = event.response {
let error = resp_val
.get("error")
.and_then(|v| v.get("message"))
.and_then(|v| v.as_str())
.unwrap_or("response.failed event received");
response_error = Some(CodexErr::Stream(
"response.failed event received".to_string(),
None,
));

let _ = tx_event
.send(Err(CodexErr::Stream(error.to_string())))
.await;
let error = resp_val.get("error");

if let Some(error) = error {
match serde_json::from_value::<Error>(error.clone()) {
Ok(error) => {
let delay = try_parse_retry_after(&error);
let message = error.message.unwrap_or_default();
response_error = Some(CodexErr::Stream(message, delay));
}
Err(e) => {
debug!("failed to parse ErrorResponse: {e}");
}
}
}
}
}
// Final response completed – includes array of output items & id
@@ -503,15 +590,39 @@ async fn process_sse<S>(
}
"response.content_part.done"
| "response.function_call_arguments.delta"
| "response.custom_tool_call_input.delta"
| "response.custom_tool_call_input.done" // also emitted as response.output_item.done
| "response.in_progress"
| "response.output_item.added"
| "response.output_text.done"
| "response.reasoning_summary_part.added"
| "response.reasoning_summary_text.done" => {
// Currently, we ignore these events, but we handle them
// separately to skip the logging message in the `other` case.
| "response.output_text.done" => {
if event.kind == "response.output_item.added"
&& let Some(item) = event.item.as_ref()
{
// Detect web_search_call begin and forward a synthetic event upstream.
if let Some(ty) = item.get("type").and_then(|v| v.as_str())
&& ty == "web_search_call"
{
let call_id = item
.get("id")
.and_then(|v| v.as_str())
.unwrap_or("")
.to_string();
let ev = ResponseEvent::WebSearchCallBegin { call_id, query: None };
if tx_event.send(Ok(ev)).await.is_err() {
return;
}
}
}
}
other => debug!(other, "sse event"),
"response.reasoning_summary_part.added" => {
// Boundary between reasoning summary sections (e.g., titles).
let event = ResponseEvent::ReasoningSummaryPartAdded;
if tx_event.send(Ok(event)).await.is_err() {
return;
}
}
"response.reasoning_summary_text.done" => {}
_ => {}
}
}
}
@@ -542,10 +653,42 @@ async fn stream_from_fixture(
Ok(ResponseStream { rx_event })
}

fn rate_limit_regex() -> &'static Regex {
static RE: OnceLock<Regex> = OnceLock::new();

#[expect(clippy::unwrap_used)]
RE.get_or_init(|| Regex::new(r"Please try again in (\d+(?:\.\d+)?)(s|ms)").unwrap())
}

fn try_parse_retry_after(err: &Error) -> Option<Duration> {
if err.code != Some("rate_limit_exceeded".to_string()) {
return None;
}

// parse the Please try again in 1.898s format using regex
let re = rate_limit_regex();
if let Some(message) = &err.message
&& let Some(captures) = re.captures(message)
{
let seconds = captures.get(1);
let unit = captures.get(2);

if let (Some(value), Some(unit)) = (seconds, unit) {
let value = value.as_str().parse::<f64>().ok()?;
let unit = unit.as_str();

if unit == "s" {
return Some(Duration::from_secs_f64(value));
} else if unit == "ms" {
return Some(Duration::from_millis(value as u64));
}
}
}
None
}

#[cfg(test)]
mod tests {
#![allow(clippy::expect_used, clippy::unwrap_used)]

use super::*;
use serde_json::json;
use tokio::sync::mpsc;
@@ -727,13 +870,49 @@ mod tests {
matches!(events[0], Ok(ResponseEvent::OutputItemDone(_)));

match &events[1] {
Err(CodexErr::Stream(msg)) => {
Err(CodexErr::Stream(msg, _)) => {
assert_eq!(msg, "stream closed before response.completed")
}
other => panic!("unexpected second event: {other:?}"),
}
}

#[tokio::test]
async fn error_when_error_event() {
let raw_error = r#"{"type":"response.failed","sequence_number":3,"response":{"id":"resp_689bcf18d7f08194bf3440ba62fe05d803fee0cdac429894","object":"response","created_at":1755041560,"status":"failed","background":false,"error":{"code":"rate_limit_exceeded","message":"Rate limit reached for gpt-5 in organization org-AAA on tokens per min (TPM): Limit 30000, Used 22999, Requested 12528. Please try again in 11.054s. Visit https://platform.openai.com/account/rate-limits to learn more."}, "usage":null,"user":null,"metadata":{}}}"#;

let sse1 = format!("event: response.failed\ndata: {raw_error}\n\n");
let provider = ModelProviderInfo {
name: "test".to_string(),
base_url: Some("https://test.com".to_string()),
env_key: Some("TEST_API_KEY".to_string()),
env_key_instructions: None,
wire_api: WireApi::Responses,
query_params: None,
http_headers: None,
env_http_headers: None,
request_max_retries: Some(0),
stream_max_retries: Some(0),
stream_idle_timeout_ms: Some(1000),
requires_openai_auth: false,
};

let events = collect_events(&[sse1.as_bytes()], provider).await;

assert_eq!(events.len(), 1);

match &events[0] {
Err(CodexErr::Stream(msg, delay)) => {
assert_eq!(
msg,
"Rate limit reached for gpt-5 in organization org-AAA on tokens per min (TPM): Limit 30000, Used 22999, Requested 12528. Please try again in 11.054s. Visit https://platform.openai.com/account/rate-limits to learn more."
);
assert_eq!(*delay, Some(Duration::from_secs_f64(11.054)));
}
other => panic!("unexpected second event: {other:?}"),
}
}

// ────────────────────────────
// Table-driven test from `main`
// ────────────────────────────
@@ -832,4 +1011,27 @@ mod tests {
);
}
}

#[test]
fn test_try_parse_retry_after() {
let err = Error {
r#type: None,
message: Some("Rate limit reached for gpt-5 in organization org- on tokens per min (TPM): Limit 1, Used 1, Requested 19304. Please try again in 28ms. Visit https://platform.openai.com/account/rate-limits to learn more.".to_string()),
code: Some("rate_limit_exceeded".to_string()),
};

let delay = try_parse_retry_after(&err);
assert_eq!(delay, Some(Duration::from_millis(28)));
}

#[test]
fn test_try_parse_retry_after_no_delay() {
let err = Error {
r#type: None,
message: Some("Rate limit reached for gpt-5 in organization <ORG> on tokens per min (TPM): Limit 30000, Used 6899, Requested 24050. Please try again in 1.898s. Visit https://platform.openai.com/account/rate-limits to learn more.".to_string()),
code: Some("rate_limit_exceeded".to_string()),
};
let delay = try_parse_retry_after(&err);
assert_eq!(delay, Some(Duration::from_secs_f64(1.898)));
}
}

@@ -1,19 +1,16 @@
use crate::config_types::ReasoningEffort as ReasoningEffortConfig;
use crate::config_types::ReasoningSummary as ReasoningSummaryConfig;
use crate::config_types::Verbosity as VerbosityConfig;
use crate::error::Result;
use crate::model_family::ModelFamily;
use crate::models::ContentItem;
use crate::models::ResponseItem;
use crate::openai_tools::OpenAiTool;
use crate::protocol::AskForApproval;
use crate::protocol::SandboxPolicy;
use crate::protocol::TokenUsage;
use codex_apply_patch::APPLY_PATCH_TOOL_INSTRUCTIONS;
use codex_protocol::config_types::ReasoningEffort as ReasoningEffortConfig;
use codex_protocol::config_types::ReasoningSummary as ReasoningSummaryConfig;
use codex_protocol::models::ContentItem;
use codex_protocol::models::ResponseItem;
use futures::Stream;
use serde::Serialize;
use std::borrow::Cow;
use std::fmt::Display;
use std::path::PathBuf;
use std::pin::Pin;
use std::task::Context;
use std::task::Poll;
@@ -23,62 +20,19 @@ use tokio::sync::mpsc;
/// with this content.
const BASE_INSTRUCTIONS: &str = include_str!("../prompt.md");

/// wraps environment context message in a tag for the model to parse more easily.
const ENVIRONMENT_CONTEXT_START: &str = "<environment_context>\n\n";
const ENVIRONMENT_CONTEXT_END: &str = "\n\n</environment_context>";

/// wraps user instructions message in a tag for the model to parse more easily.
const USER_INSTRUCTIONS_START: &str = "<user_instructions>\n\n";
const USER_INSTRUCTIONS_END: &str = "\n\n</user_instructions>";

#[derive(Debug, Clone)]
pub(crate) struct EnvironmentContext {
pub cwd: PathBuf,
pub approval_policy: AskForApproval,
pub sandbox_policy: SandboxPolicy,
}

impl Display for EnvironmentContext {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
writeln!(
f,
"Current working directory: {}",
self.cwd.to_string_lossy()
)?;
writeln!(f, "Approval policy: {}", self.approval_policy)?;
writeln!(f, "Sandbox policy: {}", self.sandbox_policy)?;

let network_access = match self.sandbox_policy.clone() {
SandboxPolicy::DangerFullAccess => "enabled",
SandboxPolicy::ReadOnly => "restricted",
SandboxPolicy::WorkspaceWrite { network_access, .. } => {
if network_access {
"enabled"
} else {
"restricted"
}
}
};
writeln!(f, "Network access: {network_access}")?;
Ok(())
}
}

/// API request payload for a single model turn.
/// API request payload for a single model turn
#[derive(Default, Debug, Clone)]
pub struct Prompt {
/// Conversation context input items.
pub input: Vec<ResponseItem>,
/// Optional instructions from the user to amend to the built-in agent
/// instructions.
pub user_instructions: Option<String>,

/// Whether to store response on server side (disable_response_storage = !store).
pub store: bool,

/// A list of key-value pairs that will be added as a developer message
/// for the model to use
pub environment_context: Option<EnvironmentContext>,

/// Tools available to the model, including additional tools sourced from
/// external MCP servers.
pub tools: Vec<OpenAiTool>,
@@ -94,42 +48,36 @@ impl Prompt {
.as_deref()
.unwrap_or(BASE_INSTRUCTIONS);
let mut sections: Vec<&str> = vec![base];
if model.needs_special_apply_patch_instructions {

// When there are no custom instructions, add apply_patch_tool_instructions if either:
// - the model needs special instructions (4.1), or
// - there is no apply_patch tool present
let is_apply_patch_tool_present = self.tools.iter().any(|tool| match tool {
OpenAiTool::Function(f) => f.name == "apply_patch",
OpenAiTool::Freeform(f) => f.name == "apply_patch",
_ => false,
});
if self.base_instructions_override.is_none()
&& (model.needs_special_apply_patch_instructions || !is_apply_patch_tool_present)
{
sections.push(APPLY_PATCH_TOOL_INSTRUCTIONS);
}
Cow::Owned(sections.join("\n"))
}

fn get_formatted_user_instructions(&self) -> Option<String> {
self.user_instructions
.as_ref()
.map(|ui| format!("{USER_INSTRUCTIONS_START}{ui}{USER_INSTRUCTIONS_END}"))
}

fn get_formatted_environment_context(&self) -> Option<String> {
self.environment_context
.as_ref()
.map(|ec| format!("{ENVIRONMENT_CONTEXT_START}{ec}{ENVIRONMENT_CONTEXT_END}"))
}

pub(crate) fn get_formatted_input(&self) -> Vec<ResponseItem> {
let mut input_with_instructions = Vec::with_capacity(self.input.len() + 2);
if let Some(ec) = self.get_formatted_environment_context() {
input_with_instructions.push(ResponseItem::Message {
id: None,
role: "user".to_string(),
content: vec![ContentItem::InputText { text: ec }],
});
self.input.clone()
}

/// Creates a formatted user instructions message from a string
pub(crate) fn format_user_instructions_message(ui: &str) -> ResponseItem {
ResponseItem::Message {
id: None,
role: "user".to_string(),
content: vec![ContentItem::InputText {
text: format!("{USER_INSTRUCTIONS_START}{ui}{USER_INSTRUCTIONS_END}"),
}],
}
if let Some(ui) = self.get_formatted_user_instructions() {
input_with_instructions.push(ResponseItem::Message {
id: None,
role: "user".to_string(),
content: vec![ContentItem::InputText { text: ui }],
});
}
input_with_instructions.extend(self.input.clone());
input_with_instructions
}
}

@@ -144,55 +92,41 @@ pub enum ResponseEvent {
OutputTextDelta(String),
ReasoningSummaryDelta(String),
ReasoningContentDelta(String),
ReasoningSummaryPartAdded,
WebSearchCallBegin {
call_id: String,
query: Option<String>,
},
}

#[derive(Debug, Serialize)]
pub(crate) struct Reasoning {
pub(crate) effort: OpenAiReasoningEffort,
#[serde(skip_serializing_if = "Option::is_none")]
pub(crate) summary: Option<OpenAiReasoningSummary>,
pub(crate) effort: ReasoningEffortConfig,
pub(crate) summary: ReasoningSummaryConfig,
}

/// Controls under the `text` field in the Responses API for GPT-5.
#[derive(Debug, Serialize, Default, Clone, Copy)]
pub(crate) struct TextControls {
#[serde(skip_serializing_if = "Option::is_none")]
pub(crate) verbosity: Option<OpenAiVerbosity>,
}

/// See https://platform.openai.com/docs/guides/reasoning?api-mode=responses#get-started-with-reasoning
#[derive(Debug, Serialize, Default, Clone, Copy)]
#[serde(rename_all = "lowercase")]
pub(crate) enum OpenAiReasoningEffort {
pub(crate) enum OpenAiVerbosity {
Low,
#[default]
Medium,
High,
}

impl From<ReasoningEffortConfig> for Option<OpenAiReasoningEffort> {
fn from(effort: ReasoningEffortConfig) -> Self {
match effort {
ReasoningEffortConfig::Low => Some(OpenAiReasoningEffort::Low),
ReasoningEffortConfig::Medium => Some(OpenAiReasoningEffort::Medium),
ReasoningEffortConfig::High => Some(OpenAiReasoningEffort::High),
ReasoningEffortConfig::None => None,
}
}
}

/// A summary of the reasoning performed by the model. This can be useful for
/// debugging and understanding the model's reasoning process.
/// See https://platform.openai.com/docs/guides/reasoning?api-mode=responses#reasoning-summaries
#[derive(Debug, Serialize, Default, Clone, Copy)]
#[serde(rename_all = "lowercase")]
pub(crate) enum OpenAiReasoningSummary {
#[default]
Auto,
Concise,
Detailed,
}

impl From<ReasoningSummaryConfig> for Option<OpenAiReasoningSummary> {
fn from(summary: ReasoningSummaryConfig) -> Self {
match summary {
ReasoningSummaryConfig::Auto => Some(OpenAiReasoningSummary::Auto),
ReasoningSummaryConfig::Concise => Some(OpenAiReasoningSummary::Concise),
ReasoningSummaryConfig::Detailed => Some(OpenAiReasoningSummary::Detailed),
ReasoningSummaryConfig::None => None,
impl From<VerbosityConfig> for OpenAiVerbosity {
fn from(v: VerbosityConfig) -> Self {
match v {
VerbosityConfig::Low => OpenAiVerbosity::Low,
VerbosityConfig::Medium => OpenAiVerbosity::Medium,
VerbosityConfig::High => OpenAiVerbosity::High,
}
}
}
@@ -217,6 +151,8 @@ pub(crate) struct ResponsesApiRequest<'a> {
pub(crate) include: Vec<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub(crate) prompt_cache_key: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub(crate) text: Option<TextControls>,
}

pub(crate) fn create_reasoning_param_for_request(
@@ -225,17 +161,20 @@ pub(crate) fn create_reasoning_param_for_request(
summary: ReasoningSummaryConfig,
) -> Option<Reasoning> {
if model_family.supports_reasoning_summaries {
let effort: Option<OpenAiReasoningEffort> = effort.into();
let effort = effort?;
Some(Reasoning {
effort,
summary: summary.into(),
})
Some(Reasoning { effort, summary })
} else {
None
}
}

pub(crate) fn create_text_param_for_request(
verbosity: Option<VerbosityConfig>,
) -> Option<TextControls> {
verbosity.map(|v| TextControls {
verbosity: Some(v.into()),
})
}

pub(crate) struct ResponseStream {
pub(crate) rx_event: mpsc::Receiver<Result<ResponseEvent>>,
}
@@ -250,7 +189,6 @@ impl Stream for ResponseStream {

#[cfg(test)]
mod tests {
#![allow(clippy::expect_used)]
use crate::model_family::find_family_for_model;

use super::*;
@@ -258,7 +196,6 @@ mod tests {
#[test]
fn get_full_instructions_no_user_content() {
let prompt = Prompt {
user_instructions: Some("custom instruction".to_string()),
..Default::default()
};
let expected = format!("{BASE_INSTRUCTIONS}\n{APPLY_PATCH_TOOL_INSTRUCTIONS}");
@@ -266,4 +203,57 @@ mod tests {
let full = prompt.get_full_instructions(&model_family);
assert_eq!(full, expected);
}

#[test]
fn serializes_text_verbosity_when_set() {
let input: Vec<ResponseItem> = vec![];
let tools: Vec<serde_json::Value> = vec![];
let req = ResponsesApiRequest {
model: "gpt-5",
instructions: "i",
input: &input,
tools: &tools,
tool_choice: "auto",
parallel_tool_calls: false,
reasoning: None,
store: true,
stream: true,
include: vec![],
prompt_cache_key: None,
text: Some(TextControls {
verbosity: Some(OpenAiVerbosity::Low),
}),
};

let v = serde_json::to_value(&req).expect("json");
assert_eq!(
v.get("text")
.and_then(|t| t.get("verbosity"))
.and_then(|s| s.as_str()),
Some("low")
);
}

#[test]
fn omits_text_when_not_set() {
let input: Vec<ResponseItem> = vec![];
let tools: Vec<serde_json::Value> = vec![];
let req = ResponsesApiRequest {
model: "gpt-5",
instructions: "i",
input: &input,
tools: &tools,
tool_choice: "auto",
parallel_tool_calls: false,
reasoning: None,
store: true,
stream: true,
include: vec![],
prompt_cache_key: None,
text: None,
};

let v = serde_json::to_value(&req).expect("json");
assert!(v.get("text").is_none());
}
}

(One file diff suppressed because it is too large.)

codex-rs/core/src/codex_conversation.rs (new file, 30 lines)
@@ -0,0 +1,30 @@
use crate::codex::Codex;
use crate::error::Result as CodexResult;
use crate::protocol::Event;
use crate::protocol::Op;
use crate::protocol::Submission;

pub struct CodexConversation {
codex: Codex,
}

/// Conduit for the bidirectional stream of messages that compose a conversation
/// in Codex.
impl CodexConversation {
pub(crate) fn new(codex: Codex) -> Self {
Self { codex }
}

pub async fn submit(&self, op: Op) -> CodexResult<String> {
self.codex.submit(op).await
}

/// Use sparingly: this is intended to be removed soon.
pub async fn submit_with_id(&self, sub: Submission) -> CodexResult<()> {
self.codex.submit_with_id(sub).await
}

pub async fn next_event(&self) -> CodexResult<Event> {
self.codex.next_event().await
}
}
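A rough usage sketch for this new wrapper, assuming a `CodexConversation` has already been constructed elsewhere (the import paths and the `Op` variant here are assumptions for illustration, not confirmed by this diff):

```
use codex_core::CodexConversation; // assumed re-export path
use codex_core::error::Result as CodexResult;
use codex_core::protocol::Op;

// Hypothetical driver: submit one op, then pump events off the conduit.
// Real callers would match on specific `EventMsg` variants and break out.
async fn drive(conversation: &CodexConversation) -> CodexResult<()> {
    let _submission_id = conversation.submit(Op::Interrupt).await?;
    loop {
        let event = conversation.next_event().await?;
        println!("event: {event:?}");
    }
}
```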
@@ -1,59 +0,0 @@
use std::sync::Arc;

use crate::Codex;
use crate::CodexSpawnOk;
use crate::config::Config;
use crate::protocol::Event;
use crate::protocol::EventMsg;
use crate::util::notify_on_sigint;
use codex_login::CodexAuth;
use tokio::sync::Notify;
use uuid::Uuid;

/// Represents an active Codex conversation, including the first event
/// (which is [`EventMsg::SessionConfigured`]).
pub struct CodexConversation {
pub codex: Codex,
pub session_id: Uuid,
pub session_configured: Event,
pub ctrl_c: Arc<Notify>,
}

/// Spawn a new [`Codex`] and initialize the session.
///
/// Returns the wrapped [`Codex`] **and** the `SessionInitialized` event that
/// is received as a response to the initial `ConfigureSession` submission so
/// that callers can surface the information to the UI.
pub async fn init_codex(config: Config) -> anyhow::Result<CodexConversation> {
let ctrl_c = notify_on_sigint();
let auth = CodexAuth::from_codex_home(&config.codex_home)?;
let CodexSpawnOk {
codex,
init_id,
session_id,
} = Codex::spawn(config, auth, ctrl_c.clone()).await?;

// The first event must be `SessionInitialized`. Validate and forward it to
// the caller so that they can display it in the conversation history.
let event = codex.next_event().await?;
if event.id != init_id
|| !matches!(
&event,
Event {
id: _id,
msg: EventMsg::SessionConfigured(_),
}
)
{
return Err(anyhow::anyhow!(
"expected SessionInitialized but got {event:?}"
));
}

Ok(CodexConversation {
codex,
session_id,
session_configured: event,
ctrl_c,
})
}
@@ -1,14 +1,13 @@
use crate::config_profile::ConfigProfile;
use crate::config_types::History;
use crate::config_types::McpServerConfig;
use crate::config_types::ReasoningEffort;
use crate::config_types::ReasoningSummary;
use crate::config_types::SandboxMode;
use crate::config_types::SandboxWorkspaceWrite;
use crate::config_types::ShellEnvironmentPolicy;
use crate::config_types::ShellEnvironmentPolicyToml;
use crate::config_types::Tui;
use crate::config_types::UriBasedFileOpener;
use crate::config_types::Verbosity;
use crate::git_info::resolve_root_git_project_for_trust;
use crate::model_family::ModelFamily;
use crate::model_family::find_family_for_model;
use crate::model_provider_info::ModelProviderInfo;
@@ -16,6 +15,10 @@ use crate::model_provider_info::built_in_model_providers;
use crate::openai_model_info::get_model_info;
use crate::protocol::AskForApproval;
use crate::protocol::SandboxPolicy;
use codex_login::AuthMode;
use codex_protocol::config_types::ReasoningEffort;
use codex_protocol::config_types::ReasoningSummary;
use codex_protocol::config_types::SandboxMode;
use dirs::home_dir;
use serde::Deserialize;
use std::collections::HashMap;
@@ -34,6 +37,8 @@ pub(crate) const PROJECT_DOC_MAX_BYTES: usize = 32 * 1024; // 32 KiB

const CONFIG_TOML_FILE: &str = "config.toml";

const DEFAULT_RESPONSES_ORIGINATOR_HEADER: &str = "codex_cli_rs";

/// Application configuration loaded from disk and merged with overrides.
#[derive(Debug, Clone, PartialEq)]
pub struct Config {
@@ -139,14 +144,17 @@ pub struct Config {
/// When this program is invoked, arg0 will be set to `codex-linux-sandbox`.
pub codex_linux_sandbox_exe: Option<PathBuf>,

/// If not "none", the value to use for `reasoning.effort` when making a
/// request using the Responses API.
/// Value to use for `reasoning.effort` when making a request using the
/// Responses API.
pub model_reasoning_effort: ReasoningEffort,

/// If not "none", the value to use for `reasoning.summary` when making a
/// request using the Responses API.
pub model_reasoning_summary: ReasoningSummary,

/// Optional verbosity control for GPT-5 models (Responses API `text.verbosity`).
pub model_verbosity: Option<Verbosity>,

/// Base URL for requests to ChatGPT (as opposed to the OpenAI API).
pub chatgpt_base_url: String,

@@ -156,8 +164,20 @@ pub struct Config {
/// Include an experimental plan tool that the model can use to update its current plan and status of each step.
pub include_plan_tool: bool,

/// Include the `apply_patch` tool for models that benefit from invoking
/// file edits as a structured tool call. When unset, this falls back to the
/// model family's default preference.
pub include_apply_patch_tool: bool,

pub tools_web_search_request: bool,

/// The value for the `originator` header included with Responses API requests.
pub internal_originator: Option<String>,
pub responses_originator_header: String,

/// If set to `true`, the API key will be signed with the `originator` header.
pub preferred_auth_method: AuthMode,

pub use_experimental_streamable_shell_tool: bool,
}

impl Config {
@@ -248,10 +268,61 @@ pub fn set_project_trusted(codex_home: &Path, project_path: &Path) -> anyhow::Re
Err(e) => return Err(e.into()),
};

// Mark the project as trusted. toml_edit is very good at handling
// missing properties
// Ensure we render a human-friendly structure:
//
// [projects]
// [projects."/path/to/project"]
// trust_level = "trusted"
//
// rather than inline tables like:
//
// [projects]
// "/path/to/project" = { trust_level = "trusted" }
let project_key = project_path.to_string_lossy().to_string();
doc["projects"][project_key.as_str()]["trust_level"] = toml_edit::value("trusted");

// Ensure top-level `projects` exists as a non-inline, explicit table. If it
// exists but was previously represented as a non-table (e.g., inline),
// replace it with an explicit table.
let mut created_projects_table = false;
{
let root = doc.as_table_mut();
let needs_table = !root.contains_key("projects")
|| root.get("projects").and_then(|i| i.as_table()).is_none();
if needs_table {
root.insert("projects", toml_edit::table());
created_projects_table = true;
}
}
let Some(projects_tbl) = doc["projects"].as_table_mut() else {
return Err(anyhow::anyhow!(
"projects table missing after initialization"
));
};

// If we created the `projects` table ourselves, keep it implicit so we
// don't render a standalone `[projects]` header.
if created_projects_table {
projects_tbl.set_implicit(true);
}

// Ensure the per-project entry is its own explicit table. If it exists but
// is not a table (e.g., an inline table), replace it with an explicit table.
let needs_proj_table = !projects_tbl.contains_key(project_key.as_str())
|| projects_tbl
.get(project_key.as_str())
.and_then(|i| i.as_table())
.is_none();
if needs_proj_table {
projects_tbl.insert(project_key.as_str(), toml_edit::table());
}
let Some(proj_tbl) = projects_tbl
.get_mut(project_key.as_str())
.and_then(|i| i.as_table_mut())
else {
return Err(anyhow::anyhow!("project table missing for {}", project_key));
};
proj_tbl.set_implicit(false);
proj_tbl["trust_level"] = toml_edit::value("trusted");

// ensure codex_home exists
std::fs::create_dir_all(codex_home)?;
@@ -387,6 +458,8 @@ pub struct ConfigToml {

pub model_reasoning_effort: Option<ReasoningEffort>,
pub model_reasoning_summary: Option<ReasoningSummary>,
/// Optional verbosity control for GPT-5 models (Responses API `text.verbosity`).
pub model_verbosity: Option<Verbosity>,

/// Override to force-enable reasoning summaries for the configured model.
pub model_supports_reasoning_summaries: Option<bool>,
@@ -400,10 +473,18 @@ pub struct ConfigToml {
/// Experimental path to a file whose contents replace the built-in BASE_INSTRUCTIONS.
pub experimental_instructions_file: Option<PathBuf>,

pub experimental_use_exec_command_tool: Option<bool>,

/// The value for the `originator` header included with Responses API requests.
pub internal_originator: Option<String>,
pub responses_originator_header_internal_override: Option<String>,

pub projects: Option<HashMap<String, ProjectConfig>>,

/// If set to `true`, the API key will be signed with the `originator` header.
pub preferred_auth_method: Option<AuthMode>,

/// Nested tools section for feature toggles
pub tools: Option<ToolsToml>,
}

#[derive(Deserialize, Debug, Clone, PartialEq, Eq)]
@@ -411,6 +492,13 @@ pub struct ProjectConfig {
pub trust_level: Option<String>,
}

#[derive(Deserialize, Debug, Clone, Default)]
pub struct ToolsToml {
// Renamed from `web_search_request`; keep alias for backwards compatibility.
#[serde(default, alias = "web_search_request")]
pub web_search: Option<bool>,
}

impl ConfigToml {
/// Derive the effective sandbox policy from the configuration.
fn derive_sandbox_policy(&self, sandbox_mode_override: Option<SandboxMode>) -> SandboxPolicy {
@@ -440,10 +528,27 @@ impl ConfigToml {
|
||||
pub fn is_cwd_trusted(&self, resolved_cwd: &Path) -> bool {
|
||||
let projects = self.projects.clone().unwrap_or_default();
|
||||
|
||||
projects
|
||||
.get(&resolved_cwd.to_string_lossy().to_string())
|
||||
.map(|p| p.trust_level.clone().unwrap_or("".to_string()) == "trusted")
|
||||
.unwrap_or(false)
|
||||
let is_path_trusted = |path: &Path| {
|
||||
let path_str = path.to_string_lossy().to_string();
|
||||
projects
|
||||
.get(&path_str)
|
||||
.map(|p| p.trust_level.as_deref() == Some("trusted"))
|
||||
.unwrap_or(false)
|
||||
};
|
||||
|
||||
// Fast path: exact cwd match
|
||||
if is_path_trusted(resolved_cwd) {
|
||||
return true;
|
||||
}
|
||||
|
||||
// If cwd lives inside a git worktree, check whether the root git project
|
||||
// (the primary repository working directory) is trusted. This lets
|
||||
// worktrees inherit trust from the main project.
|
||||
if let Some(root_project) = resolve_root_git_project_for_trust(resolved_cwd) {
|
||||
return is_path_trusted(&root_project);
|
||||
}
|
||||
|
||||
false
|
||||
}
|
||||
|
||||
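The lookup semantics of `is_cwd_trusted` in miniature: a self-contained sketch with a plain map standing in for `projects` (the paths are hypothetical):

```rust
use std::collections::HashMap;

fn main() {
    let mut projects: HashMap<String, String> = HashMap::new();
    projects.insert("/repos/main".to_string(), "trusted".to_string());

    let is_path_trusted =
        |p: &str| projects.get(p).map(|t| t.as_str() == "trusted").unwrap_or(false);

    assert!(is_path_trusted("/repos/main")); // fast path: exact match
    // A worktree path is not in the map; the real code then falls back to the
    // root project resolved by `resolve_root_git_project_for_trust`.
    assert!(!is_path_trusted("/repos/main-worktree"));
}
```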
    pub fn get_config_profile(
@@ -480,8 +585,10 @@ pub struct ConfigOverrides {
    pub codex_linux_sandbox_exe: Option<PathBuf>,
    pub base_instructions: Option<String>,
    pub include_plan_tool: Option<bool>,
    pub include_apply_patch_tool: Option<bool>,
    pub disable_response_storage: Option<bool>,
    pub show_raw_agent_reasoning: Option<bool>,
    pub tools_web_search_request: Option<bool>,
}

impl Config {
@@ -505,8 +612,10 @@ impl Config {
            codex_linux_sandbox_exe,
            base_instructions,
            include_plan_tool,
            include_apply_patch_tool,
            disable_response_storage,
            show_raw_agent_reasoning,
            tools_web_search_request: override_tools_web_search_request,
        } = overrides;

        let config_profile = match config_profile_key.as_ref().or(cfg.profile.as_ref()) {
@@ -545,7 +654,7 @@ impl Config {
        })?
        .clone();

        let shell_environment_policy = cfg.shell_environment_policy.into();
        let shell_environment_policy = cfg.shell_environment_policy.clone().into();

        let resolved_cwd = {
            use std::env;
@@ -566,7 +675,11 @@ impl Config {
            }
        };

        let history = cfg.history.unwrap_or_default();
        let history = cfg.history.clone().unwrap_or_default();

        let tools_web_search_request = override_tools_web_search_request
            .or(cfg.tools.as_ref().and_then(|t| t.web_search))
            .unwrap_or(false);

        let model = model
            .or(config_profile.model)
@@ -581,6 +694,7 @@ impl Config {
                needs_special_apply_patch_instructions: false,
                supports_reasoning_summaries,
                uses_local_shell_tool: false,
                apply_patch_tool_type: None,
            }
        });

@@ -607,6 +721,10 @@ impl Config {
            Self::get_base_instructions(experimental_instructions_path, &resolved_cwd)?;
        let base_instructions = base_instructions.or(file_base_instructions);

        let responses_originator_header: String = cfg
            .responses_originator_header_internal_override
            .unwrap_or(DEFAULT_RESPONSES_ORIGINATOR_HEADER.to_owned());

        let config = Self {
            model,
            model_family,
@@ -635,7 +753,7 @@ impl Config {
            codex_home,
            history,
            file_opener: cfg.file_opener.unwrap_or(UriBasedFileOpener::VsCode),
            tui: cfg.tui.unwrap_or_default(),
            tui: cfg.tui.clone().unwrap_or_default(),
            codex_linux_sandbox_exe,

            hide_agent_reasoning: cfg.hide_agent_reasoning.unwrap_or(false),
@@ -651,15 +769,21 @@ impl Config {
                .model_reasoning_summary
                .or(cfg.model_reasoning_summary)
                .unwrap_or_default(),

            model_verbosity: config_profile.model_verbosity.or(cfg.model_verbosity),
            chatgpt_base_url: config_profile
                .chatgpt_base_url
                .or(cfg.chatgpt_base_url)
                .or(cfg.chatgpt_base_url.clone())
                .unwrap_or("https://chatgpt.com/backend-api/".to_string()),

            experimental_resume,
            include_plan_tool: include_plan_tool.unwrap_or(false),
            internal_originator: cfg.internal_originator,
            include_apply_patch_tool: include_apply_patch_tool.unwrap_or(false),
            tools_web_search_request,
            responses_originator_header,
            preferred_auth_method: cfg.preferred_auth_method.unwrap_or(AuthMode::ChatGPT),
            use_experimental_streamable_shell_tool: cfg
                .experimental_use_exec_command_tool
                .unwrap_or(false),
        };
        Ok(config)
    }
@@ -739,10 +863,10 @@ fn default_model() -> String {
pub fn find_codex_home() -> std::io::Result<PathBuf> {
    // Honor the `CODEX_HOME` environment variable when it is set to allow users
    // (and tests) to override the default location.
    if let Ok(val) = std::env::var("CODEX_HOME") {
        if !val.is_empty() {
            return PathBuf::from(val).canonicalize();
        }
    if let Ok(val) = std::env::var("CODEX_HOME")
        && !val.is_empty()
    {
        return PathBuf::from(val).canonicalize();
    }

    let mut p = home_dir().ok_or_else(|| {
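The `find_codex_home` change above swaps a nested `if let` + `if` for a single let-chain. A minimal sketch of the same shape (hedged; let-chains require a recent Rust toolchain):

```rust
fn home_from_env(var: Result<String, std::env::VarError>) -> Option<std::path::PathBuf> {
    // One condition instead of two nested blocks: the binding and the
    // emptiness check are evaluated left to right.
    if let Ok(val) = var
        && !val.is_empty()
    {
        return Some(std::path::PathBuf::from(val));
    }
    None
}
```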
@@ -765,7 +889,6 @@ pub fn log_dir(cfg: &Config) -> std::io::Result<PathBuf> {

#[cfg(test)]
mod tests {
    #![allow(clippy::expect_used, clippy::unwrap_used)]
    use crate::config_types::HistoryPersistence;

    use super::*;
@@ -1019,11 +1142,16 @@ disable_response_storage = true
            show_raw_agent_reasoning: false,
            model_reasoning_effort: ReasoningEffort::High,
            model_reasoning_summary: ReasoningSummary::Detailed,
            model_verbosity: None,
            chatgpt_base_url: "https://chatgpt.com/backend-api/".to_string(),
            experimental_resume: None,
            base_instructions: None,
            include_plan_tool: false,
            internal_originator: None,
            include_apply_patch_tool: false,
            tools_web_search_request: false,
            responses_originator_header: "codex_cli_rs".to_string(),
            preferred_auth_method: AuthMode::ChatGPT,
            use_experimental_streamable_shell_tool: false,
        },
        o3_profile_config
    );
@@ -1070,11 +1198,16 @@ disable_response_storage = true
            show_raw_agent_reasoning: false,
            model_reasoning_effort: ReasoningEffort::default(),
            model_reasoning_summary: ReasoningSummary::default(),
            model_verbosity: None,
            chatgpt_base_url: "https://chatgpt.com/backend-api/".to_string(),
            experimental_resume: None,
            base_instructions: None,
            include_plan_tool: false,
            internal_originator: None,
            include_apply_patch_tool: false,
            tools_web_search_request: false,
            responses_originator_header: "codex_cli_rs".to_string(),
            preferred_auth_method: AuthMode::ChatGPT,
            use_experimental_streamable_shell_tool: false,
        };

        assert_eq!(expected_gpt3_profile_config, gpt3_profile_config);
@@ -1136,15 +1269,90 @@ disable_response_storage = true
            show_raw_agent_reasoning: false,
            model_reasoning_effort: ReasoningEffort::default(),
            model_reasoning_summary: ReasoningSummary::default(),
            model_verbosity: None,
            chatgpt_base_url: "https://chatgpt.com/backend-api/".to_string(),
            experimental_resume: None,
            base_instructions: None,
            include_plan_tool: false,
            internal_originator: None,
            include_apply_patch_tool: false,
            tools_web_search_request: false,
            responses_originator_header: "codex_cli_rs".to_string(),
            preferred_auth_method: AuthMode::ChatGPT,
            use_experimental_streamable_shell_tool: false,
        };

        assert_eq!(expected_zdr_profile_config, zdr_profile_config);

        Ok(())
    }

    #[test]
    fn test_set_project_trusted_writes_explicit_tables() -> anyhow::Result<()> {
        let codex_home = TempDir::new().unwrap();
        let project_dir = TempDir::new().unwrap();

        // Call the function under test
        set_project_trusted(codex_home.path(), project_dir.path())?;

        // Read back the generated config.toml and assert exact contents
        let config_path = codex_home.path().join(CONFIG_TOML_FILE);
        let contents = std::fs::read_to_string(&config_path)?;

        let raw_path = project_dir.path().to_string_lossy();
        let path_str = if raw_path.contains('\\') {
            format!("'{}'", raw_path)
        } else {
            format!("\"{}\"", raw_path)
        };
        let expected = format!(
            r#"[projects.{path_str}]
trust_level = "trusted"
"#
        );
        assert_eq!(contents, expected);

        Ok(())
    }

    #[test]
    fn test_set_project_trusted_converts_inline_to_explicit() -> anyhow::Result<()> {
        let codex_home = TempDir::new().unwrap();
        let project_dir = TempDir::new().unwrap();

        // Seed config.toml with an inline project entry under [projects]
        let config_path = codex_home.path().join(CONFIG_TOML_FILE);
        let raw_path = project_dir.path().to_string_lossy();
        let path_str = if raw_path.contains('\\') {
            format!("'{}'", raw_path)
        } else {
            format!("\"{}\"", raw_path)
        };
        // Use a quoted key so backslashes don't require escaping on Windows
        let initial = format!(
            r#"[projects]
{path_str} = {{ trust_level = "untrusted" }}
"#
        );
        std::fs::create_dir_all(codex_home.path())?;
        std::fs::write(&config_path, initial)?;

        // Run the function; it should convert to explicit tables and set trusted
        set_project_trusted(codex_home.path(), project_dir.path())?;

        let contents = std::fs::read_to_string(&config_path)?;

        // Assert exact output after conversion to explicit table
        let expected = format!(
            r#"[projects]

[projects.{path_str}]
trust_level = "trusted"
"#
        );
        assert_eq!(contents, expected);

        Ok(())
    }

    // No test enforcing the presence of a standalone [projects] header.
}

@@ -1,9 +1,10 @@
use serde::Deserialize;
use std::path::PathBuf;

use crate::config_types::ReasoningEffort;
use crate::config_types::ReasoningSummary;
use crate::config_types::Verbosity;
use crate::protocol::AskForApproval;
use codex_protocol::config_types::ReasoningEffort;
use codex_protocol::config_types::ReasoningSummary;

/// Collection of common configuration options that a user can define as a unit
/// in `config.toml`.
@@ -17,6 +18,7 @@ pub struct ConfigProfile {
    pub disable_response_storage: Option<bool>,
    pub model_reasoning_effort: Option<ReasoningEffort>,
    pub model_reasoning_summary: Option<ReasoningSummary>,
    pub model_verbosity: Option<Verbosity>,
    pub chatgpt_base_url: Option<String>,
    pub experimental_instructions_file: Option<PathBuf>,
}

@@ -5,11 +5,11 @@

use std::collections::HashMap;
use std::path::PathBuf;
use strum_macros::Display;
use wildmatch::WildMatchPattern;

use serde::Deserialize;
use serde::Serialize;
use strum_macros::Display;

#[derive(Deserialize, Debug, Clone, PartialEq)]
pub struct McpServerConfig {
@@ -78,20 +78,6 @@ pub enum HistoryPersistence {
#[derive(Deserialize, Debug, Clone, PartialEq, Default)]
pub struct Tui {}

#[derive(Deserialize, Debug, Clone, Copy, PartialEq, Default, Serialize)]
#[serde(rename_all = "kebab-case")]
pub enum SandboxMode {
    #[serde(rename = "read-only")]
    #[default]
    ReadOnly,

    #[serde(rename = "workspace-write")]
    WorkspaceWrite,

    #[serde(rename = "danger-full-access")]
    DangerFullAccess,
}

#[derive(Deserialize, Debug, Clone, PartialEq, Default)]
pub struct SandboxWorkspaceWrite {
    #[serde(default)]
@@ -227,3 +213,15 @@ pub enum ReasoningSummary {
    /// Option to disable reasoning summaries.
    None,
}

/// Controls output length/detail on GPT-5 models via the Responses API.
/// Serialized with lowercase values to match the OpenAI API.
#[derive(Debug, Serialize, Deserialize, Default, Clone, Copy, PartialEq, Eq, Display)]
#[serde(rename_all = "lowercase")]
#[strum(serialize_all = "lowercase")]
pub enum Verbosity {
    Low,
    #[default]
    Medium,
    High,
}

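A hedged round-trip sketch for the new `Verbosity` enum (assuming `serde_json` for the wire format; the Responses API carries these values in `text.verbosity`):

```rust
// Lowercase on the wire in both directions, per the serde/strum attributes.
let v: Verbosity = serde_json::from_str("\"high\"").expect("valid verbosity");
assert_eq!(v, Verbosity::High);
assert_eq!(
    serde_json::to_string(&Verbosity::Medium).expect("serializable"),
    "\"medium\""
);
assert_eq!(Verbosity::Low.to_string(), "low"); // strum Display, also lowercase
```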
@@ -1,4 +1,4 @@
use crate::models::ResponseItem;
use codex_protocol::models::ResponseItem;

/// Transcript of conversation history
#[derive(Debug, Clone, Default)]
@@ -66,7 +66,7 @@ impl ConversationHistory {
        self.items.push(ResponseItem::Message {
            id: None,
            role: "assistant".to_string(),
            content: vec![crate::models::ContentItem::OutputText {
            content: vec![codex_protocol::models::ContentItem::OutputText {
                text: delta.to_string(),
            }],
        });
@@ -110,6 +110,8 @@ fn is_api_message(message: &ResponseItem) -> bool {
        ResponseItem::Message { role, .. } => role.as_str() != "system",
        ResponseItem::FunctionCallOutput { .. }
        | ResponseItem::FunctionCall { .. }
        | ResponseItem::CustomToolCall { .. }
        | ResponseItem::CustomToolCallOutput { .. }
        | ResponseItem::LocalShellCall { .. }
        | ResponseItem::Reasoning { .. } => true,
        ResponseItem::Other => false,
@@ -118,11 +120,11 @@ fn is_api_message(message: &ResponseItem) -> bool {

/// Helper to append the textual content from `src` into `dst` in place.
fn append_text_content(
    dst: &mut Vec<crate::models::ContentItem>,
    src: &Vec<crate::models::ContentItem>,
    dst: &mut Vec<codex_protocol::models::ContentItem>,
    src: &Vec<codex_protocol::models::ContentItem>,
) {
    for c in src {
        if let crate::models::ContentItem::OutputText { text } = c {
        if let codex_protocol::models::ContentItem::OutputText { text } = c {
            append_text_delta(dst, text);
        }
    }
@@ -130,15 +132,15 @@ fn append_text_content(

/// Append a single text delta to the last OutputText item in `content`, or
/// push a new OutputText item if none exists.
fn append_text_delta(content: &mut Vec<crate::models::ContentItem>, delta: &str) {
    if let Some(crate::models::ContentItem::OutputText { text }) = content
fn append_text_delta(content: &mut Vec<codex_protocol::models::ContentItem>, delta: &str) {
    if let Some(codex_protocol::models::ContentItem::OutputText { text }) = content
        .iter_mut()
        .rev()
        .find(|c| matches!(c, crate::models::ContentItem::OutputText { .. }))
        .find(|c| matches!(c, codex_protocol::models::ContentItem::OutputText { .. }))
    {
        text.push_str(delta);
    } else {
        content.push(crate::models::ContentItem::OutputText {
        content.push(codex_protocol::models::ContentItem::OutputText {
            text: delta.to_string(),
        });
    }
@@ -147,7 +149,7 @@ fn append_text_delta(content: &mut Vec<crate::models::ContentItem>, delta: &str)
#[cfg(test)]
mod tests {
    use super::*;
    use crate::models::ContentItem;
    use codex_protocol::models::ContentItem;

    fn assistant_msg(text: &str) -> ResponseItem {
        ResponseItem::Message {

codex-rs/core/src/conversation_manager.rs (new file, 228 lines)
@@ -0,0 +1,228 @@
use std::collections::HashMap;
use std::sync::Arc;

use codex_login::AuthManager;
use codex_login::CodexAuth;
use tokio::sync::RwLock;
use uuid::Uuid;

use crate::codex::Codex;
use crate::codex::CodexSpawnOk;
use crate::codex::INITIAL_SUBMIT_ID;
use crate::codex_conversation::CodexConversation;
use crate::config::Config;
use crate::error::CodexErr;
use crate::error::Result as CodexResult;
use crate::protocol::Event;
use crate::protocol::EventMsg;
use crate::protocol::SessionConfiguredEvent;
use codex_protocol::models::ResponseItem;

/// Represents a newly created Codex conversation, including the first event
/// (which is [`EventMsg::SessionConfigured`]).
pub struct NewConversation {
    pub conversation_id: Uuid,
    pub conversation: Arc<CodexConversation>,
    pub session_configured: SessionConfiguredEvent,
}

/// [`ConversationManager`] is responsible for creating conversations and
/// maintaining them in memory.
pub struct ConversationManager {
    conversations: Arc<RwLock<HashMap<Uuid, Arc<CodexConversation>>>>,
    auth_manager: Arc<AuthManager>,
}

impl ConversationManager {
    pub fn new(auth_manager: Arc<AuthManager>) -> Self {
        Self {
            conversations: Arc::new(RwLock::new(HashMap::new())),
            auth_manager,
        }
    }

    /// Construct with a dummy AuthManager containing the provided CodexAuth.
    /// Used for integration tests: should not be used by ordinary business logic.
    pub fn with_auth(auth: CodexAuth) -> Self {
        Self::new(codex_login::AuthManager::from_auth_for_testing(auth))
    }

    pub async fn new_conversation(&self, config: Config) -> CodexResult<NewConversation> {
        self.spawn_conversation(config, self.auth_manager.clone())
            .await
    }

    async fn spawn_conversation(
        &self,
        config: Config,
        auth_manager: Arc<AuthManager>,
    ) -> CodexResult<NewConversation> {
        let CodexSpawnOk {
            codex,
            session_id: conversation_id,
        } = {
            let initial_history = None;
            Codex::spawn(config, auth_manager, initial_history).await?
        };
        self.finalize_spawn(codex, conversation_id).await
    }

    async fn finalize_spawn(
        &self,
        codex: Codex,
        conversation_id: Uuid,
    ) -> CodexResult<NewConversation> {
        // The first event must be `SessionConfigured`. Validate and forward it
        // to the caller so that they can display it in the conversation
        // history.
        let event = codex.next_event().await?;
        let session_configured = match event {
            Event {
                id,
                msg: EventMsg::SessionConfigured(session_configured),
            } if id == INITIAL_SUBMIT_ID => session_configured,
            _ => {
                return Err(CodexErr::SessionConfiguredNotFirstEvent);
            }
        };

        let conversation = Arc::new(CodexConversation::new(codex));
        self.conversations
            .write()
            .await
            .insert(conversation_id, conversation.clone());

        Ok(NewConversation {
            conversation_id,
            conversation,
            session_configured,
        })
    }

    pub async fn get_conversation(
        &self,
        conversation_id: Uuid,
    ) -> CodexResult<Arc<CodexConversation>> {
        let conversations = self.conversations.read().await;
        conversations
            .get(&conversation_id)
            .cloned()
            .ok_or_else(|| CodexErr::ConversationNotFound(conversation_id))
    }

    /// Fork an existing conversation by dropping the last `num_messages_to_drop`
    /// user/assistant messages from its transcript and starting a new
    /// conversation with identical configuration (unless overridden by the
    /// caller's `config`). The new conversation will have a fresh id.
    pub async fn fork_conversation(
        &self,
        conversation_history: Vec<ResponseItem>,
        num_messages_to_drop: usize,
        config: Config,
    ) -> CodexResult<NewConversation> {
        // Compute the prefix up to the cut point.
        let truncated_history =
            truncate_after_dropping_last_messages(conversation_history, num_messages_to_drop);

        // Spawn a new conversation with the computed initial history.
        let auth_manager = self.auth_manager.clone();
        let CodexSpawnOk {
            codex,
            session_id: conversation_id,
        } = Codex::spawn(config, auth_manager, Some(truncated_history)).await?;

        self.finalize_spawn(codex, conversation_id).await
    }
}

/// Return a prefix of `items` obtained by dropping the last `n` user messages
/// and all items that follow them.
fn truncate_after_dropping_last_messages(items: Vec<ResponseItem>, n: usize) -> Vec<ResponseItem> {
    if n == 0 || items.is_empty() {
        return items;
    }

    // Walk backwards counting only `user` Message items, find cut index.
    let mut count = 0usize;
    let mut cut_index = 0usize;
    for (idx, item) in items.iter().enumerate().rev() {
        if let ResponseItem::Message { role, .. } = item
            && role == "user"
        {
            count += 1;
            if count == n {
                // Cut everything from this user message to the end.
                cut_index = idx;
                break;
            }
        }
    }
    if count < n {
        // If fewer than n messages exist, drop everything.
        Vec::new()
    } else {
        items.into_iter().take(cut_index).collect()
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use codex_protocol::models::ContentItem;
    use codex_protocol::models::ReasoningItemReasoningSummary;
    use codex_protocol::models::ResponseItem;

    fn user_msg(text: &str) -> ResponseItem {
        ResponseItem::Message {
            id: None,
            role: "user".to_string(),
            content: vec![ContentItem::OutputText {
                text: text.to_string(),
            }],
        }
    }
    fn assistant_msg(text: &str) -> ResponseItem {
        ResponseItem::Message {
            id: None,
            role: "assistant".to_string(),
            content: vec![ContentItem::OutputText {
                text: text.to_string(),
            }],
        }
    }

    #[test]
    fn drops_from_last_user_only() {
        let items = vec![
            user_msg("u1"),
            assistant_msg("a1"),
            assistant_msg("a2"),
            user_msg("u2"),
            assistant_msg("a3"),
            ResponseItem::Reasoning {
                id: "r1".to_string(),
                summary: vec![ReasoningItemReasoningSummary::SummaryText {
                    text: "s".to_string(),
                }],
                content: None,
                encrypted_content: None,
            },
            ResponseItem::FunctionCall {
                id: None,
                name: "tool".to_string(),
                arguments: "{}".to_string(),
                call_id: "c1".to_string(),
            },
            assistant_msg("a4"),
        ];

        let truncated = truncate_after_dropping_last_messages(items.clone(), 1);
        assert_eq!(
            truncated,
            vec![items[0].clone(), items[1].clone(), items[2].clone()]
        );

        let truncated2 = truncate_after_dropping_last_messages(items, 2);
        assert!(truncated2.is_empty());
    }
}

codex-rs/core/src/environment_context.rs (new file, 121 lines)
@@ -0,0 +1,121 @@
use serde::Deserialize;
use serde::Serialize;
use strum_macros::Display as DeriveDisplay;

use crate::protocol::AskForApproval;
use crate::protocol::SandboxPolicy;
use crate::shell::Shell;
use codex_protocol::config_types::SandboxMode;
use codex_protocol::models::ContentItem;
use codex_protocol::models::ResponseItem;
use std::path::PathBuf;

/// Wraps the environment context message in a tag for the model to parse more easily.
pub(crate) const ENVIRONMENT_CONTEXT_START: &str = "<environment_context>";
pub(crate) const ENVIRONMENT_CONTEXT_END: &str = "</environment_context>";

#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, DeriveDisplay)]
#[serde(rename_all = "kebab-case")]
#[strum(serialize_all = "kebab-case")]
pub enum NetworkAccess {
    Restricted,
    Enabled,
}
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
#[serde(rename = "environment_context", rename_all = "snake_case")]
pub(crate) struct EnvironmentContext {
    pub cwd: Option<PathBuf>,
    pub approval_policy: Option<AskForApproval>,
    pub sandbox_mode: Option<SandboxMode>,
    pub network_access: Option<NetworkAccess>,
    pub shell: Option<Shell>,
}

impl EnvironmentContext {
    pub fn new(
        cwd: Option<PathBuf>,
        approval_policy: Option<AskForApproval>,
        sandbox_policy: Option<SandboxPolicy>,
        shell: Option<Shell>,
    ) -> Self {
        Self {
            cwd,
            approval_policy,
            sandbox_mode: match sandbox_policy {
                Some(SandboxPolicy::DangerFullAccess) => Some(SandboxMode::DangerFullAccess),
                Some(SandboxPolicy::ReadOnly) => Some(SandboxMode::ReadOnly),
                Some(SandboxPolicy::WorkspaceWrite { .. }) => Some(SandboxMode::WorkspaceWrite),
                None => None,
            },
            network_access: match sandbox_policy {
                Some(SandboxPolicy::DangerFullAccess) => Some(NetworkAccess::Enabled),
                Some(SandboxPolicy::ReadOnly) => Some(NetworkAccess::Restricted),
                Some(SandboxPolicy::WorkspaceWrite { network_access, .. }) => {
                    if network_access {
                        Some(NetworkAccess::Enabled)
                    } else {
                        Some(NetworkAccess::Restricted)
                    }
                }
                None => None,
            },
            shell,
        }
    }
}

impl EnvironmentContext {
    /// Serializes the environment context to XML. Libraries like `quick-xml`
    /// require custom macros to handle Enums with newtypes, so we just do it
    /// manually, to keep things simple. Output looks like:
    ///
    /// ```xml
    /// <environment_context>
    ///   <cwd>...</cwd>
    ///   <approval_policy>...</approval_policy>
    ///   <sandbox_mode>...</sandbox_mode>
    ///   <network_access>...</network_access>
    ///   <shell>...</shell>
    /// </environment_context>
    /// ```
    pub fn serialize_to_xml(self) -> String {
        let mut lines = vec![ENVIRONMENT_CONTEXT_START.to_string()];
        if let Some(cwd) = self.cwd {
            lines.push(format!(" <cwd>{}</cwd>", cwd.to_string_lossy()));
        }
        if let Some(approval_policy) = self.approval_policy {
            lines.push(format!(
                " <approval_policy>{}</approval_policy>",
                approval_policy
            ));
        }
        if let Some(sandbox_mode) = self.sandbox_mode {
            lines.push(format!(" <sandbox_mode>{}</sandbox_mode>", sandbox_mode));
        }
        if let Some(network_access) = self.network_access {
            lines.push(format!(
                " <network_access>{}</network_access>",
                network_access
            ));
        }
        if let Some(shell) = self.shell
            && let Some(shell_name) = shell.name()
        {
            lines.push(format!(" <shell>{}</shell>", shell_name));
        }
        lines.push(ENVIRONMENT_CONTEXT_END.to_string());
        lines.join("\n")
    }
}
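A hedged usage sketch of the serializer above: with only `cwd` and a read-only sandbox policy set, the derived fields fill in and the unset ones are simply omitted:

```rust
let ctx = EnvironmentContext::new(
    Some(std::path::PathBuf::from("/work")),
    None,
    Some(SandboxPolicy::ReadOnly),
    None,
);
// Per the constructor above, ReadOnly implies restricted network access.
println!("{}", ctx.serialize_to_xml());
// <environment_context>
//  <cwd>/work</cwd>
//  <sandbox_mode>read-only</sandbox_mode>
//  <network_access>restricted</network_access>
// </environment_context>
```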
impl From<EnvironmentContext> for ResponseItem {
    fn from(ec: EnvironmentContext) -> Self {
        ResponseItem::Message {
            id: None,
            role: "user".to_string(),
            content: vec![ContentItem::InputText {
                text: ec.serialize_to_xml(),
            }],
        }
    }
}
@@ -1,8 +1,10 @@
use reqwest::StatusCode;
use serde_json;
use std::io;
use std::time::Duration;
use thiserror::Error;
use tokio::task::JoinError;
use uuid::Uuid;

pub type Result<T> = std::result::Result<T, CodexErr>;

@@ -41,8 +43,16 @@ pub enum CodexErr {
    /// handshake has succeeded but **before** it finished emitting `response.completed`.
    ///
    /// The Session loop treats this as a transient error and will automatically retry the turn.
    ///
    /// Optionally includes the requested delay before retrying the turn.
    #[error("stream disconnected before completion: {0}")]
    Stream(String),
    Stream(String, Option<Duration>),

    #[error("no conversation with id: {0}")]
    ConversationNotFound(Uuid),

    #[error("session configured event was not the first event in the stream")]
    SessionConfiguredNotFirstEvent,

    /// Returned by run_command_stream when the spawned child process timed out (10s).
    #[error("timeout waiting for child process to exit")]

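A hedged sketch of how a caller might consume the widened `Stream` variant; the backoff itself is illustrative, not the session loop's actual code:

```rust
async fn backoff_for(err: &CodexErr) {
    if let CodexErr::Stream(msg, delay) = err {
        // Honor a server-requested delay when present; otherwise retry at once.
        if let Some(d) = delay {
            tokio::time::sleep(*d).await;
        }
        eprintln!("retrying after stream error: {msg}");
    }
}
```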
@@ -3,10 +3,8 @@ use std::os::unix::process::ExitStatusExt;

use std::collections::HashMap;
use std::io;
use std::path::Path;
use std::path::PathBuf;
use std::process::ExitStatus;
use std::sync::Arc;
use std::time::Duration;
use std::time::Instant;

@@ -15,11 +13,11 @@ use tokio::io::AsyncRead;
use tokio::io::AsyncReadExt;
use tokio::io::BufReader;
use tokio::process::Child;
use tokio::sync::Notify;

use crate::error::CodexErr;
use crate::error::Result;
use crate::error::SandboxErr;
use crate::landlock::spawn_command_under_linux_sandbox;
use crate::protocol::Event;
use crate::protocol::EventMsg;
use crate::protocol::ExecCommandOutputDeltaEvent;
@@ -30,18 +28,17 @@ use crate::spawn::StdioPolicy;
use crate::spawn::spawn_child_async;
use serde_bytes::ByteBuf;

// Maximum we send for each stream, which is either:
// - 10KiB OR
// - 256 lines
const MAX_STREAM_OUTPUT: usize = 10 * 1024;
const MAX_STREAM_OUTPUT_LINES: usize = 256;

const DEFAULT_TIMEOUT_MS: u64 = 10_000;

// Hardcode these since it does not seem worth including the libc crate just
// for these.
const SIGKILL_CODE: i32 = 9;
const TIMEOUT_CODE: i32 = 64;
const EXIT_CODE_SIGNAL_BASE: i32 = 128; // conventional shell: 128 + signal

// I/O buffer sizing
const READ_CHUNK_SIZE: usize = 8192; // bytes per read
const AGGREGATE_BUFFER_INITIAL_CAPACITY: usize = 8 * 1024; // 8 KiB

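The `EXIT_CODE_SIGNAL_BASE` constant encodes the usual shell convention: a child killed by signal N is reported as exit code 128 + N. A tiny sketch:

```rust
const EXIT_CODE_SIGNAL_BASE: i32 = 128;
const SIGKILL_CODE: i32 = 9;
const TIMEOUT_CODE: i32 = 64;

fn main() {
    // `kill -9` shows up as 137 in a shell; the synthetic timeout code lands on 192.
    assert_eq!(EXIT_CODE_SIGNAL_BASE + SIGKILL_CODE, 137);
    assert_eq!(EXIT_CODE_SIGNAL_BASE + TIMEOUT_CODE, 192);
}
```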
#[derive(Debug, Clone)]
pub struct ExecParams {
@@ -80,7 +77,6 @@ pub struct StdoutStream {
pub async fn process_exec_tool_call(
    params: ExecParams,
    sandbox_type: SandboxType,
    ctrl_c: Arc<Notify>,
    sandbox_policy: &SandboxPolicy,
    codex_linux_sandbox_exe: &Option<PathBuf>,
    stdout_stream: Option<StdoutStream>,
@@ -89,7 +85,7 @@ pub async fn process_exec_tool_call(

    let raw_output_result: std::result::Result<RawExecToolCallOutput, CodexErr> = match sandbox_type
    {
        SandboxType::None => exec(params, sandbox_policy, ctrl_c, stdout_stream.clone()).await,
        SandboxType::None => exec(params, sandbox_policy, stdout_stream.clone()).await,
        SandboxType::MacosSeatbelt => {
            let timeout = params.timeout_duration();
            let ExecParams {
@@ -103,7 +99,7 @@ pub async fn process_exec_tool_call(
                env,
            )
            .await?;
            consume_truncated_output(child, ctrl_c, timeout, stdout_stream.clone()).await
            consume_truncated_output(child, timeout, stdout_stream.clone()).await
        }
        SandboxType::LinuxSeccomp => {
            let timeout = params.timeout_duration();
@@ -124,7 +120,7 @@ pub async fn process_exec_tool_call(
            )
            .await?;

            consume_truncated_output(child, ctrl_c, timeout, stdout_stream).await
            consume_truncated_output(child, timeout, stdout_stream).await
        }
    };
    let duration = start.elapsed();
@@ -156,6 +152,7 @@ pub async fn process_exec_tool_call(
                exit_code,
                stdout,
                stderr,
                aggregated_output: raw_output.aggregated_output.from_utf8_lossy(),
                duration,
            })
        }
@@ -166,65 +163,6 @@ pub async fn process_exec_tool_call(
    }
}

/// Spawn a shell tool command under the Linux Landlock+seccomp sandbox helper
/// (codex-linux-sandbox).
///
/// Unlike macOS Seatbelt where we directly embed the policy text, the Linux
/// helper accepts a list of `--sandbox-permission`/`-s` flags mirroring the
/// public CLI. We convert the internal [`SandboxPolicy`] representation into
/// the equivalent CLI options.
pub async fn spawn_command_under_linux_sandbox<P>(
    codex_linux_sandbox_exe: P,
    command: Vec<String>,
    sandbox_policy: &SandboxPolicy,
    cwd: PathBuf,
    stdio_policy: StdioPolicy,
    env: HashMap<String, String>,
) -> std::io::Result<Child>
where
    P: AsRef<Path>,
{
    let args = create_linux_sandbox_command_args(command, sandbox_policy, &cwd);
    let arg0 = Some("codex-linux-sandbox");
    spawn_child_async(
        codex_linux_sandbox_exe.as_ref().to_path_buf(),
        args,
        arg0,
        cwd,
        sandbox_policy,
        stdio_policy,
        env,
    )
    .await
}

/// Converts the sandbox policy into the CLI invocation for `codex-linux-sandbox`.
fn create_linux_sandbox_command_args(
    command: Vec<String>,
    sandbox_policy: &SandboxPolicy,
    cwd: &Path,
) -> Vec<String> {
    #[expect(clippy::expect_used)]
    let sandbox_policy_cwd = cwd.to_str().expect("cwd must be valid UTF-8").to_string();

    #[expect(clippy::expect_used)]
    let sandbox_policy_json =
        serde_json::to_string(sandbox_policy).expect("Failed to serialize SandboxPolicy to JSON");

    let mut linux_cmd: Vec<String> = vec![
        sandbox_policy_cwd,
        sandbox_policy_json,
        // Separator so that command arguments starting with `-` are not parsed as
        // options of the helper itself.
        "--".to_string(),
    ];

    // Append the original tool command.
    linux_cmd.extend(command);

    linux_cmd
}

/// We don't have a fully deterministic way to tell if our command failed
/// because of the sandbox - a command in the user's zshrc file might hit an
/// error, but the command itself might fail or succeed for other reasons.
@@ -251,10 +189,11 @@ pub struct StreamOutput<T> {
    pub truncated_after_lines: Option<u32>,
}
#[derive(Debug)]
pub struct RawExecToolCallOutput {
struct RawExecToolCallOutput {
    pub exit_status: ExitStatus,
    pub stdout: StreamOutput<Vec<u8>>,
    pub stderr: StreamOutput<Vec<u8>>,
    pub aggregated_output: StreamOutput<Vec<u8>>,
}

impl StreamOutput<String> {
@@ -275,18 +214,23 @@ impl StreamOutput<Vec<u8>> {
    }
}

#[inline]
fn append_all(dst: &mut Vec<u8>, src: &[u8]) {
    dst.extend_from_slice(src);
}

#[derive(Debug)]
pub struct ExecToolCallOutput {
    pub exit_code: i32,
    pub stdout: StreamOutput<String>,
    pub stderr: StreamOutput<String>,
    pub aggregated_output: StreamOutput<String>,
    pub duration: Duration,
}

async fn exec(
    params: ExecParams,
    sandbox_policy: &SandboxPolicy,
    ctrl_c: Arc<Notify>,
    stdout_stream: Option<StdoutStream>,
) -> Result<RawExecToolCallOutput> {
    let timeout = params.timeout_duration();
@@ -311,14 +255,13 @@ async fn exec(
        env,
    )
    .await?;
    consume_truncated_output(child, ctrl_c, timeout, stdout_stream).await
    consume_truncated_output(child, timeout, stdout_stream).await
}

/// Consumes the output of a child process, truncating it so it is suitable for
/// use as the output of a `shell` tool call. Also enforces specified timeout.
pub(crate) async fn consume_truncated_output(
async fn consume_truncated_output(
    mut child: Child,
    ctrl_c: Arc<Notify>,
    timeout: Duration,
    stdout_stream: Option<StdoutStream>,
) -> Result<RawExecToolCallOutput> {
@@ -337,22 +280,21 @@ pub(crate) async fn consume_truncated_output(
        ))
    })?;

    let (agg_tx, agg_rx) = async_channel::unbounded::<Vec<u8>>();

    let stdout_handle = tokio::spawn(read_capped(
        BufReader::new(stdout_reader),
        MAX_STREAM_OUTPUT,
        MAX_STREAM_OUTPUT_LINES,
        stdout_stream.clone(),
        false,
        Some(agg_tx.clone()),
    ));
    let stderr_handle = tokio::spawn(read_capped(
        BufReader::new(stderr_reader),
        MAX_STREAM_OUTPUT,
        MAX_STREAM_OUTPUT_LINES,
        stdout_stream.clone(),
        true,
        Some(agg_tx.clone()),
    ));

    let interrupted = ctrl_c.notified();
    let exit_status = tokio::select! {
        result = tokio::time::timeout(timeout, child.wait()) => {
            match result {
@@ -362,38 +304,48 @@ pub(crate) async fn consume_truncated_output(
                    // timeout
                    child.start_kill()?;
                    // Debatable whether `child.wait().await` should be called here.
                    synthetic_exit_status(128 + TIMEOUT_CODE)
                    synthetic_exit_status(EXIT_CODE_SIGNAL_BASE + TIMEOUT_CODE)
                }
            }
        }
        _ = interrupted => {
        _ = tokio::signal::ctrl_c() => {
            child.start_kill()?;
            synthetic_exit_status(128 + SIGKILL_CODE)
            synthetic_exit_status(EXIT_CODE_SIGNAL_BASE + SIGKILL_CODE)
        }
    };

    let stdout = stdout_handle.await??;
    let stderr = stderr_handle.await??;

    drop(agg_tx);

    let mut combined_buf = Vec::with_capacity(AGGREGATE_BUFFER_INITIAL_CAPACITY);
    while let Ok(chunk) = agg_rx.recv().await {
        append_all(&mut combined_buf, &chunk);
    }
    let aggregated_output = StreamOutput {
        text: combined_buf,
        truncated_after_lines: None,
    };

    Ok(RawExecToolCallOutput {
        exit_status,
        stdout,
        stderr,
        aggregated_output,
    })
}

async fn read_capped<R: AsyncRead + Unpin + Send + 'static>(
    mut reader: R,
    max_output: usize,
    max_lines: usize,
    stream: Option<StdoutStream>,
    is_stderr: bool,
    aggregate_tx: Option<Sender<Vec<u8>>>,
) -> io::Result<StreamOutput<Vec<u8>>> {
    let mut buf = Vec::with_capacity(max_output.min(8 * 1024));
    let mut tmp = [0u8; 8192];
    let mut buf = Vec::with_capacity(AGGREGATE_BUFFER_INITIAL_CAPACITY);
    let mut tmp = [0u8; READ_CHUNK_SIZE];

    let mut remaining_bytes = max_output;
    let mut remaining_lines = max_lines;
    // No caps: append all bytes

    loop {
        let n = reader.read(&mut tmp).await?;
@@ -420,33 +372,17 @@ async fn read_capped<R: AsyncRead + Unpin + Send + 'static>(
            let _ = stream.tx_event.send(event).await;
        }

        // Copy into the buffer only while we still have byte and line budget.
        if remaining_bytes > 0 && remaining_lines > 0 {
            let mut copy_len = 0;
            for &b in &tmp[..n] {
                if remaining_bytes == 0 || remaining_lines == 0 {
                    break;
                }
                copy_len += 1;
                remaining_bytes -= 1;
                if b == b'\n' {
                    remaining_lines -= 1;
                }
            }
            buf.extend_from_slice(&tmp[..copy_len]);
        if let Some(tx) = &aggregate_tx {
            let _ = tx.send(tmp[..n].to_vec()).await;
        }
        // Continue reading to EOF to avoid back-pressure, but discard once caps are hit.
    }

    let truncated = remaining_lines == 0 || remaining_bytes == 0;
        append_all(&mut buf, &tmp[..n]);
        // Continue reading to EOF to avoid back-pressure
    }

    Ok(StreamOutput {
        text: buf,
        truncated_after_lines: if truncated {
            Some((max_lines - remaining_lines) as u32)
        } else {
            None
        },
        truncated_after_lines: None,
    })
}

codex-rs/core/src/exec_command/exec_command_params.rs (new file, 57 lines)
@@ -0,0 +1,57 @@
use serde::Deserialize;
use serde::Serialize;

use crate::exec_command::session_id::SessionId;

#[derive(Debug, Clone, Deserialize)]
pub struct ExecCommandParams {
    pub(crate) cmd: String,

    #[serde(default = "default_yield_time")]
    pub(crate) yield_time_ms: u64,

    #[serde(default = "max_output_tokens")]
    pub(crate) max_output_tokens: u64,

    #[serde(default = "default_shell")]
    pub(crate) shell: String,

    #[serde(default = "default_login")]
    pub(crate) login: bool,
}

fn default_yield_time() -> u64 {
    10_000
}

fn max_output_tokens() -> u64 {
    10_000
}

fn default_login() -> bool {
    true
}

fn default_shell() -> String {
    "/bin/bash".to_string()
}

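A hedged sketch of the default wiring above: only `cmd` is required, and serde fills the remaining fields from the default functions (`serde_json` is assumed for the arguments payload, as with other tool calls; the fields are `pub(crate)`, so this runs in-crate):

```rust
let params: ExecCommandParams =
    serde_json::from_str(r#"{"cmd": "echo hi"}"#).expect("valid arguments");
assert_eq!(params.yield_time_ms, 10_000);
assert_eq!(params.max_output_tokens, 10_000);
assert_eq!(params.shell, "/bin/bash");
assert!(params.login);
```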
#[derive(Debug, Deserialize, Serialize)]
pub struct WriteStdinParams {
    pub(crate) session_id: SessionId,
    pub(crate) chars: String,

    #[serde(default = "write_stdin_default_yield_time_ms")]
    pub(crate) yield_time_ms: u64,

    #[serde(default = "write_stdin_default_max_output_tokens")]
    pub(crate) max_output_tokens: u64,
}

fn write_stdin_default_yield_time_ms() -> u64 {
    250
}

fn write_stdin_default_max_output_tokens() -> u64 {
    10_000
}
codex-rs/core/src/exec_command/exec_command_session.rs (new file, 83 lines)
@@ -0,0 +1,83 @@
use std::sync::Mutex as StdMutex;

use tokio::sync::broadcast;
use tokio::sync::mpsc;
use tokio::task::JoinHandle;

#[derive(Debug)]
pub(crate) struct ExecCommandSession {
    /// Queue for writing bytes to the process stdin (PTY master write side).
    writer_tx: mpsc::Sender<Vec<u8>>,
    /// Broadcast stream of output chunks read from the PTY. New subscribers
    /// receive only chunks emitted after they subscribe.
    output_tx: broadcast::Sender<Vec<u8>>,

    /// Child killer handle for termination on drop (can signal independently
    /// of a thread blocked in `.wait()`).
    killer: StdMutex<Option<Box<dyn portable_pty::ChildKiller + Send + Sync>>>,

    /// JoinHandle for the blocking PTY reader task.
    reader_handle: StdMutex<Option<JoinHandle<()>>>,

    /// JoinHandle for the stdin writer task.
    writer_handle: StdMutex<Option<JoinHandle<()>>>,

    /// JoinHandle for the child wait task.
    wait_handle: StdMutex<Option<JoinHandle<()>>>,
}

impl ExecCommandSession {
    pub(crate) fn new(
        writer_tx: mpsc::Sender<Vec<u8>>,
        output_tx: broadcast::Sender<Vec<u8>>,
        killer: Box<dyn portable_pty::ChildKiller + Send + Sync>,
        reader_handle: JoinHandle<()>,
        writer_handle: JoinHandle<()>,
        wait_handle: JoinHandle<()>,
    ) -> Self {
        Self {
            writer_tx,
            output_tx,
            killer: StdMutex::new(Some(killer)),
            reader_handle: StdMutex::new(Some(reader_handle)),
            writer_handle: StdMutex::new(Some(writer_handle)),
            wait_handle: StdMutex::new(Some(wait_handle)),
        }
    }

    pub(crate) fn writer_sender(&self) -> mpsc::Sender<Vec<u8>> {
        self.writer_tx.clone()
    }

    pub(crate) fn output_receiver(&self) -> broadcast::Receiver<Vec<u8>> {
        self.output_tx.subscribe()
    }
}

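The note on `output_tx` above is a property of `tokio::sync::broadcast`: a receiver only observes values sent after it subscribes. A hedged sketch (inside an async context):

```rust
let (tx, _) = tokio::sync::broadcast::channel::<Vec<u8>>(16);
tx.send(b"before".to_vec()).ok(); // no subscriber yet; this chunk is lost
let mut rx = tx.subscribe();
tx.send(b"after".to_vec()).ok();
assert_eq!(rx.recv().await.expect("open channel"), b"after".to_vec());
```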
impl Drop for ExecCommandSession {
    fn drop(&mut self) {
        // Best-effort: terminate child first so blocking tasks can complete.
        if let Ok(mut killer_opt) = self.killer.lock()
            && let Some(mut killer) = killer_opt.take()
        {
            let _ = killer.kill();
        }

        // Abort background tasks; they may already have exited after kill.
        if let Ok(mut h) = self.reader_handle.lock()
            && let Some(handle) = h.take()
        {
            handle.abort();
        }
        if let Ok(mut h) = self.writer_handle.lock()
            && let Some(handle) = h.take()
        {
            handle.abort();
        }
        if let Ok(mut h) = self.wait_handle.lock()
            && let Some(handle) = h.take()
        {
            handle.abort();
        }
    }
}
codex-rs/core/src/exec_command/mod.rs (new file, 14 lines)
@@ -0,0 +1,14 @@
mod exec_command_params;
mod exec_command_session;
mod responses_api;
mod session_id;
mod session_manager;

pub use exec_command_params::ExecCommandParams;
pub use exec_command_params::WriteStdinParams;
pub use responses_api::EXEC_COMMAND_TOOL_NAME;
pub use responses_api::WRITE_STDIN_TOOL_NAME;
pub use responses_api::create_exec_command_tool_for_responses_api;
pub use responses_api::create_write_stdin_tool_for_responses_api;
pub use session_manager::SessionManager as ExecSessionManager;
pub use session_manager::result_into_payload;
codex-rs/core/src/exec_command/responses_api.rs (new file, 98 lines)
@@ -0,0 +1,98 @@
use std::collections::BTreeMap;

use crate::openai_tools::JsonSchema;
use crate::openai_tools::ResponsesApiTool;

pub const EXEC_COMMAND_TOOL_NAME: &str = "exec_command";
pub const WRITE_STDIN_TOOL_NAME: &str = "write_stdin";

pub fn create_exec_command_tool_for_responses_api() -> ResponsesApiTool {
    let mut properties = BTreeMap::<String, JsonSchema>::new();
    properties.insert(
        "cmd".to_string(),
        JsonSchema::String {
            description: Some("The shell command to execute.".to_string()),
        },
    );
    properties.insert(
        "yield_time_ms".to_string(),
        JsonSchema::Number {
            description: Some("The maximum time in milliseconds to wait for output.".to_string()),
        },
    );
    properties.insert(
        "max_output_tokens".to_string(),
        JsonSchema::Number {
            description: Some("The maximum number of tokens to output.".to_string()),
        },
    );
    properties.insert(
        "shell".to_string(),
        JsonSchema::String {
            description: Some("The shell to use. Defaults to \"/bin/bash\".".to_string()),
        },
    );
    properties.insert(
        "login".to_string(),
        JsonSchema::Boolean {
            description: Some(
                "Whether to run the command as a login shell. Defaults to true.".to_string(),
            ),
        },
    );

    ResponsesApiTool {
        name: EXEC_COMMAND_TOOL_NAME.to_owned(),
        description: r#"Execute shell commands on the local machine with streaming output."#
            .to_string(),
        strict: false,
        parameters: JsonSchema::Object {
            properties,
            required: Some(vec!["cmd".to_string()]),
            additional_properties: Some(false),
        },
    }
}

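A hedged sketch of an arguments payload that satisfies the schema above and deserializes into `ExecCommandParams` (the command string is hypothetical):

```rust
let args = serde_json::json!({
    "cmd": "rg TODO --count",
    "yield_time_ms": 2_000,
    "login": false
});
let params: ExecCommandParams =
    serde_json::from_value(args).expect("schema-conforming arguments");
```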
pub fn create_write_stdin_tool_for_responses_api() -> ResponsesApiTool {
    let mut properties = BTreeMap::<String, JsonSchema>::new();
    properties.insert(
        "session_id".to_string(),
        JsonSchema::Number {
            description: Some("The ID of the exec_command session.".to_string()),
        },
    );
    properties.insert(
        "chars".to_string(),
        JsonSchema::String {
            description: Some("The characters to write to stdin.".to_string()),
        },
    );
    properties.insert(
        "yield_time_ms".to_string(),
        JsonSchema::Number {
            description: Some(
                "The maximum time in milliseconds to wait for output after writing.".to_string(),
            ),
        },
    );
    properties.insert(
        "max_output_tokens".to_string(),
        JsonSchema::Number {
            description: Some("The maximum number of tokens to output.".to_string()),
        },
    );

    ResponsesApiTool {
        name: WRITE_STDIN_TOOL_NAME.to_owned(),
        description: r#"Write characters to an exec session's stdin. Returns all stdout+stderr received within yield_time_ms.
Can write control characters (\u0003 for Ctrl-C), or an empty string to just poll stdout+stderr."#
            .to_string(),
        strict: false,
        parameters: JsonSchema::Object {
            properties,
            required: Some(vec!["session_id".to_string(), "chars".to_string()]),
            additional_properties: Some(false),
        },
    }
}
codex-rs/core/src/exec_command/session_id.rs (new file, 5 lines)
@@ -0,0 +1,5 @@
use serde::Deserialize;
use serde::Serialize;

#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub(crate) struct SessionId(pub u32);
codex-rs/core/src/exec_command/session_manager.rs (new file, 674 lines)
@@ -0,0 +1,674 @@
use std::collections::HashMap;
use std::io::ErrorKind;
use std::io::Read;
use std::sync::Arc;
use std::sync::Mutex as StdMutex;
use std::sync::atomic::AtomicU32;

use portable_pty::CommandBuilder;
use portable_pty::PtySize;
use portable_pty::native_pty_system;
use tokio::sync::Mutex;
use tokio::sync::mpsc;
use tokio::sync::oneshot;
use tokio::time::Duration;
use tokio::time::Instant;
use tokio::time::timeout;

use crate::exec_command::exec_command_params::ExecCommandParams;
use crate::exec_command::exec_command_params::WriteStdinParams;
use crate::exec_command::exec_command_session::ExecCommandSession;
use crate::exec_command::session_id::SessionId;
use codex_protocol::models::FunctionCallOutputPayload;

#[derive(Debug, Default)]
pub struct SessionManager {
    next_session_id: AtomicU32,
    sessions: Mutex<HashMap<SessionId, ExecCommandSession>>,
}

#[derive(Debug)]
pub struct ExecCommandOutput {
    wall_time: Duration,
    exit_status: ExitStatus,
    original_token_count: Option<u64>,
    output: String,
}

impl ExecCommandOutput {
    fn to_text_output(&self) -> String {
        let wall_time_secs = self.wall_time.as_secs_f32();
        let termination_status = match self.exit_status {
            ExitStatus::Exited(code) => format!("Process exited with code {code}"),
            ExitStatus::Ongoing(session_id) => {
                format!("Process running with session ID {}", session_id.0)
            }
        };
        let truncation_status = match self.original_token_count {
            Some(tokens) => {
                format!("\nWarning: truncated output (original token count: {tokens})")
            }
            None => "".to_string(),
        };
        format!(
            r#"Wall time: {wall_time_secs:.3} seconds
{termination_status}{truncation_status}
Output:
{output}"#,
            output = self.output
        )
    }
}

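A hedged sketch of the rendered text for a still-running session (constructed in-crate, since the fields are private):

```rust
let out = ExecCommandOutput {
    wall_time: Duration::from_millis(1250),
    exit_status: ExitStatus::Ongoing(SessionId(3)),
    original_token_count: None,
    output: "hello\n".to_string(),
};
let text = out.to_text_output();
assert!(text.starts_with("Wall time: 1.250 seconds"));
assert!(text.contains("Process running with session ID 3"));
```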
#[derive(Debug)]
|
||||
pub enum ExitStatus {
|
||||
Exited(i32),
|
||||
Ongoing(SessionId),
|
||||
}
|
||||
|
||||
pub fn result_into_payload(result: Result<ExecCommandOutput, String>) -> FunctionCallOutputPayload {
|
||||
match result {
|
||||
Ok(output) => FunctionCallOutputPayload {
|
||||
content: output.to_text_output(),
|
||||
success: Some(true),
|
||||
},
|
||||
Err(err) => FunctionCallOutputPayload {
|
||||
content: err,
|
||||
success: Some(false),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
impl SessionManager {
|
||||
/// Processes the request and is required to send a response via `outgoing`.
|
||||
pub async fn handle_exec_command_request(
|
||||
&self,
|
||||
params: ExecCommandParams,
|
||||
) -> Result<ExecCommandOutput, String> {
|
||||
// Allocate a session id.
|
||||
let session_id = SessionId(
|
||||
self.next_session_id
|
||||
.fetch_add(1, std::sync::atomic::Ordering::SeqCst),
|
||||
);
|
||||
|
||||
let (session, mut exit_rx) =
|
||||
create_exec_command_session(params.clone())
|
||||
.await
|
||||
.map_err(|err| {
|
||||
format!(
|
||||
"failed to create exec command session for session id {}: {err}",
|
||||
session_id.0
|
||||
)
|
||||
})?;
|
||||
|
||||
// Insert into session map.
|
||||
let mut output_rx = session.output_receiver();
|
||||
self.sessions.lock().await.insert(session_id, session);
|
||||
|
||||
// Collect output until either timeout expires or process exits.
|
||||
// Do not cap during collection; truncate at the end if needed.
|
||||
// Use a modest initial capacity to avoid large preallocation.
|
||||
let cap_bytes_u64 = params.max_output_tokens.saturating_mul(4);
|
||||
let cap_bytes: usize = cap_bytes_u64.min(usize::MAX as u64) as usize;
|
||||
let mut collected: Vec<u8> = Vec::with_capacity(4096);
|
||||
|
||||
let start_time = Instant::now();
|
||||
let deadline = start_time + Duration::from_millis(params.yield_time_ms);
|
||||
let mut exit_code: Option<i32> = None;
|
||||
|
||||
loop {
|
||||
if Instant::now() >= deadline {
|
||||
break;
|
||||
}
|
||||
let remaining = deadline.saturating_duration_since(Instant::now());
|
||||
tokio::select! {
|
||||
biased;
|
||||
exit = &mut exit_rx => {
|
||||
exit_code = exit.ok();
|
||||
// Small grace period to pull remaining buffered output
|
||||
let grace_deadline = Instant::now() + Duration::from_millis(25);
|
||||
while Instant::now() < grace_deadline {
|
||||
match timeout(Duration::from_millis(1), output_rx.recv()).await {
|
||||
Ok(Ok(chunk)) => {
|
||||
collected.extend_from_slice(&chunk);
|
||||
}
|
||||
Ok(Err(tokio::sync::broadcast::error::RecvError::Lagged(_))) => {
|
||||
// Skip missed messages; keep trying within grace period.
|
||||
continue;
|
||||
}
|
||||
Ok(Err(tokio::sync::broadcast::error::RecvError::Closed)) => break,
|
||||
Err(_) => break,
|
||||
}
|
||||
}
|
||||
break;
|
||||
}
|
||||
chunk = timeout(remaining, output_rx.recv()) => {
|
||||
match chunk {
|
||||
Ok(Ok(chunk)) => {
|
||||
collected.extend_from_slice(&chunk);
|
||||
}
|
||||
Ok(Err(tokio::sync::broadcast::error::RecvError::Lagged(_))) => {
|
||||
// Skip missed messages; continue collecting fresh output.
|
||||
}
|
||||
Ok(Err(tokio::sync::broadcast::error::RecvError::Closed)) => { break; }
|
||||
Err(_) => { break; }
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let output = String::from_utf8_lossy(&collected).to_string();
|
||||
|
||||
let exit_status = if let Some(code) = exit_code {
|
||||
ExitStatus::Exited(code)
|
||||
} else {
|
||||
ExitStatus::Ongoing(session_id)
|
||||
};
|
||||
|
||||
// If output exceeds cap, truncate the middle and record original token estimate.
|
||||
let (output, original_token_count) = truncate_middle(&output, cap_bytes);
|
||||
Ok(ExecCommandOutput {
|
||||
wall_time: Instant::now().duration_since(start_time),
|
||||
exit_status,
|
||||
original_token_count,
|
||||
output,
|
||||
})
|
||||
}

    /// Write characters to a session's stdin and collect combined output for up to `yield_time_ms`.
    pub async fn handle_write_stdin_request(
        &self,
        params: WriteStdinParams,
    ) -> Result<ExecCommandOutput, String> {
        let WriteStdinParams {
            session_id,
            chars,
            yield_time_ms,
            max_output_tokens,
        } = params;

        // Grab handles without holding the sessions lock across await points.
        let (writer_tx, mut output_rx) = {
            let sessions = self.sessions.lock().await;
            match sessions.get(&session_id) {
                Some(session) => (session.writer_sender(), session.output_receiver()),
                None => {
                    return Err(format!("unknown session id {}", session_id.0));
                }
            }
        };

        // Write stdin if provided.
        if !chars.is_empty() && writer_tx.send(chars.into_bytes()).await.is_err() {
            return Err("failed to write to stdin".to_string());
        }

        // Collect output for up to `yield_time_ms`, then truncate to the byte
        // cap derived from `max_output_tokens`.
        let mut collected: Vec<u8> = Vec::with_capacity(4096);
        let start_time = Instant::now();
        let deadline = start_time + Duration::from_millis(yield_time_ms);
        loop {
            let now = Instant::now();
            if now >= deadline {
                break;
            }
            let remaining = deadline - now;
            match timeout(remaining, output_rx.recv()).await {
                Ok(Ok(chunk)) => {
                    // Collect all output within the time budget; truncate at the end.
                    collected.extend_from_slice(&chunk);
                }
                Ok(Err(tokio::sync::broadcast::error::RecvError::Lagged(_))) => {
                    // Skip missed messages; continue collecting fresh output.
                }
                Ok(Err(tokio::sync::broadcast::error::RecvError::Closed)) => break,
                Err(_) => break, // timeout
            }
        }

        // Return structured output, truncating the middle if over the cap.
        let output = String::from_utf8_lossy(&collected).to_string();
        let cap_bytes_u64 = max_output_tokens.saturating_mul(4);
        let cap_bytes: usize = cap_bytes_u64.min(usize::MAX as u64) as usize;
        let (output, original_token_count) = truncate_middle(&output, cap_bytes);
        Ok(ExecCommandOutput {
            wall_time: Instant::now().duration_since(start_time),
            exit_status: ExitStatus::Ongoing(session_id),
            original_token_count,
            output,
        })
    }
}
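
// A minimal usage sketch (hypothetical, not part of this diff): drive the two
// handlers above end to end. `cat` never exits on its own, so the first call
// reports `ExitStatus::Ongoing`, and the echo of the written bytes is collected
// by the follow-up `handle_write_stdin_request` call.
async fn _example_session_roundtrip(manager: &SessionManager) -> Result<(), String> {
    let first = manager
        .handle_exec_command_request(ExecCommandParams {
            cmd: "cat".to_string(),
            yield_time_ms: 250,
            max_output_tokens: 1_000,
            shell: "/bin/bash".to_string(),
            login: false,
        })
        .await?;
    if let ExitStatus::Ongoing(session_id) = first.exit_status {
        let echoed = manager
            .handle_write_stdin_request(WriteStdinParams {
                session_id,
                chars: "hello\n".to_string(),
                yield_time_ms: 250,
                max_output_tokens: 1_000,
            })
            .await?;
        // The PTY echoes stdin, so the collected output should include it.
        assert!(echoed.output.contains("hello"));
    }
    Ok(())
}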

/// Spawn PTY and child process per spawn_exec_command_session logic.
async fn create_exec_command_session(
    params: ExecCommandParams,
) -> anyhow::Result<(ExecCommandSession, oneshot::Receiver<i32>)> {
    let ExecCommandParams {
        cmd,
        yield_time_ms: _,
        max_output_tokens: _,
        shell,
        login,
    } = params;

    // Use the native pty implementation for the system.
    let pty_system = native_pty_system();

    // Create a new pty.
    let pair = pty_system.openpty(PtySize {
        rows: 24,
        cols: 80,
        pixel_width: 0,
        pixel_height: 0,
    })?;

    // Spawn a shell into the pty.
    let mut command_builder = CommandBuilder::new(shell);
    let shell_mode_opt = if login { "-lc" } else { "-c" };
    command_builder.arg(shell_mode_opt);
    command_builder.arg(cmd);

    let mut child = pair.slave.spawn_command(command_builder)?;
    // Obtain a killer that can signal the process independently of `.wait()`.
    let killer = child.clone_killer();

    // Channel to forward write requests to the PTY writer.
    let (writer_tx, mut writer_rx) = mpsc::channel::<Vec<u8>>(128);
    // Broadcast for streaming PTY output to readers: subscribers receive output from subscription time onward.
    let (output_tx, _) = tokio::sync::broadcast::channel::<Vec<u8>>(256);

    // Reader task: drain the PTY and forward chunks to the output channel.
    let mut reader = pair.master.try_clone_reader()?;
    let output_tx_clone = output_tx.clone();
    let reader_handle = tokio::task::spawn_blocking(move || {
        let mut buf = [0u8; 8192];
        loop {
            match reader.read(&mut buf) {
                Ok(0) => break, // EOF
                Ok(n) => {
                    // Forward to the broadcast channel; best-effort if there are subscribers.
                    let _ = output_tx_clone.send(buf[..n].to_vec());
                }
                Err(ref e) if e.kind() == ErrorKind::Interrupted => {
                    // Retry on EINTR.
                    continue;
                }
                Err(ref e) if e.kind() == ErrorKind::WouldBlock => {
                    // We're in a blocking thread; back off briefly and retry.
                    std::thread::sleep(Duration::from_millis(5));
                    continue;
                }
                Err(_) => break,
            }
        }
    });

    // Writer task: apply stdin writes to the PTY writer.
    let writer = pair.master.take_writer()?;
    let writer = Arc::new(StdMutex::new(writer));
    let writer_handle = tokio::spawn({
        let writer = writer.clone();
        async move {
            while let Some(bytes) = writer_rx.recv().await {
                let writer = writer.clone();
                // Perform the blocking write on a blocking thread.
                let _ = tokio::task::spawn_blocking(move || {
                    if let Ok(mut guard) = writer.lock() {
                        use std::io::Write;
                        let _ = guard.write_all(&bytes);
                        let _ = guard.flush();
                    }
                })
                .await;
            }
        }
    });

    // Keep the child alive until it exits, then signal the exit code.
    let (exit_tx, exit_rx) = oneshot::channel::<i32>();
    let wait_handle = tokio::task::spawn_blocking(move || {
        let code = match child.wait() {
            Ok(status) => status.exit_code() as i32,
            Err(_) => -1,
        };
        let _ = exit_tx.send(code);
    });

    // Create and store the session with its channels.
    let session = ExecCommandSession::new(
        writer_tx,
        output_tx,
        killer,
        reader_handle,
        writer_handle,
        wait_handle,
    );
    Ok((session, exit_rx))
}
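
// Task topology (summary of the wiring above): a blocking reader thread pumps
// PTY bytes into the broadcast channel, an async writer task drains `writer_rx`
// onto the PTY writer behind a std mutex, and a blocking waiter resolves
// `exit_rx` with the child's exit code. The three JoinHandles are handed to
// `ExecCommandSession::new`, presumably so the session can clean them up when
// it is dropped or killed.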

/// Truncate the middle of a UTF-8 string to at most `max_bytes` bytes,
/// preserving the beginning and the end. Returns the possibly truncated
/// string and `Some(original_token_count)` (estimated at 4 bytes/token)
/// if truncation occurred; otherwise returns the original string and `None`.
fn truncate_middle(s: &str, max_bytes: usize) -> (String, Option<u64>) {
    // No truncation needed.
    if s.len() <= max_bytes {
        return (s.to_string(), None);
    }
    let est_tokens = (s.len() as u64).div_ceil(4);
    if max_bytes == 0 {
        // Cannot keep any content; still return the full marker (the marker
        // itself is never truncated).
        return (
            format!("…{} tokens truncated…", est_tokens),
            Some(est_tokens),
        );
    }

    // Helper to truncate a string to a given byte length on a char boundary.
    fn truncate_on_boundary(input: &str, max_len: usize) -> &str {
        if input.len() <= max_len {
            return input;
        }
        let mut end = max_len;
        while end > 0 && !input.is_char_boundary(end) {
            end -= 1;
        }
        &input[..end]
    }

    // Given a left/right budget, prefer newline boundaries; otherwise fall back
    // to UTF-8 char boundaries.
    fn pick_prefix_end(s: &str, left_budget: usize) -> usize {
        if let Some(head) = s.get(..left_budget)
            && let Some(i) = head.rfind('\n')
        {
            return i + 1; // keep the newline so the suffix starts on a fresh line
        }
        truncate_on_boundary(s, left_budget).len()
    }

    fn pick_suffix_start(s: &str, right_budget: usize) -> usize {
        let start_tail = s.len().saturating_sub(right_budget);
        if let Some(tail) = s.get(start_tail..)
            && let Some(i) = tail.find('\n')
        {
            return start_tail + i + 1; // start after the newline
        }
        // Fall back to a char boundary at or after start_tail.
        let mut idx = start_tail.min(s.len());
        while idx < s.len() && !s.is_char_boundary(idx) {
            idx += 1;
        }
        idx
    }

    // Refine the marker length and budgets until stable. The marker is never truncated.
    let mut guess_tokens = est_tokens; // worst case: everything truncated
    for _ in 0..4 {
        let marker = format!("…{} tokens truncated…", guess_tokens);
        let marker_len = marker.len();
        let keep_budget = max_bytes.saturating_sub(marker_len);
        if keep_budget == 0 {
            // No room for any content within the cap; return a full, untruncated marker
            // that reflects the entire truncated content.
            return (
                format!("…{} tokens truncated…", est_tokens),
                Some(est_tokens),
            );
        }

        let left_budget = keep_budget / 2;
        let right_budget = keep_budget - left_budget;
        let prefix_end = pick_prefix_end(s, left_budget);
        let mut suffix_start = pick_suffix_start(s, right_budget);
        if suffix_start < prefix_end {
            suffix_start = prefix_end;
        }
        let kept_content_bytes = prefix_end + (s.len() - suffix_start);
        let truncated_content_bytes = s.len().saturating_sub(kept_content_bytes);
        let new_tokens = (truncated_content_bytes as u64).div_ceil(4);
        if new_tokens == guess_tokens {
            let mut out = String::with_capacity(marker_len + kept_content_bytes + 1);
            out.push_str(&s[..prefix_end]);
            out.push_str(&marker);
            // Place the marker on its own line for symmetry when we keep line boundaries.
            out.push('\n');
            out.push_str(&s[suffix_start..]);
            return (out, Some(est_tokens));
        }
        guess_tokens = new_tokens;
    }

    // Fallback: use the last guess to build the output.
    let marker = format!("…{} tokens truncated…", guess_tokens);
    let marker_len = marker.len();
    let keep_budget = max_bytes.saturating_sub(marker_len);
    if keep_budget == 0 {
        return (
            format!("…{} tokens truncated…", est_tokens),
            Some(est_tokens),
        );
    }
    let left_budget = keep_budget / 2;
    let right_budget = keep_budget - left_budget;
    let prefix_end = pick_prefix_end(s, left_budget);
    let suffix_start = pick_suffix_start(s, right_budget);
    let mut out = String::with_capacity(marker_len + prefix_end + (s.len() - suffix_start) + 1);
    out.push_str(&s[..prefix_end]);
    out.push_str(&marker);
    out.push('\n');
    out.push_str(&s[suffix_start..]);
    (out, Some(est_tokens))
}
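
// Worked example (hypothetical helper, not part of this diff): with
// `max_bytes = 64` and an 80-byte input of twenty 4-byte lines, the marker
// "…12 tokens truncated…" occupies 25 bytes, leaving a 39-byte keep budget,
// so four whole lines survive on each side (see the unit test below).
fn _truncate_middle_example() {
    let s: String = (1..=20).map(|i| format!("{i:03}\n")).collect();
    let (out, original) = truncate_middle(&s, 64);
    assert!(out.starts_with("001\n") && out.ends_with("020\n"));
    assert_eq!(original, Some(20)); // ceil(80 / 4) estimated tokens
}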

#[cfg(test)]
mod tests {
    use super::*;
    use crate::exec_command::session_id::SessionId;

    /// Test that verifies that [`SessionManager::handle_exec_command_request()`]
    /// and [`SessionManager::handle_write_stdin_request()`] work as expected
    /// in the presence of a process that never terminates (but produces
    /// output continuously).
    #[cfg(unix)]
    #[allow(clippy::print_stderr)]
    #[tokio::test(flavor = "multi_thread", worker_threads = 4)]
    async fn session_manager_streams_and_truncates_from_now() {
        use crate::exec_command::exec_command_params::ExecCommandParams;
        use crate::exec_command::exec_command_params::WriteStdinParams;
        use tokio::time::sleep;

        let session_manager = SessionManager::default();
        // Long-running loop that prints an increasing counter every ~100ms.
        // Use Python for a portable, reliable sleep across shells/PTYs.
        let cmd = r#"python3 - <<'PY'
import sys, time
count = 0
while True:
    print(count)
    sys.stdout.flush()
    count += 100
    time.sleep(0.1)
PY"#
        .to_string();

        // Start the session and collect ~3s of output.
        let params = ExecCommandParams {
            cmd,
            yield_time_ms: 3_000,
            max_output_tokens: 1_000, // large enough to avoid truncation here
            shell: "/bin/bash".to_string(),
            login: false,
        };
        let initial_output = match session_manager
            .handle_exec_command_request(params.clone())
            .await
        {
            Ok(v) => v,
            Err(e) => {
                // PTY may be restricted in some sandboxes; skip in that case.
                if e.contains("openpty") || e.contains("Operation not permitted") {
                    eprintln!("skipping test due to restricted PTY: {e}");
                    return;
                }
                panic!("exec request failed unexpectedly: {e}");
            }
        };
        eprintln!("initial output: {initial_output:?}");

        // Should be ongoing (we launched a never-ending loop).
        let session_id = match initial_output.exit_status {
            ExitStatus::Ongoing(id) => id,
            _ => panic!("expected ongoing session"),
        };

        // Parse the numeric lines and get the max observed value in the first window.
        let first_nums = extract_monotonic_numbers(&initial_output.output);
        assert!(
            !first_nums.is_empty(),
            "expected some output from first window"
        );
        let first_max = *first_nums.iter().max().unwrap();

        // Wait ~4s so counters progress while we're not reading.
        sleep(Duration::from_millis(4_000)).await;

        // Now read ~3s of output "from now" only.
        // Use a small token cap so truncation occurs and we test middle truncation.
        let write_params = WriteStdinParams {
            session_id,
            chars: String::new(),
            yield_time_ms: 3_000,
            max_output_tokens: 16, // 16 tokens ~= 64 bytes -> likely truncation
        };
        let second = session_manager
            .handle_write_stdin_request(write_params)
            .await
            .expect("write stdin should succeed");

        // Verify truncation metadata and the size bound (cap is tokens * 4 bytes).
        assert!(second.original_token_count.is_some());
        let cap_bytes = (16u64 * 4) as usize;
        assert!(second.output.len() <= cap_bytes);
        // The middle truncation marker should be present.
        assert!(
            second.output.contains("tokens truncated") && second.output.contains('…'),
            "expected truncation marker in output, got: {}",
            second.output
        );

        // Minimal freshness check: the earliest number we see in the second window
        // should be significantly larger than the last from the first window.
        let second_nums = extract_monotonic_numbers(&second.output);
        assert!(
            !second_nums.is_empty(),
            "expected some numeric output from second window"
        );
        let second_min = *second_nums.iter().min().unwrap();

        // We slept 4 seconds (~40 ticks at 100ms/tick, each +100), so expect
        // an increase of roughly 4000 or more. Allow a generous margin.
        assert!(
            second_min >= first_max + 2000,
            "second_min={second_min} first_max={first_max}",
        );
    }

    #[cfg(unix)]
    fn extract_monotonic_numbers(s: &str) -> Vec<i64> {
        s.lines()
            .filter_map(|line| {
                if !line.is_empty()
                    && line.chars().all(|c| c.is_ascii_digit())
                    && let Ok(n) = line.parse::<i64>()
                {
                    // Our generator increments by 100; ignore spurious fragments.
                    if n % 100 == 0 {
                        return Some(n);
                    }
                }
                None
            })
            .collect()
    }

    #[test]
    fn to_text_output_exited_no_truncation() {
        let out = ExecCommandOutput {
            wall_time: Duration::from_millis(1234),
            exit_status: ExitStatus::Exited(0),
            original_token_count: None,
            output: "hello".to_string(),
        };
        let text = out.to_text_output();
        let expected = r#"Wall time: 1.234 seconds
Process exited with code 0
Output:
hello"#;
        assert_eq!(expected, text);
    }

    #[test]
    fn to_text_output_ongoing_with_truncation() {
        let out = ExecCommandOutput {
            wall_time: Duration::from_millis(500),
            exit_status: ExitStatus::Ongoing(SessionId(42)),
            original_token_count: Some(1000),
            output: "abc".to_string(),
        };
        let text = out.to_text_output();
        let expected = r#"Wall time: 0.500 seconds
Process running with session ID 42
Warning: truncated output (original token count: 1000)
Output:
abc"#;
        assert_eq!(expected, text);
    }

    #[test]
    fn truncate_middle_no_newlines_fallback() {
        // A long string with no newlines that exceeds the cap.
        let s = "abcdefghijklmnopqrstuvwxyz0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ";
        let max_bytes = 16; // force truncation
        let (out, original) = truncate_middle(s, max_bytes);
        // For very small caps, we return the full, untruncated marker,
        // even if it exceeds the cap.
        assert_eq!(out, "…16 tokens truncated…");
        // The original string length is 62 bytes => ceil(62/4) = 16 tokens.
        assert_eq!(original, Some(16));
    }

    #[test]
    fn truncate_middle_prefers_newline_boundaries() {
        // Build a multi-line string of 20 numbered lines (each "NNN\n").
        let mut s = String::new();
        for i in 1..=20 {
            s.push_str(&format!("{i:03}\n"));
        }
        // Total length: 20 lines * 4 bytes per line = 80 bytes.
        assert_eq!(s.len(), 80);

        // Choose a cap that forces truncation while leaving room for
        // a few lines on each side after accounting for the marker.
        let max_bytes = 64;
        // Expect exact output: first 4 lines, marker, last 4 lines, and the
        // correct token estimate (80/4 = 20).
        assert_eq!(
            truncate_middle(&s, max_bytes),
            (
                r#"001
002
003
004
…12 tokens truncated…
017
018
019
020
"#
                .to_string(),
                Some(20)
            )
        );
    }
}

@@ -70,8 +70,6 @@ where

 #[cfg(test)]
 mod tests {
     #![allow(clippy::unwrap_used, clippy::expect_used)]

     use super::*;
     use crate::config_types::ShellEnvironmentPolicyInherit;
     use maplit::hashmap;
@@ -1,11 +1,17 @@
+use std::collections::HashSet;
 use std::path::Path;
+use std::path::PathBuf;

+use codex_protocol::mcp_protocol::GitSha;
+use futures::future::join_all;
+use serde::Deserialize;
+use serde::Serialize;
 use tokio::process::Command;
 use tokio::time::Duration as TokioDuration;
 use tokio::time::timeout;

 use crate::util::is_inside_git_repo;

 /// Timeout for git commands to prevent freezing on large repositories
 const GIT_COMMAND_TIMEOUT: TokioDuration = TokioDuration::from_secs(5);

@@ -22,6 +28,12 @@ pub struct GitInfo {
     pub repository_url: Option<String>,
 }

+#[derive(Serialize, Deserialize, Clone, Debug)]
+pub struct GitDiffToRemote {
+    pub sha: GitSha,
+    pub diff: String,
+}
+
 /// Collect git repository information from the given working directory using command-line git.
 /// Returns None if no git repository is found or if git operations fail.
 /// Uses timeouts to prevent freezing on large repositories.
@@ -51,38 +63,52 @@ pub async fn collect_git_info(cwd: &Path) -> Option<GitInfo> {
     };

     // Process commit hash
-    if let Some(output) = commit_result {
-        if output.status.success() {
-            if let Ok(hash) = String::from_utf8(output.stdout) {
-                git_info.commit_hash = Some(hash.trim().to_string());
-            }
-        }
+    if let Some(output) = commit_result
+        && output.status.success()
+        && let Ok(hash) = String::from_utf8(output.stdout)
+    {
+        git_info.commit_hash = Some(hash.trim().to_string());
     }

     // Process branch name
-    if let Some(output) = branch_result {
-        if output.status.success() {
-            if let Ok(branch) = String::from_utf8(output.stdout) {
-                let branch = branch.trim();
-                if branch != "HEAD" {
-                    git_info.branch = Some(branch.to_string());
-                }
-            }
+    if let Some(output) = branch_result
+        && output.status.success()
+        && let Ok(branch) = String::from_utf8(output.stdout)
+    {
+        let branch = branch.trim();
+        if branch != "HEAD" {
+            git_info.branch = Some(branch.to_string());
         }
     }

     // Process repository URL
-    if let Some(output) = url_result {
-        if output.status.success() {
-            if let Ok(url) = String::from_utf8(output.stdout) {
-                git_info.repository_url = Some(url.trim().to_string());
-            }
-        }
+    if let Some(output) = url_result
+        && output.status.success()
+        && let Ok(url) = String::from_utf8(output.stdout)
+    {
+        git_info.repository_url = Some(url.trim().to_string());
     }

     Some(git_info)
 }

+/// Returns the closest git sha to HEAD that is on a remote as well as the diff to that sha.
+pub async fn git_diff_to_remote(cwd: &Path) -> Option<GitDiffToRemote> {
+    if !is_inside_git_repo(cwd) {
+        return None;
+    }
+
+    let remotes = get_git_remotes(cwd).await?;
+    let branches = branch_ancestry(cwd).await?;
+    let base_sha = find_closest_sha(cwd, &branches, &remotes).await?;
+    let diff = diff_against_sha(cwd, &base_sha).await?;
+
+    Some(GitDiffToRemote {
+        sha: base_sha,
+        diff,
+    })
+}
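
// A minimal consumption sketch (illustrative, not part of this diff):
//
//     if let Some(GitDiffToRemote { sha, diff }) = git_diff_to_remote(Path::new(".")).await {
//         println!("base {} carries a {}-byte diff", sha.0, diff.len());
//     }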

 /// Run a git command with a timeout to prevent blocking on large repositories
 async fn run_git_command_with_timeout(args: &[&str], cwd: &Path) -> Option<std::process::Output> {
     let result = timeout(
@@ -97,11 +123,343 @@ async fn run_git_command_with_timeout(args: &[&str], cwd: &Path) -> Option<std::
     }
 }

+async fn get_git_remotes(cwd: &Path) -> Option<Vec<String>> {
+    let output = run_git_command_with_timeout(&["remote"], cwd).await?;
+    if !output.status.success() {
+        return None;
+    }
+    let mut remotes: Vec<String> = String::from_utf8(output.stdout)
+        .ok()?
+        .lines()
+        .map(|s| s.to_string())
+        .collect();
+    if let Some(pos) = remotes.iter().position(|r| r == "origin") {
+        let origin = remotes.remove(pos);
+        remotes.insert(0, origin);
+    }
+    Some(remotes)
+}
+
+/// Attempt to determine the repository's default branch name.
+///
+/// Preference order:
+/// 1) The symbolic ref at `refs/remotes/<remote>/HEAD` for the first remote (origin prioritized)
+/// 2) `git remote show <remote>` parsed for "HEAD branch: <name>"
+/// 3) Local fallback to existing `main` or `master` if present
+async fn get_default_branch(cwd: &Path) -> Option<String> {
+    // Prefer the first remote (with origin prioritized).
+    let remotes = get_git_remotes(cwd).await.unwrap_or_default();
+    for remote in remotes {
+        // Try symbolic-ref, which returns something like: refs/remotes/origin/main
+        if let Some(symref_output) = run_git_command_with_timeout(
+            &[
+                "symbolic-ref",
+                "--quiet",
+                &format!("refs/remotes/{remote}/HEAD"),
+            ],
+            cwd,
+        )
+        .await
+            && symref_output.status.success()
+            && let Ok(sym) = String::from_utf8(symref_output.stdout)
+        {
+            let trimmed = sym.trim();
+            if let Some((_, name)) = trimmed.rsplit_once('/') {
+                return Some(name.to_string());
+            }
+        }
+
+        // Fall back to parsing `git remote show <remote>` output.
+        if let Some(show_output) =
+            run_git_command_with_timeout(&["remote", "show", &remote], cwd).await
+            && show_output.status.success()
+            && let Ok(text) = String::from_utf8(show_output.stdout)
+        {
+            for line in text.lines() {
+                let line = line.trim();
+                if let Some(rest) = line.strip_prefix("HEAD branch:") {
+                    let name = rest.trim();
+                    if !name.is_empty() {
+                        return Some(name.to_string());
+                    }
+                }
+            }
+        }
+    }
+
+    // No remote-derived default; try common local defaults if they exist.
+    for candidate in ["main", "master"] {
+        if let Some(verify) = run_git_command_with_timeout(
+            &[
+                "rev-parse",
+                "--verify",
+                "--quiet",
+                &format!("refs/heads/{candidate}"),
+            ],
+            cwd,
+        )
+        .await
+            && verify.status.success()
+        {
+            return Some(candidate.to_string());
+        }
+    }
+
+    None
+}
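
// Illustrative git output each fallback consumes (values assumed, for a repo
// whose default branch is `main`):
//   1) `git symbolic-ref --quiet refs/remotes/origin/HEAD` -> "refs/remotes/origin/main"
//   2) `git remote show origin`                            -> "...  HEAD branch: main ..."
//   3) `git rev-parse --verify --quiet refs/heads/main`    -> exit status 0 if the ref exists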
+
+/// Build an ancestry of branches starting at the current branch and ending at the
+/// repository's default branch (if determinable).
+async fn branch_ancestry(cwd: &Path) -> Option<Vec<String>> {
+    // Discover the current branch (ignore detached HEAD by treating it as None).
+    let current_branch = run_git_command_with_timeout(&["rev-parse", "--abbrev-ref", "HEAD"], cwd)
+        .await
+        .and_then(|o| {
+            if o.status.success() {
+                String::from_utf8(o.stdout).ok()
+            } else {
+                None
+            }
+        })
+        .map(|s| s.trim().to_string())
+        .filter(|s| s != "HEAD");
+
+    // Discover the default branch.
+    let default_branch = get_default_branch(cwd).await;
+
+    let mut ancestry: Vec<String> = Vec::new();
+    let mut seen: HashSet<String> = HashSet::new();
+    if let Some(cb) = current_branch.clone() {
+        seen.insert(cb.clone());
+        ancestry.push(cb);
+    }
+    if let Some(db) = default_branch
+        && !seen.contains(&db)
+    {
+        seen.insert(db.clone());
+        ancestry.push(db);
+    }
+
+    // Expand candidates: include any remote branches that already contain HEAD.
+    // This addresses cases where we're on a new local-only branch forked from a
+    // remote branch that isn't the repository default. We prioritize remotes in
+    // the order returned by get_git_remotes (origin first).
+    let remotes = get_git_remotes(cwd).await.unwrap_or_default();
+    for remote in remotes {
+        if let Some(output) = run_git_command_with_timeout(
+            &[
+                "for-each-ref",
+                "--format=%(refname:short)",
+                "--contains=HEAD",
+                &format!("refs/remotes/{remote}"),
+            ],
+            cwd,
+        )
+        .await
+            && output.status.success()
+            && let Ok(text) = String::from_utf8(output.stdout)
+        {
+            for line in text.lines() {
+                let short = line.trim();
+                // Expect a format like "origin/feature"; extract the branch path after "remote/".
+                if let Some(stripped) = short.strip_prefix(&format!("{remote}/"))
+                    && !stripped.is_empty()
+                    && !seen.contains(stripped)
+                {
+                    seen.insert(stripped.to_string());
+                    ancestry.push(stripped.to_string());
+                }
+            }
+        }
+    }
+
+    // Ensure we return Some vector, even if empty, to allow caller logic to proceed.
+    Some(ancestry)
+}
+
+// Helper for a single branch: return the remote SHA if the branch is present on
+// any remote, and the distance (commits HEAD is ahead of that branch). The first
+// item is None if the branch is not present on any remote. Returns None if the
+// distance could not be computed due to git errors/timeouts.
+async fn branch_remote_and_distance(
+    cwd: &Path,
+    branch: &str,
+    remotes: &[String],
+) -> Option<(Option<GitSha>, usize)> {
+    // Try to find the first remote ref that exists for this branch (origin prioritized by the caller).
+    let mut found_remote_sha: Option<GitSha> = None;
+    let mut found_remote_ref: Option<String> = None;
+    for remote in remotes {
+        let remote_ref = format!("refs/remotes/{remote}/{branch}");
+        let Some(verify_output) =
+            run_git_command_with_timeout(&["rev-parse", "--verify", "--quiet", &remote_ref], cwd)
+                .await
+        else {
+            // Mirror previous behavior: if the verify call times out/fails at the process level,
+            // treat the entire branch as unusable.
+            return None;
+        };
+        if !verify_output.status.success() {
+            continue;
+        }
+        let Ok(sha) = String::from_utf8(verify_output.stdout) else {
+            // Mirror previous behavior and skip the entire branch on parse failure.
+            return None;
+        };
+        found_remote_sha = Some(GitSha::new(sha.trim()));
+        found_remote_ref = Some(remote_ref);
+        break;
+    }
+
+    // Compute the distance as the number of commits HEAD is ahead of the branch.
+    // Prefer the local branch name if it exists; otherwise fall back to the remote ref (if any).
+    let count_output = if let Some(local_count) =
+        run_git_command_with_timeout(&["rev-list", "--count", &format!("{branch}..HEAD")], cwd)
+            .await
+    {
+        if local_count.status.success() {
+            local_count
+        } else if let Some(remote_ref) = &found_remote_ref {
+            match run_git_command_with_timeout(
+                &["rev-list", "--count", &format!("{remote_ref}..HEAD")],
+                cwd,
+            )
+            .await
+            {
+                Some(remote_count) => remote_count,
+                None => return None,
+            }
+        } else {
+            return None;
+        }
+    } else if let Some(remote_ref) = &found_remote_ref {
+        match run_git_command_with_timeout(
+            &["rev-list", "--count", &format!("{remote_ref}..HEAD")],
+            cwd,
+        )
+        .await
+        {
+            Some(remote_count) => remote_count,
+            None => return None,
+        }
+    } else {
+        return None;
+    };
+
+    if !count_output.status.success() {
+        return None;
+    }
+    let Ok(distance_str) = String::from_utf8(count_output.stdout) else {
+        return None;
+    };
+    let Ok(distance) = distance_str.trim().parse::<usize>() else {
+        return None;
+    };
+
+    Some((found_remote_sha, distance))
+}
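
// Distance semantics, by example (values assumed): on a feature branch two
// commits ahead of `origin/main`, `git rev-list --count main..HEAD` prints "2",
// so the "main" candidate contributes (Some(<sha of origin/main>), 2) to the
// closest-sha search below.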
+
+// Finds the closest sha that exists on any of the branches and also exists on any of the remotes.
+async fn find_closest_sha(cwd: &Path, branches: &[String], remotes: &[String]) -> Option<GitSha> {
+    // A sha and how many commits away from HEAD it is.
+    let mut closest_sha: Option<(GitSha, usize)> = None;
+    for branch in branches {
+        let Some((maybe_remote_sha, distance)) =
+            branch_remote_and_distance(cwd, branch, remotes).await
+        else {
+            continue;
+        };
+        let Some(remote_sha) = maybe_remote_sha else {
+            // Preserve existing behavior: skip branches that are not present on a remote.
+            continue;
+        };
+        match &closest_sha {
+            None => closest_sha = Some((remote_sha, distance)),
+            Some((_, best_distance)) if distance < *best_distance => {
+                closest_sha = Some((remote_sha, distance));
+            }
+            _ => {}
+        }
+    }
+    closest_sha.map(|(sha, _)| sha)
+}
+
+async fn diff_against_sha(cwd: &Path, sha: &GitSha) -> Option<String> {
+    let output = run_git_command_with_timeout(&["diff", &sha.0], cwd).await?;
+    // Exit code 0 is success with no diff;
+    // exit code 1 is success with a diff.
+    let exit_ok = output.status.code().is_some_and(|c| c == 0 || c == 1);
+    if !exit_ok {
+        return None;
+    }
+    let mut diff = String::from_utf8(output.stdout).ok()?;
+
+    if let Some(untracked_output) =
+        run_git_command_with_timeout(&["ls-files", "--others", "--exclude-standard"], cwd).await
+        && untracked_output.status.success()
+    {
+        let untracked: Vec<String> = String::from_utf8(untracked_output.stdout)
+            .ok()?
+            .lines()
+            .map(|s| s.to_string())
+            .filter(|s| !s.is_empty())
+            .collect();
+
+        if !untracked.is_empty() {
+            let futures_iter = untracked.into_iter().map(|file| async move {
+                let file_owned = file;
+                let args_vec: Vec<&str> =
+                    vec!["diff", "--binary", "--no-index", "/dev/null", &file_owned];
+                run_git_command_with_timeout(&args_vec, cwd).await
+            });
+            let results = join_all(futures_iter).await;
+            for extra in results.into_iter().flatten() {
+                if extra.status.code().is_some_and(|c| c == 0 || c == 1)
+                    && let Ok(s) = String::from_utf8(extra.stdout)
+                {
+                    diff.push_str(&s);
+                }
+            }
+        }
+    }
+
+    Some(diff)
+}
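
// The untracked-file trick used above (example output assumed): for a brand-new
// one-line `untracked.txt`, `git diff --binary --no-index /dev/null untracked.txt`
// exits with 1 and prints an additive patch ("--- /dev/null", "+++ b/untracked.txt",
// "+new"), which is appended so untracked files travel with the tracked diff.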
+
+/// Resolve the path that should be used for trust checks. Similar to
+/// [`util::is_inside_git_repo`], but resolves to the root of the main
+/// repository. Handles worktrees.
+pub fn resolve_root_git_project_for_trust(cwd: &Path) -> Option<PathBuf> {
+    let base = if cwd.is_dir() { cwd } else { cwd.parent()? };
+
+    // TODO: we should make this async, but it's primarily used deep in
+    // callstacks of sync code, and should almost always be fast.
+    let git_dir_out = std::process::Command::new("git")
+        .args(["rev-parse", "--git-common-dir"])
+        .current_dir(base)
+        .output()
+        .ok()?;
+    if !git_dir_out.status.success() {
+        return None;
+    }
+    let git_dir_s = String::from_utf8(git_dir_out.stdout)
+        .ok()?
+        .trim()
+        .to_string();
+
+    let git_dir_path_raw = if Path::new(&git_dir_s).is_absolute() {
+        PathBuf::from(&git_dir_s)
+    } else {
+        base.join(&git_dir_s)
+    };
+
+    // Normalize to handle macOS /var vs /private/var and resolve ".." segments.
+    let git_dir_path = std::fs::canonicalize(&git_dir_path_raw).unwrap_or(git_dir_path_raw);
+    git_dir_path.parent().map(Path::to_path_buf)
+}
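
// Worktree example (hypothetical layout): from inside /work/repo-wt, a linked
// worktree of /work/repo, `git rev-parse --git-common-dir` prints
// "/work/repo/.git"; its parent /work/repo is returned, so trust attaches to
// the main repository root rather than to the worktree.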

 #[cfg(test)]
 mod tests {
     #![allow(clippy::expect_used)]
     #![allow(clippy::unwrap_used)]

     use super::*;

     use std::fs;
@@ -110,7 +468,8 @@ mod tests {

     // Helper function to create a test git repository
     async fn create_test_git_repo(temp_dir: &TempDir) -> PathBuf {
-        let repo_path = temp_dir.path().to_path_buf();
+        let repo_path = temp_dir.path().join("repo");
+        fs::create_dir(&repo_path).expect("Failed to create repo dir");
         let envs = vec![
             ("GIT_CONFIG_GLOBAL", "/dev/null"),
             ("GIT_CONFIG_NOSYSTEM", "1"),
@@ -165,6 +524,41 @@ mod tests {
         repo_path
     }

+    async fn create_test_git_repo_with_remote(temp_dir: &TempDir) -> (PathBuf, String) {
+        let repo_path = create_test_git_repo(temp_dir).await;
+        let remote_path = temp_dir.path().join("remote.git");
+
+        Command::new("git")
+            .args(["init", "--bare", remote_path.to_str().unwrap()])
+            .output()
+            .await
+            .expect("Failed to init bare remote");
+
+        Command::new("git")
+            .args(["remote", "add", "origin", remote_path.to_str().unwrap()])
+            .current_dir(&repo_path)
+            .output()
+            .await
+            .expect("Failed to add remote");
+
+        let output = Command::new("git")
+            .args(["rev-parse", "--abbrev-ref", "HEAD"])
+            .current_dir(&repo_path)
+            .output()
+            .await
+            .expect("Failed to get branch");
+        let branch = String::from_utf8(output.stdout).unwrap().trim().to_string();
+
+        Command::new("git")
+            .args(["push", "-u", "origin", &branch])
+            .current_dir(&repo_path)
+            .output()
+            .await
+            .expect("Failed to push initial commit");
+
+        (repo_path, branch)
+    }
+
     #[tokio::test]
     async fn test_collect_git_info_non_git_directory() {
         let temp_dir = TempDir::new().expect("Failed to create temp dir");
@@ -278,6 +672,210 @@ mod tests {
         assert_eq!(git_info.branch, Some("feature-branch".to_string()));
     }

+    #[tokio::test]
+    async fn test_get_git_working_tree_state_clean_repo() {
+        let temp_dir = TempDir::new().expect("Failed to create temp dir");
+        let (repo_path, branch) = create_test_git_repo_with_remote(&temp_dir).await;
+
+        let remote_sha = Command::new("git")
+            .args(["rev-parse", &format!("origin/{branch}")])
+            .current_dir(&repo_path)
+            .output()
+            .await
+            .expect("Failed to rev-parse remote");
+        let remote_sha = String::from_utf8(remote_sha.stdout)
+            .unwrap()
+            .trim()
+            .to_string();
+
+        let state = git_diff_to_remote(&repo_path)
+            .await
+            .expect("Should collect working tree state");
+        assert_eq!(state.sha, GitSha::new(&remote_sha));
+        assert!(state.diff.is_empty());
+    }
+
+    #[tokio::test]
+    async fn test_get_git_working_tree_state_with_changes() {
+        let temp_dir = TempDir::new().expect("Failed to create temp dir");
+        let (repo_path, branch) = create_test_git_repo_with_remote(&temp_dir).await;
+
+        let tracked = repo_path.join("test.txt");
+        fs::write(&tracked, "modified").unwrap();
+        fs::write(repo_path.join("untracked.txt"), "new").unwrap();
+
+        let remote_sha = Command::new("git")
+            .args(["rev-parse", &format!("origin/{branch}")])
+            .current_dir(&repo_path)
+            .output()
+            .await
+            .expect("Failed to rev-parse remote");
+        let remote_sha = String::from_utf8(remote_sha.stdout)
+            .unwrap()
+            .trim()
+            .to_string();
+
+        let state = git_diff_to_remote(&repo_path)
+            .await
+            .expect("Should collect working tree state");
+        assert_eq!(state.sha, GitSha::new(&remote_sha));
+        assert!(state.diff.contains("test.txt"));
+        assert!(state.diff.contains("untracked.txt"));
+    }
+
+    #[tokio::test]
+    async fn test_get_git_working_tree_state_branch_fallback() {
+        let temp_dir = TempDir::new().expect("Failed to create temp dir");
+        let (repo_path, _branch) = create_test_git_repo_with_remote(&temp_dir).await;
+
+        Command::new("git")
+            .args(["checkout", "-b", "feature"])
+            .current_dir(&repo_path)
+            .output()
+            .await
+            .expect("Failed to create feature branch");
+        Command::new("git")
+            .args(["push", "-u", "origin", "feature"])
+            .current_dir(&repo_path)
+            .output()
+            .await
+            .expect("Failed to push feature branch");
+
+        Command::new("git")
+            .args(["checkout", "-b", "local-branch"])
+            .current_dir(&repo_path)
+            .output()
+            .await
+            .expect("Failed to create local branch");
+
+        let remote_sha = Command::new("git")
+            .args(["rev-parse", "origin/feature"])
+            .current_dir(&repo_path)
+            .output()
+            .await
+            .expect("Failed to rev-parse remote");
+        let remote_sha = String::from_utf8(remote_sha.stdout)
+            .unwrap()
+            .trim()
+            .to_string();
+
+        let state = git_diff_to_remote(&repo_path)
+            .await
+            .expect("Should collect working tree state");
+        assert_eq!(state.sha, GitSha::new(&remote_sha));
+    }
+
+    #[test]
+    fn resolve_root_git_project_for_trust_returns_none_outside_repo() {
+        let tmp = TempDir::new().expect("tempdir");
+        assert!(resolve_root_git_project_for_trust(tmp.path()).is_none());
+    }
+
+    #[tokio::test]
+    async fn resolve_root_git_project_for_trust_regular_repo_returns_repo_root() {
+        let temp_dir = TempDir::new().expect("Failed to create temp dir");
+        let repo_path = create_test_git_repo(&temp_dir).await;
+        let expected = std::fs::canonicalize(&repo_path).unwrap().to_path_buf();
+
+        assert_eq!(
+            resolve_root_git_project_for_trust(&repo_path),
+            Some(expected.clone())
+        );
+        let nested = repo_path.join("sub/dir");
+        std::fs::create_dir_all(&nested).unwrap();
+        assert_eq!(
+            resolve_root_git_project_for_trust(&nested),
+            Some(expected.clone())
+        );
+    }
+
+    #[tokio::test]
+    async fn resolve_root_git_project_for_trust_detects_worktree_and_returns_main_root() {
+        let temp_dir = TempDir::new().expect("Failed to create temp dir");
+        let repo_path = create_test_git_repo(&temp_dir).await;
+
+        // Create a linked worktree
+        let wt_root = temp_dir.path().join("wt");
+        let _ = std::process::Command::new("git")
+            .args([
+                "worktree",
+                "add",
+                wt_root.to_str().unwrap(),
+                "-b",
+                "feature/x",
+            ])
+            .current_dir(&repo_path)
+            .output()
+            .expect("git worktree add");
+
+        let expected = std::fs::canonicalize(&repo_path).ok();
+        let got = resolve_root_git_project_for_trust(&wt_root)
+            .and_then(|p| std::fs::canonicalize(p).ok());
+        assert_eq!(got, expected);
+        let nested = wt_root.join("nested/sub");
+        std::fs::create_dir_all(&nested).unwrap();
+        let got_nested =
+            resolve_root_git_project_for_trust(&nested).and_then(|p| std::fs::canonicalize(p).ok());
+        assert_eq!(got_nested, expected);
+    }
+
+    #[test]
+    fn resolve_root_git_project_for_trust_non_worktrees_gitdir_returns_none() {
+        let tmp = TempDir::new().expect("tempdir");
+        let proj = tmp.path().join("proj");
+        std::fs::create_dir_all(proj.join("nested")).unwrap();
+
+        // `.git` is a file but does not point to a worktrees path
+        std::fs::write(
+            proj.join(".git"),
+            format!(
+                "gitdir: {}\n",
+                tmp.path().join("some/other/location").display()
+            ),
+        )
+        .unwrap();
+
+        assert!(resolve_root_git_project_for_trust(&proj).is_none());
+        assert!(resolve_root_git_project_for_trust(&proj.join("nested")).is_none());
+    }
+
+    #[tokio::test]
+    async fn test_get_git_working_tree_state_unpushed_commit() {
+        let temp_dir = TempDir::new().expect("Failed to create temp dir");
+        let (repo_path, branch) = create_test_git_repo_with_remote(&temp_dir).await;
+
+        let remote_sha = Command::new("git")
+            .args(["rev-parse", &format!("origin/{branch}")])
+            .current_dir(&repo_path)
+            .output()
+            .await
+            .expect("Failed to rev-parse remote");
+        let remote_sha = String::from_utf8(remote_sha.stdout)
+            .unwrap()
+            .trim()
+            .to_string();
+
+        fs::write(repo_path.join("test.txt"), "updated").unwrap();
+        Command::new("git")
+            .args(["add", "test.txt"])
+            .current_dir(&repo_path)
+            .output()
+            .await
+            .expect("Failed to add file");
+        Command::new("git")
+            .args(["commit", "-m", "local change"])
+            .current_dir(&repo_path)
+            .output()
+            .await
+            .expect("Failed to commit");
+
+        let state = git_diff_to_remote(&repo_path)
+            .await
+            .expect("Should collect working tree state");
+        assert_eq!(state.sha, GitSha::new(&remote_sha));
+        assert!(state.diff.contains("updated"));
+    }
+
     #[test]
     fn test_git_info_serialization() {
         let git_info = GitInfo {
@@ -12,20 +12,17 @@ pub fn is_known_safe_command(command: &[String]) -> bool {
     // introduce side effects ( "&&", "||", ";", and "|" ). If every
     // individual command in the script is itself a known‑safe command, then
     // the composite expression is considered safe.
-    if let [bash, flag, script] = command {
-        if bash == "bash" && flag == "-lc" {
-            if let Some(tree) = try_parse_bash(script) {
-                if let Some(all_commands) = try_parse_word_only_commands_sequence(&tree, script) {
-                    if !all_commands.is_empty()
-                        && all_commands
-                            .iter()
-                            .all(|cmd| is_safe_to_call_with_exec(cmd))
-                    {
-                        return true;
-                    }
-                }
-            }
-        }
-    }
+    if let [bash, flag, script] = command
+        && bash == "bash"
+        && flag == "-lc"
+        && let Some(tree) = try_parse_bash(script)
+        && let Some(all_commands) = try_parse_word_only_commands_sequence(&tree, script)
+        && !all_commands.is_empty()
+        && all_commands
+            .iter()
+            .all(|cmd| is_safe_to_call_with_exec(cmd))
+    {
+        return true;
+    }

     false
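
// Worked example (inputs assumed): ["bash", "-lc", "ls && pwd"] parses into two
// word-only commands that are each individually safe, so the chain above returns
// true; ["bash", "-lc", "ls && rm -rf /"] fails the `.all(...)` check and the
// function falls through to `false`.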

@@ -162,7 +159,6 @@ fn is_valid_sed_n_arg(arg: Option<&str>) -> bool {

 #[cfg(test)]
 mod tests {
-    #![allow(clippy::unwrap_used)]
     use super::*;

     fn vec_str(args: &[&str]) -> Vec<String> {

 codex-rs/core/src/landlock.rs (new file, 66 lines)
@@ -0,0 +1,66 @@
+use crate::protocol::SandboxPolicy;
+use crate::spawn::StdioPolicy;
+use crate::spawn::spawn_child_async;
+use std::collections::HashMap;
+use std::path::Path;
+use std::path::PathBuf;
+use tokio::process::Child;
+
+/// Spawn a shell tool command under the Linux Landlock+seccomp sandbox helper
+/// (codex-linux-sandbox).
+///
+/// Unlike macOS Seatbelt, where we directly embed the policy text, the Linux
+/// helper accepts a list of `--sandbox-permission`/`-s` flags mirroring the
+/// public CLI. We convert the internal [`SandboxPolicy`] representation into
+/// the equivalent CLI options.
+pub async fn spawn_command_under_linux_sandbox<P>(
+    codex_linux_sandbox_exe: P,
+    command: Vec<String>,
+    sandbox_policy: &SandboxPolicy,
+    cwd: PathBuf,
+    stdio_policy: StdioPolicy,
+    env: HashMap<String, String>,
+) -> std::io::Result<Child>
+where
+    P: AsRef<Path>,
+{
+    let args = create_linux_sandbox_command_args(command, sandbox_policy, &cwd);
+    let arg0 = Some("codex-linux-sandbox");
+    spawn_child_async(
+        codex_linux_sandbox_exe.as_ref().to_path_buf(),
+        args,
+        arg0,
+        cwd,
+        sandbox_policy,
+        stdio_policy,
+        env,
+    )
+    .await
+}
+
+/// Converts the sandbox policy into the CLI invocation for `codex-linux-sandbox`.
+fn create_linux_sandbox_command_args(
+    command: Vec<String>,
+    sandbox_policy: &SandboxPolicy,
+    cwd: &Path,
+) -> Vec<String> {
+    #[expect(clippy::expect_used)]
+    let sandbox_policy_cwd = cwd.to_str().expect("cwd must be valid UTF-8").to_string();
+
+    #[expect(clippy::expect_used)]
+    let sandbox_policy_json =
+        serde_json::to_string(sandbox_policy).expect("Failed to serialize SandboxPolicy to JSON");
+
+    let mut linux_cmd: Vec<String> = vec![
+        sandbox_policy_cwd,
+        sandbox_policy_json,
+        // Separator so that command arguments starting with `-` are not parsed as
+        // options of the helper itself.
+        "--".to_string(),
+    ];
+
+    // Append the original tool command.
+    linux_cmd.extend(command);
+
+    linux_cmd
+}
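
// Illustrative invocation (policy JSON elided): for command ["ls", "-la"] and
// cwd "/repo", the helper argv is roughly
//     codex-linux-sandbox /repo '<sandbox-policy-json>' -- ls -la
// where the "--" guard keeps "-la" from being parsed as an option of the helper.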

@@ -11,19 +11,21 @@ mod chat_completions;
 mod client;
 mod client_common;
 pub mod codex;
 pub use codex::Codex;
 pub use codex::CodexSpawnOk;
 pub mod codex_wrapper;
 mod codex_conversation;
 pub use codex_conversation::CodexConversation;
 pub mod config;
 pub mod config_profile;
 pub mod config_types;
 mod conversation_history;
 mod environment_context;
 pub mod error;
 pub mod exec;
+mod exec_command;
 pub mod exec_env;
 mod flags;
 pub mod git_info;
 mod is_safe_command;
+pub mod landlock;
 mod mcp_connection_manager;
 mod mcp_tool_call;
 mod message_history;
@@ -34,20 +36,30 @@ pub use model_provider_info::ModelProviderInfo;
 pub use model_provider_info::WireApi;
 pub use model_provider_info::built_in_model_providers;
 pub use model_provider_info::create_oss_provider_with_base_url;
+mod conversation_manager;
+pub use conversation_manager::ConversationManager;
+pub use conversation_manager::NewConversation;
 pub mod model_family;
 mod models;
 mod openai_model_info;
 mod openai_tools;
 pub mod plan_tool;
-mod project_doc;
-pub mod protocol;
+pub mod project_doc;
 mod rollout;
 pub(crate) mod safety;
 pub mod seatbelt;
 pub mod shell;
 pub mod spawn;
+pub mod terminal;
+mod tool_apply_patch;
 pub mod turn_diff_tracker;
 pub mod user_agent;
 mod user_notification;
 pub mod util;
 pub use apply_patch::CODEX_APPLY_PATCH_ARG1;
 pub use safety::get_platform_sandbox;
+// Re-export the protocol types from the standalone `codex-protocol` crate so existing
+// `codex_core::protocol::...` references continue to work across the workspace.
+pub use codex_protocol::protocol;
+// Re-export protocol config enums to ensure call sites can use the same types
+// as those in the protocol crate when constructing protocol messages.
+pub use codex_protocol::config_types as protocol_config_types;
@@ -281,7 +281,6 @@ fn is_valid_mcp_server_name(server_name: &str) -> bool {
 }

 #[cfg(test)]
-#[allow(clippy::unwrap_used)]
 mod tests {
     use super::*;
     use mcp_types::ToolInputSchema;
@@ -4,13 +4,13 @@ use std::time::Instant;
 use tracing::error;

 use crate::codex::Session;
-use crate::models::FunctionCallOutputPayload;
-use crate::models::ResponseInputItem;
 use crate::protocol::Event;
 use crate::protocol::EventMsg;
 use crate::protocol::McpInvocation;
 use crate::protocol::McpToolCallBeginEvent;
 use crate::protocol::McpToolCallEndEvent;
+use codex_protocol::models::FunctionCallOutputPayload;
+use codex_protocol::models::ResponseInputItem;

 /// Handles the specified tool call and dispatches the appropriate
 /// `McpToolCallBegin` and `McpToolCallEnd` events to the `Session`.
@@ -125,16 +125,18 @@ pub(crate) async fn append_entry(text: &str, session_id: &Uuid, config: &Config)
 /// times if the lock is currently held by another process. This prevents a
 /// potential indefinite wait while still giving other writers some time to
 /// finish their operation.
-async fn acquire_exclusive_lock_with_retry(file: &std::fs::File) -> Result<()> {
+async fn acquire_exclusive_lock_with_retry(file: &File) -> Result<()> {
     use tokio::time::sleep;

     for _ in 0..MAX_RETRIES {
-        match fs2::FileExt::try_lock_exclusive(file) {
+        match file.try_lock() {
             Ok(()) => return Ok(()),
-            Err(e) if e.kind() == std::io::ErrorKind::WouldBlock => {
-                sleep(RETRY_SLEEP).await;
-            }
-            Err(e) => return Err(e),
+            Err(e) => match e {
+                std::fs::TryLockError::WouldBlock => {
+                    sleep(RETRY_SLEEP).await;
+                }
+                other => return Err(other.into()),
+            },
         }
     }

@@ -259,12 +261,14 @@ pub(crate) fn lookup(log_id: u64, offset: usize, config: &Config) -> Option<Hist
 #[cfg(unix)]
 fn acquire_shared_lock_with_retry(file: &File) -> Result<()> {
     for _ in 0..MAX_RETRIES {
-        match fs2::FileExt::try_lock_shared(file) {
+        match file.try_lock_shared() {
             Ok(()) => return Ok(()),
-            Err(e) if e.kind() == std::io::ErrorKind::WouldBlock => {
-                std::thread::sleep(RETRY_SLEEP);
-            }
-            Err(e) => return Err(e),
+            Err(e) => match e {
+                std::fs::TryLockError::WouldBlock => {
+                    std::thread::sleep(RETRY_SLEEP);
+                }
+                other => return Err(other.into()),
+            },
         }
     }
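
// Retry budget (as configured above): each attempt that hits
// `TryLockError::WouldBlock` sleeps for RETRY_SLEEP before retrying, so a
// contended lock is abandoned after at most MAX_RETRIES * RETRY_SLEEP of
// waiting; any other error aborts immediately via `other.into()`.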

@@ -1,3 +1,5 @@
+use crate::tool_apply_patch::ApplyPatchToolType;
+
 /// A model family is a group of models that share certain characteristics.
 #[derive(Debug, Clone, PartialEq, Eq, Hash)]
 pub struct ModelFamily {
@@ -23,6 +25,10 @@ pub struct ModelFamily {
     // the model such that its description can be omitted.
     // See https://platform.openai.com/docs/guides/tools-local-shell
     pub uses_local_shell_tool: bool,
+
+    /// Present if the model performs better when `apply_patch` is provided as
+    /// a tool call instead of just a bash command.
+    pub apply_patch_tool_type: Option<ApplyPatchToolType>,
 }

 macro_rules! model_family {
@@ -36,6 +42,7 @@ macro_rules! model_family {
             needs_special_apply_patch_instructions: false,
             supports_reasoning_summaries: false,
             uses_local_shell_tool: false,
+            apply_patch_tool_type: None,
         };
         // apply overrides
         $(
@@ -55,6 +62,7 @@ macro_rules! simple_model_family {
             needs_special_apply_patch_instructions: false,
             supports_reasoning_summaries: false,
             uses_local_shell_tool: false,
+            apply_patch_tool_type: None,
         })
     }};
 }
@@ -78,15 +86,20 @@ pub fn find_family_for_model(slug: &str) -> Option<ModelFamily> {
             supports_reasoning_summaries: true,
             uses_local_shell_tool: true,
         )
+    } else if slug.starts_with("codex-") {
+        model_family!(
+            slug, slug,
+            supports_reasoning_summaries: true,
+        )
     } else if slug.starts_with("gpt-4.1") {
         model_family!(
             slug, "gpt-4.1",
             needs_special_apply_patch_instructions: true,
         )
+    } else if slug.starts_with("gpt-oss") {
+        model_family!(slug, "gpt-oss", apply_patch_tool_type: Some(ApplyPatchToolType::Function))
     } else if slug.starts_with("gpt-4o") {
         simple_model_family!(slug, "gpt-4o")
-    } else if slug.starts_with("gpt-oss") {
-        simple_model_family!(slug, "gpt-oss")
     } else if slug.starts_with("gpt-3.5") {
         simple_model_family!(slug, "gpt-3.5")
     } else if slug.starts_with("gpt-5") {
@@ -167,10 +167,10 @@ impl ModelProviderInfo {

         if let Some(env_headers) = &self.env_http_headers {
             for (header, env_var) in env_headers {
-                if let Ok(val) = std::env::var(env_var) {
-                    if !val.trim().is_empty() {
-                        builder = builder.header(header, val);
-                    }
+                if let Ok(val) = std::env::var(env_var)
+                    && !val.trim().is_empty()
+                {
+                    builder = builder.header(header, val);
                 }
             }
         }
@@ -322,7 +322,6 @@ pub fn create_oss_provider_with_base_url(base_url: &str) -> ModelProviderInfo {

 #[cfg(test)]
 mod tests {
-    #![allow(clippy::unwrap_used)]
     use super::*;
     use pretty_assertions::assert_eq;
@@ -15,7 +15,8 @@ pub(crate) struct ModelInfo {
 }

 pub(crate) fn get_model_info(model_family: &ModelFamily) -> Option<ModelInfo> {
-    match model_family.slug.as_str() {
+    let slug = model_family.slug.as_str();
+    match slug {
         // OSS models have a 128k shared token pool.
         // Arbitrarily splitting it: 3/4 input context, 1/4 output.
         // https://openai.com/index/gpt-oss-model-card/
@@ -78,8 +79,13 @@ pub(crate) fn get_model_info(model_family: &ModelFamily) -> Option<ModelInfo> {
         }),

         "gpt-5" => Some(ModelInfo {
-            context_window: 200_000,
-            max_output_tokens: 100_000,
+            context_window: 400_000,
+            max_output_tokens: 128_000,
         }),

+        _ if slug.starts_with("codex-") => Some(ModelInfo {
+            context_window: 400_000,
+            max_output_tokens: 128_000,
+        }),
+
         _ => None,
@@ -9,6 +9,9 @@ use crate::model_family::ModelFamily;
 use crate::plan_tool::PLAN_TOOL;
 use crate::protocol::AskForApproval;
 use crate::protocol::SandboxPolicy;
+use crate::tool_apply_patch::ApplyPatchToolType;
+use crate::tool_apply_patch::create_apply_patch_freeform_tool;
+use crate::tool_apply_patch::create_apply_patch_json_tool;

 #[derive(Debug, Clone, Serialize, PartialEq)]
 pub struct ResponsesApiTool {
@@ -21,6 +24,20 @@ pub struct ResponsesApiTool {
     pub(crate) parameters: JsonSchema,
 }

+#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
+pub struct FreeformTool {
+    pub(crate) name: String,
+    pub(crate) description: String,
+    pub(crate) format: FreeformToolFormat,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
+pub struct FreeformToolFormat {
+    pub(crate) r#type: String,
+    pub(crate) syntax: String,
+    pub(crate) definition: String,
+}
+
 /// When serialized as JSON, this produces a valid "Tool" in the OpenAI
 /// Responses API.
 #[derive(Debug, Clone, Serialize, PartialEq)]
@@ -30,6 +47,10 @@ pub(crate) enum OpenAiTool {
     Function(ResponsesApiTool),
     #[serde(rename = "local_shell")]
     LocalShell {},
+    #[serde(rename = "web_search")]
+    WebSearch {},
+    #[serde(rename = "custom")]
+    Freeform(FreeformTool),
 }

 #[derive(Debug, Clone)]
@@ -37,35 +58,68 @@ pub enum ConfigShellToolType {
     DefaultShell,
     ShellWithRequest { sandbox_policy: SandboxPolicy },
     LocalShell,
+    StreamableShell,
 }

 #[derive(Debug, Clone)]
-pub struct ToolsConfig {
+pub(crate) struct ToolsConfig {
     pub shell_type: ConfigShellToolType,
     pub plan_tool: bool,
+    pub apply_patch_tool_type: Option<ApplyPatchToolType>,
+    pub web_search_request: bool,
+}
+
+pub(crate) struct ToolsConfigParams<'a> {
+    pub(crate) model_family: &'a ModelFamily,
+    pub(crate) approval_policy: AskForApproval,
+    pub(crate) sandbox_policy: SandboxPolicy,
+    pub(crate) include_plan_tool: bool,
+    pub(crate) include_apply_patch_tool: bool,
+    pub(crate) include_web_search_request: bool,
+    pub(crate) use_streamable_shell_tool: bool,
 }

 impl ToolsConfig {
-    pub fn new(
-        model_family: &ModelFamily,
-        approval_policy: AskForApproval,
-        sandbox_policy: SandboxPolicy,
-        include_plan_tool: bool,
-    ) -> Self {
-        let mut shell_type = if model_family.uses_local_shell_tool {
+    pub fn new(params: &ToolsConfigParams) -> Self {
+        let ToolsConfigParams {
+            model_family,
+            approval_policy,
+            sandbox_policy,
+            include_plan_tool,
+            include_apply_patch_tool,
+            include_web_search_request,
+            use_streamable_shell_tool,
+        } = params;
+        let mut shell_type = if *use_streamable_shell_tool {
+            ConfigShellToolType::StreamableShell
+        } else if model_family.uses_local_shell_tool {
             ConfigShellToolType::LocalShell
         } else {
             ConfigShellToolType::DefaultShell
         };
-        if matches!(approval_policy, AskForApproval::OnRequest) {
+        if matches!(approval_policy, AskForApproval::OnRequest) && !use_streamable_shell_tool {
            shell_type = ConfigShellToolType::ShellWithRequest {
                sandbox_policy: sandbox_policy.clone(),
            }
        }

+        let apply_patch_tool_type = match model_family.apply_patch_tool_type {
+            Some(ApplyPatchToolType::Freeform) => Some(ApplyPatchToolType::Freeform),
+            Some(ApplyPatchToolType::Function) => Some(ApplyPatchToolType::Function),
+            None => {
+                if *include_apply_patch_tool {
+                    Some(ApplyPatchToolType::Freeform)
+                } else {
+                    None
+                }
+            }
+        };
+
         Self {
             shell_type,
-            plan_tool: include_plan_tool,
+            plan_tool: *include_plan_tool,
+            apply_patch_tool_type,
+            web_search_request: *include_web_search_request,
         }
     }
 }
|
||||
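A minimal, self-contained sketch of the params-struct pattern this hunk adopts, under toy names (nothing here is the crate's real API). Bundling many positional arguments into one struct keeps call sites readable and makes new flags explicit; note that destructuring a borrowed struct binds references, which is why the diff writes `*include_plan_tool` and `*use_streamable_shell_tool`.

```rust
struct BuildParams<'a> {
    name: &'a str,
    include_extras: bool,
    use_fast_path: bool,
}

struct Build {
    label: String,
    extras: bool,
}

impl Build {
    fn new(params: &BuildParams) -> Self {
        // Destructuring `&BuildParams` binds `&bool` references, so the
        // bool fields must be dereferenced with `*` before use.
        let BuildParams {
            name,
            include_extras,
            use_fast_path,
        } = params;
        let label = if *use_fast_path {
            format!("{name} (fast)")
        } else {
            name.to_string()
        };
        Self {
            label,
            extras: *include_extras,
        }
    }
}

fn main() {
    // Call sites name every flag, unlike the old positional-bool signature.
    let b = Build::new(&BuildParams {
        name: "demo",
        include_extras: false,
        use_fast_path: true,
    });
    assert_eq!(b.label, "demo (fast)");
    assert!(!b.extras);
}
```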
@@ -112,16 +166,20 @@ fn create_shell_tool() -> OpenAiTool {
        "command".to_string(),
        JsonSchema::Array {
            items: Box::new(JsonSchema::String { description: None }),
            description: None,
            description: Some("The command to execute".to_string()),
        },
    );
    properties.insert(
        "workdir".to_string(),
        JsonSchema::String { description: None },
        JsonSchema::String {
            description: Some("The working directory to execute the command in".to_string()),
        },
    );
    properties.insert(
        "timeout".to_string(),
        JsonSchema::Number { description: None },
        "timeout_ms".to_string(),
        JsonSchema::Number {
            description: Some("The timeout for the command in milliseconds".to_string()),
        },
    );

    OpenAiTool::Function(ResponsesApiTool {

@@ -152,7 +210,7 @@ fn create_shell_tool_for_sandbox(sandbox_policy: &SandboxPolicy) -> OpenAiTool {
        },
    );
    properties.insert(
        "timeout".to_string(),
        "timeout_ms".to_string(),
        JsonSchema::Number {
            description: Some("The timeout for the command in milliseconds".to_string()),
        },

@@ -168,7 +226,7 @@ fn create_shell_tool_for_sandbox(sandbox_policy: &SandboxPolicy) -> OpenAiTool {
        properties.insert(
            "justification".to_string(),
            JsonSchema::String {
                description: Some("Only set if ask_for_escalated_permissions is true. 1-sentence explanation of why we want to run this command.".to_string()),
                description: Some("Only set if with_escalated_permissions is true. 1-sentence explanation of why we want to run this command.".to_string()),
            },
        );
    }

@@ -234,11 +292,16 @@ The shell tool is used to execute shell commands.
        },
    })
}
/// TODO(dylan): deprecate once we get rid of json tool
#[derive(Serialize, Deserialize)]
pub(crate) struct ApplyPatchToolArgs {
    pub(crate) input: String,
}

/// Returns JSON values that are compatible with Function Calling in the
/// Responses API:
/// https://platform.openai.com/docs/guides/function-calling?api-mode=responses
pub(crate) fn create_tools_json_for_responses_api(
pub fn create_tools_json_for_responses_api(
    tools: &Vec<OpenAiTool>,
) -> crate::error::Result<Vec<serde_json::Value>> {
    let mut tools_json = Vec::new();

@@ -336,11 +399,11 @@ fn sanitize_json_schema(value: &mut JsonValue) {
        }
        JsonValue::Object(map) => {
            // First, recursively sanitize known nested schema holders
            if let Some(props) = map.get_mut("properties") {
                if let Some(props_map) = props.as_object_mut() {
                    for (_k, v) in props_map.iter_mut() {
                        sanitize_json_schema(v);
                    }
            if let Some(props) = map.get_mut("properties")
                && let Some(props_map) = props.as_object_mut()
            {
                for (_k, v) in props_map.iter_mut() {
                    sanitize_json_schema(v);
                }
            }
            if let Some(items) = map.get_mut("items") {

@@ -360,18 +423,18 @@ fn sanitize_json_schema(value: &mut JsonValue) {
                .map(|s| s.to_string());

            // If type is an array (union), pick first supported; else leave to inference
            if ty.is_none() {
                if let Some(JsonValue::Array(types)) = map.get("type") {
                    for t in types {
                        if let Some(tt) = t.as_str() {
                            if matches!(
                                tt,
                                "object" | "array" | "string" | "number" | "integer" | "boolean"
                            ) {
                                ty = Some(tt.to_string());
                                break;
                            }
                        }
            if ty.is_none()
                && let Some(JsonValue::Array(types)) = map.get("type")
            {
                for t in types {
                    if let Some(tt) = t.as_str()
                        && matches!(
                            tt,
                            "object" | "array" | "string" | "number" | "integer" | "boolean"
                        )
                    {
                        ty = Some(tt.to_string());
                        break;
                    }
                }
            }
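The two hunks above rewrite nested `if let` blocks as `let`-chains (`if let … && let …`), which need a toolchain where let-chains are stable (Rust 2024 edition). A minimal sketch of the same transformation, using `serde_json` as the diff itself does:

```rust
use serde_json::{Value, json};

// Before: two nested `if let` blocks. After: one chain, one indent level.
// Same behavior, less rightward drift.
fn first_property_count(schema: &Value) -> Option<usize> {
    if let Some(props) = schema.get("properties")
        && let Some(map) = props.as_object()
    {
        return Some(map.len());
    }
    None
}

fn main() {
    let schema = json!({ "properties": { "a": {}, "b": {} } });
    assert_eq!(first_property_count(&schema), Some(2));
    assert_eq!(first_property_count(&json!({ "type": "string" })), None);
}
```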
@@ -449,14 +512,42 @@ pub(crate) fn get_openai_tools(
        ConfigShellToolType::LocalShell => {
            tools.push(OpenAiTool::LocalShell {});
        }
        ConfigShellToolType::StreamableShell => {
            tools.push(OpenAiTool::Function(
                crate::exec_command::create_exec_command_tool_for_responses_api(),
            ));
            tools.push(OpenAiTool::Function(
                crate::exec_command::create_write_stdin_tool_for_responses_api(),
            ));
        }
    }

    if config.plan_tool {
        tools.push(PLAN_TOOL.clone());
    }

    if let Some(apply_patch_tool_type) = &config.apply_patch_tool_type {
        match apply_patch_tool_type {
            ApplyPatchToolType::Freeform => {
                tools.push(create_apply_patch_freeform_tool());
            }
            ApplyPatchToolType::Function => {
                tools.push(create_apply_patch_json_tool());
            }
        }
    }

    if config.web_search_request {
        tools.push(OpenAiTool::WebSearch {});
    }

    if let Some(mcp_tools) = mcp_tools {
        for (name, tool) in mcp_tools {
        // Ensure deterministic ordering to maximize prompt cache hits.
        // HashMap iteration order is non-deterministic, so sort by fully-qualified tool name.
        let mut entries: Vec<(String, mcp_types::Tool)> = mcp_tools.into_iter().collect();
        entries.sort_by(|a, b| a.0.cmp(&b.0));

        for (name, tool) in entries.into_iter() {
            match mcp_tool_to_openai_tool(name.clone(), tool.clone()) {
                Ok(converted_tool) => tools.push(OpenAiTool::Function(converted_tool)),
                Err(e) => {

@@ -470,7 +561,6 @@ pub(crate) fn get_openai_tools(
}
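Why the sort matters: `HashMap` iteration order is unspecified, so serializing tools straight from the map would shuffle the prompt between runs and defeat prompt caching. A self-contained sketch of the same collect-and-sort step, with a `&str` standing in for the crate's `mcp_types::Tool`:

```rust
use std::collections::HashMap;

fn main() {
    let tools: HashMap<String, &str> = HashMap::from([
        ("test_server/something".to_string(), "b"),
        ("test_server/do".to_string(), "a"),
        ("test_server/cool".to_string(), "c"),
    ]);

    // Collect into a Vec and sort by fully-qualified name, as the hunk does.
    let mut entries: Vec<(String, &str)> = tools.into_iter().collect();
    entries.sort_by(|a, b| a.0.cmp(&b.0));

    let names: Vec<&str> = entries.iter().map(|(n, _)| n.as_str()).collect();
    assert_eq!(
        names,
        ["test_server/cool", "test_server/do", "test_server/something"]
    );
}
```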
#[cfg(test)]
#[allow(clippy::expect_used)]
mod tests {
    use crate::model_family::find_family_for_model;
    use mcp_types::ToolInputSchema;

@@ -484,6 +574,8 @@ mod tests {
            .map(|tool| match tool {
                OpenAiTool::Function(ResponsesApiTool { name, .. }) => name,
                OpenAiTool::LocalShell {} => "local_shell",
                OpenAiTool::WebSearch {} => "web_search",
                OpenAiTool::Freeform(FreeformTool { name, .. }) => name,
            })
            .collect::<Vec<_>>();

@@ -504,40 +596,49 @@ mod tests {
    fn test_get_openai_tools() {
        let model_family = find_family_for_model("codex-mini-latest")
            .expect("codex-mini-latest should be a valid model family");
        let config = ToolsConfig::new(
            &model_family,
            AskForApproval::Never,
            SandboxPolicy::ReadOnly,
            true,
        );
        let config = ToolsConfig::new(&ToolsConfigParams {
            model_family: &model_family,
            approval_policy: AskForApproval::Never,
            sandbox_policy: SandboxPolicy::ReadOnly,
            include_plan_tool: true,
            include_apply_patch_tool: false,
            include_web_search_request: true,
            use_streamable_shell_tool: false,
        });
        let tools = get_openai_tools(&config, Some(HashMap::new()));

        assert_eq_tool_names(&tools, &["local_shell", "update_plan"]);
        assert_eq_tool_names(&tools, &["local_shell", "update_plan", "web_search"]);
    }

    #[test]
    fn test_get_openai_tools_default_shell() {
        let model_family = find_family_for_model("o3").expect("o3 should be a valid model family");
        let config = ToolsConfig::new(
            &model_family,
            AskForApproval::Never,
            SandboxPolicy::ReadOnly,
            true,
        );
        let config = ToolsConfig::new(&ToolsConfigParams {
            model_family: &model_family,
            approval_policy: AskForApproval::Never,
            sandbox_policy: SandboxPolicy::ReadOnly,
            include_plan_tool: true,
            include_apply_patch_tool: false,
            include_web_search_request: true,
            use_streamable_shell_tool: false,
        });
        let tools = get_openai_tools(&config, Some(HashMap::new()));

        assert_eq_tool_names(&tools, &["shell", "update_plan"]);
        assert_eq_tool_names(&tools, &["shell", "update_plan", "web_search"]);
    }

    #[test]
    fn test_get_openai_tools_mcp_tools() {
        let model_family = find_family_for_model("o3").expect("o3 should be a valid model family");
        let config = ToolsConfig::new(
            &model_family,
            AskForApproval::Never,
            SandboxPolicy::ReadOnly,
            false,
        );
        let config = ToolsConfig::new(&ToolsConfigParams {
            model_family: &model_family,
            approval_policy: AskForApproval::Never,
            sandbox_policy: SandboxPolicy::ReadOnly,
            include_plan_tool: false,
            include_apply_patch_tool: false,
            include_web_search_request: true,
            use_streamable_shell_tool: false,
        });
        let tools = get_openai_tools(
            &config,
            Some(HashMap::from([(

@@ -559,8 +660,8 @@ mod tests {
                        "number_property": { "type": "number" },
                    },
                    "required": [
                        "string_property",
                        "number_property"
                        "string_property".to_string(),
                        "number_property".to_string()
                    ],
                    "additionalProperties": Some(false),
                },

@@ -576,10 +677,13 @@ mod tests {
            )])),
        );

        assert_eq_tool_names(&tools, &["shell", "test_server/do_something_cool"]);
        assert_eq_tool_names(
            &tools,
            &["shell", "web_search", "test_server/do_something_cool"],
        );

        assert_eq!(
            tools[1],
            tools[2],
            OpenAiTool::Function(ResponsesApiTool {
                name: "test_server/do_something_cool".to_string(),
                parameters: JsonSchema::Object {

@@ -622,15 +726,93 @@ mod tests {
        );
    }

    #[test]
    fn test_get_openai_tools_mcp_tools_sorted_by_name() {
        let model_family = find_family_for_model("o3").expect("o3 should be a valid model family");
        let config = ToolsConfig::new(&ToolsConfigParams {
            model_family: &model_family,
            approval_policy: AskForApproval::Never,
            sandbox_policy: SandboxPolicy::ReadOnly,
            include_plan_tool: false,
            include_apply_patch_tool: false,
            include_web_search_request: false,
            use_streamable_shell_tool: false,
        });

        // Intentionally construct a map with keys that would sort alphabetically.
        let tools_map: HashMap<String, mcp_types::Tool> = HashMap::from([
            (
                "test_server/do".to_string(),
                mcp_types::Tool {
                    name: "a".to_string(),
                    input_schema: ToolInputSchema {
                        properties: Some(serde_json::json!({})),
                        required: None,
                        r#type: "object".to_string(),
                    },
                    output_schema: None,
                    title: None,
                    annotations: None,
                    description: Some("a".to_string()),
                },
            ),
            (
                "test_server/something".to_string(),
                mcp_types::Tool {
                    name: "b".to_string(),
                    input_schema: ToolInputSchema {
                        properties: Some(serde_json::json!({})),
                        required: None,
                        r#type: "object".to_string(),
                    },
                    output_schema: None,
                    title: None,
                    annotations: None,
                    description: Some("b".to_string()),
                },
            ),
            (
                "test_server/cool".to_string(),
                mcp_types::Tool {
                    name: "c".to_string(),
                    input_schema: ToolInputSchema {
                        properties: Some(serde_json::json!({})),
                        required: None,
                        r#type: "object".to_string(),
                    },
                    output_schema: None,
                    title: None,
                    annotations: None,
                    description: Some("c".to_string()),
                },
            ),
        ]);

        let tools = get_openai_tools(&config, Some(tools_map));
        // Expect shell first, followed by MCP tools sorted by fully-qualified name.
        assert_eq_tool_names(
            &tools,
            &[
                "shell",
                "test_server/cool",
                "test_server/do",
                "test_server/something",
            ],
        );
    }

    #[test]
    fn test_mcp_tool_property_missing_type_defaults_to_string() {
        let model_family = find_family_for_model("o3").expect("o3 should be a valid model family");
        let config = ToolsConfig::new(
            &model_family,
            AskForApproval::Never,
            SandboxPolicy::ReadOnly,
            false,
        );
        let config = ToolsConfig::new(&ToolsConfigParams {
            model_family: &model_family,
            approval_policy: AskForApproval::Never,
            sandbox_policy: SandboxPolicy::ReadOnly,
            include_plan_tool: false,
            include_apply_patch_tool: false,
            include_web_search_request: true,
            use_streamable_shell_tool: false,
        });

        let tools = get_openai_tools(
            &config,

@@ -655,10 +837,10 @@ mod tests {
            )])),
        );

        assert_eq_tool_names(&tools, &["shell", "dash/search"]);
        assert_eq_tool_names(&tools, &["shell", "web_search", "dash/search"]);

        assert_eq!(
            tools[1],
            tools[2],
            OpenAiTool::Function(ResponsesApiTool {
                name: "dash/search".to_string(),
                parameters: JsonSchema::Object {

@@ -680,12 +862,15 @@ mod tests {
    #[test]
    fn test_mcp_tool_integer_normalized_to_number() {
        let model_family = find_family_for_model("o3").expect("o3 should be a valid model family");
        let config = ToolsConfig::new(
            &model_family,
            AskForApproval::Never,
            SandboxPolicy::ReadOnly,
            false,
        );
        let config = ToolsConfig::new(&ToolsConfigParams {
            model_family: &model_family,
            approval_policy: AskForApproval::Never,
            sandbox_policy: SandboxPolicy::ReadOnly,
            include_plan_tool: false,
            include_apply_patch_tool: false,
            include_web_search_request: true,
            use_streamable_shell_tool: false,
        });

        let tools = get_openai_tools(
            &config,

@@ -708,9 +893,9 @@ mod tests {
            )])),
        );

        assert_eq_tool_names(&tools, &["shell", "dash/paginate"]);
        assert_eq_tool_names(&tools, &["shell", "web_search", "dash/paginate"]);
        assert_eq!(
            tools[1],
            tools[2],
            OpenAiTool::Function(ResponsesApiTool {
                name: "dash/paginate".to_string(),
                parameters: JsonSchema::Object {

@@ -730,12 +915,15 @@ mod tests {
    #[test]
    fn test_mcp_tool_array_without_items_gets_default_string_items() {
        let model_family = find_family_for_model("o3").expect("o3 should be a valid model family");
        let config = ToolsConfig::new(
            &model_family,
            AskForApproval::Never,
            SandboxPolicy::ReadOnly,
            false,
        );
        let config = ToolsConfig::new(&ToolsConfigParams {
            model_family: &model_family,
            approval_policy: AskForApproval::Never,
            sandbox_policy: SandboxPolicy::ReadOnly,
            include_plan_tool: false,
            include_apply_patch_tool: false,
            include_web_search_request: true,
            use_streamable_shell_tool: false,
        });

        let tools = get_openai_tools(
            &config,

@@ -758,9 +946,9 @@ mod tests {
            )])),
        );

        assert_eq_tool_names(&tools, &["shell", "dash/tags"]);
        assert_eq_tool_names(&tools, &["shell", "web_search", "dash/tags"]);
        assert_eq!(
            tools[1],
            tools[2],
            OpenAiTool::Function(ResponsesApiTool {
                name: "dash/tags".to_string(),
                parameters: JsonSchema::Object {

@@ -783,12 +971,15 @@ mod tests {
    #[test]
    fn test_mcp_tool_anyof_defaults_to_string() {
        let model_family = find_family_for_model("o3").expect("o3 should be a valid model family");
        let config = ToolsConfig::new(
            &model_family,
            AskForApproval::Never,
            SandboxPolicy::ReadOnly,
            false,
        );
        let config = ToolsConfig::new(&ToolsConfigParams {
            model_family: &model_family,
            approval_policy: AskForApproval::Never,
            sandbox_policy: SandboxPolicy::ReadOnly,
            include_plan_tool: false,
            include_apply_patch_tool: false,
            include_web_search_request: true,
            use_streamable_shell_tool: false,
        });

        let tools = get_openai_tools(
            &config,

@@ -811,9 +1002,9 @@ mod tests {
            )])),
        );

        assert_eq_tool_names(&tools, &["shell", "dash/value"]);
        assert_eq_tool_names(&tools, &["shell", "web_search", "dash/value"]);
        assert_eq!(
            tools[1],
            tools[2],
            OpenAiTool::Function(ResponsesApiTool {
                name: "dash/value".to_string(),
                parameters: JsonSchema::Object {
(File diff suppressed because it is too large.)
@@ -1,48 +1,31 @@
use std::collections::BTreeMap;
use std::sync::LazyLock;

use serde::Deserialize;
use serde::Serialize;

use crate::codex::Session;
use crate::models::FunctionCallOutputPayload;
use crate::models::ResponseInputItem;
use crate::openai_tools::JsonSchema;
use crate::openai_tools::OpenAiTool;
use crate::openai_tools::ResponsesApiTool;
use crate::protocol::Event;
use crate::protocol::EventMsg;
use codex_protocol::models::FunctionCallOutputPayload;
use codex_protocol::models::ResponseInputItem;

// Use the canonical plan tool types from the protocol crate to ensure
// type-identity matches events transported via `codex_protocol`.
pub use codex_protocol::plan_tool::PlanItemArg;
pub use codex_protocol::plan_tool::StepStatus;
pub use codex_protocol::plan_tool::UpdatePlanArgs;

// Types for the TODO tool arguments matching codex-vscode/todo-mcp/src/main.rs
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum StepStatus {
    Pending,
    InProgress,
    Completed,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(deny_unknown_fields)]
pub struct PlanItemArg {
    pub step: String,
    pub status: StepStatus,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(deny_unknown_fields)]
pub struct UpdatePlanArgs {
    #[serde(default)]
    pub explanation: Option<String>,
    pub plan: Vec<PlanItemArg>,
}

pub(crate) static PLAN_TOOL: LazyLock<OpenAiTool> = LazyLock::new(|| {
    let mut plan_item_props = BTreeMap::new();
    plan_item_props.insert("step".to_string(), JsonSchema::String { description: None });
    plan_item_props.insert(
        "status".to_string(),
        JsonSchema::String { description: None },
        JsonSchema::String {
            description: Some("One of: pending, in_progress, completed".to_string()),
        },
    );

    let plan_items_schema = JsonSchema::Array {

@@ -63,17 +46,11 @@ pub(crate) static PLAN_TOOL: LazyLock<OpenAiTool> = LazyLock::new(|| {

    OpenAiTool::Function(ResponsesApiTool {
        name: "update_plan".to_string(),
        description: r#"Use the update_plan tool to keep the user updated on the current plan for the task.
After understanding the user's task, call the update_plan tool with an initial plan. An example of a plan:
1. Explore the codebase to find relevant files (status: in_progress)
2. Implement the feature in the XYZ component (status: pending)
3. Commit changes and make a pull request (status: pending)
Each step should be a short, 1-sentence description.
Until all the steps are finished, there should always be exactly one in_progress step in the plan.
Call the update_plan tool whenever you finish a step, marking the completed step as `completed` and marking the next step as `in_progress`.
Before running a command, consider whether or not you have completed the previous step, and make sure to mark it as completed before moving on to the next step.
Sometimes, you may need to change plans in the middle of a task: call `update_plan` with the updated plan and make sure to provide an `explanation` of the rationale when doing so.
When all steps are completed, call update_plan one last time with all steps marked as `completed`."#.to_string(),
        description: r#"Updates the task plan.
Provide an optional explanation and a list of plan items, each with a step and status.
At most one step can be in_progress at a time.
"#
        .to_string(),
        strict: false,
        parameters: JsonSchema::Object {
            properties,
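For concreteness, here is the kind of payload a model sends when it calls `update_plan`, deserialized through local mirrors of the argument shapes shown above (the real types live in `codex_protocol::plan_tool`; these stand-ins just keep the sketch self-contained).

```rust
use serde::Deserialize;

#[derive(Debug, Deserialize)]
#[serde(rename_all = "snake_case")]
enum StepStatus {
    Pending,
    InProgress,
    Completed,
}

#[derive(Debug, Deserialize)]
struct PlanItemArg {
    step: String,
    status: StepStatus,
}

#[derive(Debug, Deserialize)]
struct UpdatePlanArgs {
    #[serde(default)]
    explanation: Option<String>,
    plan: Vec<PlanItemArg>,
}

fn main() {
    // Note the snake_case status strings matching the enum renaming above.
    let args: UpdatePlanArgs = serde_json::from_str(
        r#"{
            "explanation": "Switching to the API layer first.",
            "plan": [
                { "step": "Explore the codebase", "status": "completed" },
                { "step": "Implement the feature", "status": "in_progress" },
                { "step": "Commit and open a PR", "status": "pending" }
            ]
        }"#,
    )
    .expect("valid update_plan arguments");
    assert_eq!(args.plan.len(), 3);
    println!("{args:?}");
}
```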
@@ -1,18 +1,19 @@
//! Project-level documentation discovery.
//!
//! Project-level documentation can be stored in a file named `AGENTS.md`.
//! Currently, we include only the contents of the first file found as follows:
//! Project-level documentation can be stored in files named `AGENTS.md`.
//! We include the concatenation of all files found along the path from the
//! repository root to the current working directory as follows:
//!
//! 1. Look for the doc file in the current working directory (as determined
//!    by the `Config`).
//! 2. If not found, walk *upwards* until the Git repository root is reached
//!    (detected by the presence of a `.git` directory/file), or failing that,
//!    the filesystem root.
//! 3. If the Git root is encountered, look for the doc file there. If it
//!    exists, the search stops – we do **not** walk past the Git root.
//! 1. Determine the Git repository root by walking upwards from the current
//!    working directory until a `.git` directory or file is found. If no Git
//!    root is found, only the current working directory is considered.
//! 2. Collect every `AGENTS.md` found from the repository root down to the
//!    current working directory (inclusive) and concatenate their contents in
//!    that order.
//! 3. We do **not** walk past the Git root.

use crate::config::Config;
use std::path::Path;
use std::path::PathBuf;
use tokio::io::AsyncReadExt;
use tracing::error;

@@ -26,7 +27,7 @@ const PROJECT_DOC_SEPARATOR: &str = "\n\n--- project-doc ---\n\n";
/// Combines `Config::instructions` and `AGENTS.md` (if present) into a single
/// string of instructions.
pub(crate) async fn get_user_instructions(config: &Config) -> Option<String> {
    match find_project_doc(config).await {
    match read_project_docs(config).await {
        Ok(Some(project_doc)) => match &config.user_instructions {
            Some(original_instructions) => Some(format!(
                "{original_instructions}{PROJECT_DOC_SEPARATOR}{project_doc}"

@@ -41,101 +42,139 @@ pub(crate) async fn get_user_instructions(config: &Config) -> Option<String> {
    }
}

/// Attempt to locate and load the project documentation. Currently, the search
/// starts from `Config::cwd`, but if we may want to consider other directories
/// in the future, e.g., additional writable directories in the `SandboxPolicy`.
/// Attempt to locate and load the project documentation.
///
/// On success returns `Ok(Some(contents))`. If no documentation file is found
/// the function returns `Ok(None)`. Unexpected I/O failures bubble up as
/// `Err` so callers can decide how to handle them.
async fn find_project_doc(config: &Config) -> std::io::Result<Option<String>> {
    let max_bytes = config.project_doc_max_bytes;
/// On success returns `Ok(Some(contents))` where `contents` is the
/// concatenation of all discovered docs. If no documentation file is found the
/// function returns `Ok(None)`. Unexpected I/O failures bubble up as `Err` so
/// callers can decide how to handle them.
pub async fn read_project_docs(config: &Config) -> std::io::Result<Option<String>> {
    let max_total = config.project_doc_max_bytes;

    // Attempt to load from the working directory first.
    if let Some(doc) = load_first_candidate(&config.cwd, CANDIDATE_FILENAMES, max_bytes).await? {
        return Ok(Some(doc));
    if max_total == 0 {
        return Ok(None);
    }

    // Walk up towards the filesystem root, stopping once we encounter the Git
    // repository root. The presence of **either** a `.git` *file* or
    // *directory* counts.
    let mut dir = config.cwd.clone();
    let paths = discover_project_doc_paths(config)?;
    if paths.is_empty() {
        return Ok(None);
    }

    // Canonicalize the path so that we do not end up in an infinite loop when
    // `cwd` contains `..` components.
    let mut remaining: u64 = max_total as u64;
    let mut parts: Vec<String> = Vec::new();

    for p in paths {
        if remaining == 0 {
            break;
        }

        let file = match tokio::fs::File::open(&p).await {
            Ok(f) => f,
            Err(e) if e.kind() == std::io::ErrorKind::NotFound => continue,
            Err(e) => return Err(e),
        };

        let size = file.metadata().await?.len();
        let mut reader = tokio::io::BufReader::new(file).take(remaining);
        let mut data: Vec<u8> = Vec::new();
        reader.read_to_end(&mut data).await?;

        if size > remaining {
            tracing::warn!(
                "Project doc `{}` exceeds remaining budget ({} bytes) - truncating.",
                p.display(),
                remaining,
            );
        }

        let text = String::from_utf8_lossy(&data).to_string();
        if !text.trim().is_empty() {
            parts.push(text);
            remaining = remaining.saturating_sub(data.len() as u64);
        }
    }

    if parts.is_empty() {
        Ok(None)
    } else {
        Ok(Some(parts.join("\n\n")))
    }
}

/// Discover the list of AGENTS.md files using the same search rules as
/// `read_project_docs`, but return the file paths instead of concatenated
/// contents. The list is ordered from repository root to the current working
/// directory (inclusive). Symlinks are allowed. When `project_doc_max_bytes`
/// is zero, returns an empty list.
pub fn discover_project_doc_paths(config: &Config) -> std::io::Result<Vec<PathBuf>> {
    let mut dir = config.cwd.clone();
    if let Ok(canon) = dir.canonicalize() {
        dir = canon;
    }

    while let Some(parent) = dir.parent() {
        // `.git` can be a *file* (for worktrees or submodules) or a *dir*.
        let git_marker = dir.join(".git");
        let git_exists = match tokio::fs::metadata(&git_marker).await {
    // Build chain from cwd upwards and detect git root.
    let mut chain: Vec<PathBuf> = vec![dir.clone()];
    let mut git_root: Option<PathBuf> = None;
    let mut cursor = dir.clone();
    while let Some(parent) = cursor.parent() {
        let git_marker = cursor.join(".git");
        let git_exists = match std::fs::metadata(&git_marker) {
            Ok(_) => true,
            Err(e) if e.kind() == std::io::ErrorKind::NotFound => false,
            Err(e) => return Err(e),
        };

        if git_exists {
            // We are at the repo root – attempt one final load.
            if let Some(doc) = load_first_candidate(&dir, CANDIDATE_FILENAMES, max_bytes).await? {
                return Ok(Some(doc));
            }
            git_root = Some(cursor.clone());
            break;
        }

        dir = parent.to_path_buf();
        chain.push(parent.to_path_buf());
        cursor = parent.to_path_buf();
    }

    Ok(None)
}

/// Attempt to load the first candidate file found in `dir`. Returns the file
/// contents (truncated if it exceeds `max_bytes`) when successful.
async fn load_first_candidate(
    dir: &Path,
    names: &[&str],
    max_bytes: usize,
) -> std::io::Result<Option<String>> {
    for name in names {
        let candidate = dir.join(name);

        let file = match tokio::fs::File::open(&candidate).await {
            Err(e) if e.kind() == std::io::ErrorKind::NotFound => continue,
            Err(e) => return Err(e),
            Ok(f) => f,
        };

        let size = file.metadata().await?.len();

        let reader = tokio::io::BufReader::new(file);
        let mut data = Vec::with_capacity(std::cmp::min(size as usize, max_bytes));
        let mut limited = reader.take(max_bytes as u64);
        limited.read_to_end(&mut data).await?;

        if size as usize > max_bytes {
            tracing::warn!(
                "Project doc `{}` exceeds {max_bytes} bytes - truncating.",
                candidate.display(),
            );
        }
    let search_dirs: Vec<PathBuf> = if let Some(root) = git_root {
        let mut dirs: Vec<PathBuf> = Vec::new();
        let mut saw_root = false;
        for p in chain.iter().rev() {
            if !saw_root {
                if p == &root {
                    saw_root = true;
                } else {
                    continue;
                }
            }
            dirs.push(p.clone());
        }
        dirs
    } else {
        vec![config.cwd.clone()]
    };

        let contents = String::from_utf8_lossy(&data).to_string();
        if contents.trim().is_empty() {
            // Empty file – treat as not found.
            continue;
    let mut found: Vec<PathBuf> = Vec::new();
    for d in search_dirs {
        for name in CANDIDATE_FILENAMES {
            let candidate = d.join(name);
            match std::fs::symlink_metadata(&candidate) {
                Ok(md) => {
                    let ft = md.file_type();
                    // Allow regular files and symlinks; opening will later fail for dangling links.
                    if ft.is_file() || ft.is_symlink() {
                        found.push(candidate);
                        break;
                    }
                }
                Err(e) if e.kind() == std::io::ErrorKind::NotFound => continue,
                Err(e) => return Err(e),
            }
        }

        return Ok(Some(contents));
    }

    Ok(None)
    Ok(found)
}

#[cfg(test)]
mod tests {
    #![allow(clippy::expect_used, clippy::unwrap_used)]

    use super::*;
    use crate::config::ConfigOverrides;
    use crate::config::ConfigToml;

@@ -280,4 +319,32 @@ mod tests {

        assert_eq!(res, Some(INSTRUCTIONS.to_string()));
    }

    /// When both the repository root and the working directory contain
    /// AGENTS.md files, their contents are concatenated from root to cwd.
    #[tokio::test]
    async fn concatenates_root_and_cwd_docs() {
        let repo = tempfile::tempdir().expect("tempdir");

        // Simulate a git repository.
        std::fs::write(
            repo.path().join(".git"),
            "gitdir: /path/to/actual/git/dir\n",
        )
        .unwrap();

        // Repo root doc.
        fs::write(repo.path().join("AGENTS.md"), "root doc").unwrap();

        // Nested working directory with its own doc.
        let nested = repo.path().join("workspace/crate_a");
        std::fs::create_dir_all(&nested).unwrap();
        fs::write(nested.join("AGENTS.md"), "crate doc").unwrap();

        let mut cfg = make_config(&repo, 4096, None);
        cfg.cwd = nested;

        let res = get_user_instructions(&cfg).await.expect("doc expected");
        assert_eq!(res, "root doc\n\ncrate doc");
    }
}
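A self-contained sketch of the byte-budget logic in `read_project_docs`: each doc draws from a shared budget, and a doc that would overflow the budget is truncated rather than skipped. In-memory ASCII strings stand in for the AGENTS.md files on disk (byte slicing is safe here only because the inputs are ASCII).

```rust
fn concat_with_budget(docs: &[&str], max_total: usize) -> Option<String> {
    let mut remaining = max_total;
    let mut parts: Vec<String> = Vec::new();

    for doc in docs {
        if remaining == 0 {
            break;
        }
        // Take at most `remaining` bytes, mirroring `reader.take(remaining)`.
        let take = doc.len().min(remaining);
        let text = &doc[..take];
        if !text.trim().is_empty() {
            parts.push(text.to_string());
            remaining -= take;
        }
    }

    if parts.is_empty() {
        None
    } else {
        Some(parts.join("\n\n"))
    }
}

fn main() {
    // Root doc (8 bytes) fits; the crate doc is cut to the 4 bytes left over.
    assert_eq!(
        concat_with_budget(&["root doc", "crate doc"], 12),
        Some("root doc\n\ncrat".to_string())
    );
    // Blank docs never consume budget and never appear in the output.
    assert_eq!(concat_with_budget(&["", "   "], 100), None);
}
```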
@@ -22,7 +22,7 @@ use uuid::Uuid;
use crate::config::Config;
use crate::git_info::GitInfo;
use crate::git_info::collect_git_info;
use crate::models::ResponseItem;
use codex_protocol::models::ResponseItem;

const SESSIONS_SUBDIR: &str = "sessions";

@@ -132,6 +132,8 @@ impl RolloutRecorder {
                | ResponseItem::LocalShellCall { .. }
                | ResponseItem::FunctionCall { .. }
                | ResponseItem::FunctionCallOutput { .. }
                | ResponseItem::CustomToolCall { .. }
                | ResponseItem::CustomToolCallOutput { .. }
                | ResponseItem::Reasoning { .. } => filtered.push(item.clone()),
                ResponseItem::Other => {
                    // These should never be serialized.

@@ -194,6 +196,8 @@ impl RolloutRecorder {
                    | ResponseItem::LocalShellCall { .. }
                    | ResponseItem::FunctionCall { .. }
                    | ResponseItem::FunctionCallOutput { .. }
                    | ResponseItem::CustomToolCall { .. }
                    | ResponseItem::CustomToolCallOutput { .. }
                    | ResponseItem::Reasoning { .. } => items.push(item),
                    ResponseItem::Other => {}
                },

@@ -317,6 +321,8 @@ async fn rollout_writer(
            | ResponseItem::LocalShellCall { .. }
            | ResponseItem::FunctionCall { .. }
            | ResponseItem::FunctionCallOutput { .. }
            | ResponseItem::CustomToolCall { .. }
            | ResponseItem::CustomToolCallOutput { .. }
            | ResponseItem::Reasoning { .. } => {
                writer.write_line(&item).await?;
            }
@@ -21,7 +21,7 @@ pub enum SafetyCheck {
pub fn assess_patch_safety(
    action: &ApplyPatchAction,
    policy: AskForApproval,
    writable_roots: &[PathBuf],
    sandbox_policy: &SandboxPolicy,
    cwd: &Path,
) -> SafetyCheck {
    if action.is_empty() {

@@ -45,7 +45,7 @@ pub fn assess_patch_safety(
    // is possible that paths in the patch are hard links to files outside the
    // writable roots, so we should still run `apply_patch` in a sandbox in that
    // case.
    if is_write_patch_constrained_to_writable_paths(action, writable_roots, cwd)
    if is_write_patch_constrained_to_writable_paths(action, sandbox_policy, cwd)
        || policy == AskForApproval::OnFailure
    {
        // Only auto‑approve when we can actually enforce a sandbox. Otherwise

@@ -171,13 +171,19 @@ pub fn get_platform_sandbox() -> Option<SandboxType> {

fn is_write_patch_constrained_to_writable_paths(
    action: &ApplyPatchAction,
    writable_roots: &[PathBuf],
    sandbox_policy: &SandboxPolicy,
    cwd: &Path,
) -> bool {
    // Early‑exit if there are no declared writable roots.
    if writable_roots.is_empty() {
        return false;
    }
    let writable_roots = match sandbox_policy {
        SandboxPolicy::ReadOnly => {
            return false;
        }
        SandboxPolicy::DangerFullAccess => {
            return true;
        }
        SandboxPolicy::WorkspaceWrite { .. } => sandbox_policy.get_writable_roots_with_cwd(cwd),
    };

    // Normalize a path by removing `.` and resolving `..` without touching the
    // filesystem (works even if the file does not exist).

@@ -209,15 +215,9 @@ fn is_write_patch_constrained_to_writable_paths(
        None => return false,
    };

    writable_roots.iter().any(|root| {
        let root_abs = if root.is_absolute() {
            root.clone()
        } else {
            normalize(&cwd.join(root)).unwrap_or_else(|| cwd.join(root))
        };

        abs.starts_with(&root_abs)
    })
    writable_roots
        .iter()
        .any(|writable_root| writable_root.is_path_writable(&abs))
    };

    for (path, change) in action.changes() {

@@ -231,10 +231,10 @@ fn is_write_patch_constrained_to_writable_paths(
            if !is_path_writable(path) {
                return false;
            }
            if let Some(dest) = move_path {
                if !is_path_writable(dest) {
                    return false;
                }
            if let Some(dest) = move_path
                && !is_path_writable(dest)
            {
                return false;
            }
        }
    }

@@ -245,40 +245,57 @@ fn is_write_patch_constrained_to_writable_paths(

#[cfg(test)]
mod tests {
    #![allow(clippy::unwrap_used)]
    use super::*;
    use tempfile::TempDir;

    #[test]
    fn test_writable_roots_constraint() {
        let cwd = std::env::current_dir().unwrap();
        // Use a temporary directory as our workspace to avoid touching
        // the real current working directory.
        let tmp = TempDir::new().unwrap();
        let cwd = tmp.path().to_path_buf();
        let parent = cwd.parent().unwrap().to_path_buf();

        // Helper to build a single‑entry map representing a patch that adds a
        // file at `p`.
        // Helper to build a single‑entry patch that adds a file at `p`.
        let make_add_change = |p: PathBuf| ApplyPatchAction::new_add_for_test(&p, "".to_string());

        let add_inside = make_add_change(cwd.join("inner.txt"));
        let add_outside = make_add_change(parent.join("outside.txt"));

        // Policy limited to the workspace only; exclude system temp roots so
        // only `cwd` is writable by default.
        let policy_workspace_only = SandboxPolicy::WorkspaceWrite {
            writable_roots: vec![],
            network_access: false,
            exclude_tmpdir_env_var: true,
            exclude_slash_tmp: true,
        };

        assert!(is_write_patch_constrained_to_writable_paths(
            &add_inside,
            &[PathBuf::from(".")],
            &policy_workspace_only,
            &cwd,
        ));

        let add_outside_2 = make_add_change(parent.join("outside.txt"));
        assert!(!is_write_patch_constrained_to_writable_paths(
            &add_outside_2,
            &[PathBuf::from(".")],
            &add_outside,
            &policy_workspace_only,
            &cwd,
        ));

        // With parent dir added as writable root, it should pass.
        // With the parent dir explicitly added as a writable root, the
        // outside write should be permitted.
        let policy_with_parent = SandboxPolicy::WorkspaceWrite {
            writable_roots: vec![parent.clone()],
            network_access: false,
            exclude_tmpdir_env_var: true,
            exclude_slash_tmp: true,
        };
        assert!(is_write_patch_constrained_to_writable_paths(
            &add_outside,
            &[PathBuf::from("..")],
            &policy_with_parent,
            &cwd,
        ))
        ));
    }
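A self-contained sketch of the path-containment idea behind `is_write_patch_constrained_to_writable_paths`: normalize `.` and `..` away lexically (no filesystem access), then test the result against each writable root with `starts_with`. The `normalize` helper here is a simplified stand-in for the crate's private one.

```rust
use std::path::{Component, Path, PathBuf};

fn normalize(p: &Path) -> Option<PathBuf> {
    let mut out = PathBuf::new();
    for c in p.components() {
        match c {
            Component::CurDir => {}
            Component::ParentDir => {
                if !out.pop() {
                    return None; // walked above the root
                }
            }
            other => out.push(other.as_os_str()),
        }
    }
    Some(out)
}

fn is_under_roots(path: &Path, roots: &[PathBuf]) -> bool {
    match normalize(path) {
        Some(abs) => roots.iter().any(|root| abs.starts_with(root)),
        None => false,
    }
}

fn main() {
    let roots = vec![PathBuf::from("/workspace")];
    assert!(is_under_roots(Path::new("/workspace/src/./main.rs"), &roots));
    // `..` escapes the workspace, so this write would be rejected.
    assert!(!is_under_roots(Path::new("/workspace/../etc/passwd"), &roots));
}
```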
    #[test]
@@ -122,7 +122,6 @@ fn create_seatbelt_command_args(

#[cfg(test)]
mod tests {
    #![expect(clippy::expect_used)]
    use super::MACOS_SEATBELT_BASE_POLICY;
    use super::create_seatbelt_command_args;
    use crate::protocol::SandboxPolicy;
@@ -1,14 +1,24 @@
use serde::Deserialize;
use serde::Serialize;
use shlex;
use std::path::PathBuf;

#[derive(Debug, PartialEq, Eq)]
#[derive(Debug, PartialEq, Eq, Clone, Serialize, Deserialize)]
pub struct ZshShell {
    shell_path: String,
    zshrc_path: String,
}

#[derive(Debug, PartialEq, Eq)]
#[derive(Debug, PartialEq, Eq, Clone, Serialize, Deserialize)]
pub struct PowerShellConfig {
    exe: String, // Executable name or path, e.g. "pwsh" or "powershell.exe".
    bash_exe_fallback: Option<PathBuf>, // In case the model generates a bash command.
}

#[derive(Debug, PartialEq, Eq, Clone, Serialize, Deserialize)]
pub enum Shell {
    Zsh(ZshShell),
    PowerShell(PowerShellConfig),
    Unknown,
}

@@ -33,6 +43,61 @@ impl Shell {
                }
                Some(result)
            }
            Shell::PowerShell(ps) => {
                // If model generated a bash command, prefer a detected bash fallback
                if let Some(script) = strip_bash_lc(&command) {
                    return match &ps.bash_exe_fallback {
                        Some(bash) => Some(vec![
                            bash.to_string_lossy().to_string(),
                            "-lc".to_string(),
                            script,
                        ]),

                        // No bash fallback → run the script under PowerShell.
                        // It will likely fail (except for some simple commands), but the error
                        // should give a clue to the model to fix upon retry that it's running under PowerShell.
                        None => Some(vec![
                            ps.exe.clone(),
                            "-NoProfile".to_string(),
                            "-Command".to_string(),
                            script,
                        ]),
                    };
                }

                // Not a bash command. If model did not generate a PowerShell command,
                // turn it into a PowerShell command.
                let first = command.first().map(String::as_str);
                if first != Some(ps.exe.as_str()) {
                    // TODO (CODEX_2900): Handle escaping newlines.
                    if command.iter().any(|a| a.contains('\n') || a.contains('\r')) {
                        return Some(command);
                    }

                    let joined = shlex::try_join(command.iter().map(|s| s.as_str())).ok();
                    return joined.map(|arg| {
                        vec![
                            ps.exe.clone(),
                            "-NoProfile".to_string(),
                            "-Command".to_string(),
                            arg,
                        ]
                    });
                }

                // Model generated a PowerShell command. Run it.
                Some(command)
            }
            Shell::Unknown => None,
        }
    }

    pub fn name(&self) -> Option<String> {
        match self {
            Shell::Zsh(zsh) => std::path::Path::new(&zsh.shell_path)
                .file_name()
                .map(|s| s.to_string_lossy().to_string()),
            Shell::PowerShell(ps) => Some(ps.exe.clone()),
            Shell::Unknown => None,
        }
    }

@@ -70,13 +135,13 @@ pub async fn default_user_shell() -> Shell {
        }
        let stdout = String::from_utf8_lossy(&o.stdout);
        for line in stdout.lines() {
            if let Some(shell_path) = line.strip_prefix("UserShell: ") {
                if shell_path.ends_with("/zsh") {
                    return Shell::Zsh(ZshShell {
                        shell_path: shell_path.to_string(),
                        zshrc_path: format!("{home}/.zshrc"),
                    });
                }
            if let Some(shell_path) = line.strip_prefix("UserShell: ")
                && shell_path.ends_with("/zsh")
            {
                return Shell::Zsh(ZshShell {
                    shell_path: shell_path.to_string(),
                    zshrc_path: format!("{home}/.zshrc"),
                });
            }
        }

@@ -86,11 +151,51 @@ pub async fn default_user_shell() -> Shell {
    }
}

#[cfg(not(target_os = "macos"))]
#[cfg(all(not(target_os = "macos"), not(target_os = "windows")))]
pub async fn default_user_shell() -> Shell {
    Shell::Unknown
}

#[cfg(target_os = "windows")]
pub async fn default_user_shell() -> Shell {
    use tokio::process::Command;

    // Prefer PowerShell 7+ (`pwsh`) if available, otherwise fall back to Windows PowerShell.
    let has_pwsh = Command::new("pwsh")
        .arg("-NoLogo")
        .arg("-NoProfile")
        .arg("-Command")
        .arg("$PSVersionTable.PSVersion.Major")
        .output()
        .await
        .map(|o| o.status.success())
        .unwrap_or(false);
    let bash_exe = if Command::new("bash.exe")
        .arg("--version")
        .output()
        .await
        .ok()
        .map(|o| o.status.success())
        .unwrap_or(false)
    {
        which::which("bash.exe").ok()
    } else {
        None
    };

    if has_pwsh {
        Shell::PowerShell(PowerShellConfig {
            exe: "pwsh.exe".to_string(),
            bash_exe_fallback: bash_exe,
        })
    } else {
        Shell::PowerShell(PowerShellConfig {
            exe: "powershell.exe".to_string(),
            bash_exe_fallback: bash_exe,
        })
    }
}

#[cfg(test)]
#[cfg(target_os = "macos")]
mod tests {

@@ -98,7 +203,6 @@ mod tests {
    use std::process::Command;

    #[tokio::test]
    #[expect(clippy::unwrap_used)]
    async fn test_current_shell_detects_zsh() {
        let shell = Command::new("sh")
            .arg("-c")

@@ -129,7 +233,6 @@ mod tests {
        assert_eq!(actual_cmd, None);
    }

    #[expect(clippy::unwrap_used)]
    #[tokio::test]
    async fn test_run_with_profile_escaping_and_execution() {
        let shell_path = "/bin/zsh";

@@ -167,9 +270,6 @@ mod tests {
        for (input, expected_cmd, expected_output) in cases {
            use std::collections::HashMap;
            use std::path::PathBuf;
            use std::sync::Arc;

            use tokio::sync::Notify;

            use crate::exec::ExecParams;
            use crate::exec::SandboxType;

@@ -219,7 +319,6 @@ mod tests {
                    justification: None,
                },
                SandboxType::None,
                Arc::new(Notify::new()),
                &SandboxPolicy::DangerFullAccess,
                &None,
                None,

@@ -237,3 +336,97 @@ mod tests {
        }
    }
}

#[cfg(test)]
#[cfg(target_os = "windows")]
mod tests_windows {
    use super::*;

    #[test]
    fn test_format_default_shell_invocation_powershell() {
        let cases = vec![
            (
                Shell::PowerShell(PowerShellConfig {
                    exe: "pwsh.exe".to_string(),
                    bash_exe_fallback: None,
                }),
                vec!["bash", "-lc", "echo hello"],
                vec!["pwsh.exe", "-NoProfile", "-Command", "echo hello"],
            ),
            (
                Shell::PowerShell(PowerShellConfig {
                    exe: "powershell.exe".to_string(),
                    bash_exe_fallback: None,
                }),
                vec!["bash", "-lc", "echo hello"],
                vec!["powershell.exe", "-NoProfile", "-Command", "echo hello"],
            ),
            (
                Shell::PowerShell(PowerShellConfig {
                    exe: "pwsh.exe".to_string(),
                    bash_exe_fallback: Some(PathBuf::from("bash.exe")),
                }),
                vec!["bash", "-lc", "echo hello"],
                vec!["bash.exe", "-lc", "echo hello"],
            ),
            (
                Shell::PowerShell(PowerShellConfig {
                    exe: "pwsh.exe".to_string(),
                    bash_exe_fallback: Some(PathBuf::from("bash.exe")),
                }),
                vec![
                    "bash",
                    "-lc",
                    "apply_patch <<'EOF'\n*** Begin Patch\n*** Update File: destination_file.txt\n-original content\n+modified content\n*** End Patch\nEOF",
                ],
                vec![
                    "bash.exe",
                    "-lc",
                    "apply_patch <<'EOF'\n*** Begin Patch\n*** Update File: destination_file.txt\n-original content\n+modified content\n*** End Patch\nEOF",
                ],
            ),
            (
                Shell::PowerShell(PowerShellConfig {
                    exe: "pwsh.exe".to_string(),
                    bash_exe_fallback: Some(PathBuf::from("bash.exe")),
                }),
                vec!["echo", "hello"],
                vec!["pwsh.exe", "-NoProfile", "-Command", "echo hello"],
            ),
            (
                Shell::PowerShell(PowerShellConfig {
                    exe: "pwsh.exe".to_string(),
                    bash_exe_fallback: Some(PathBuf::from("bash.exe")),
                }),
                vec!["pwsh.exe", "-NoProfile", "-Command", "echo hello"],
                vec!["pwsh.exe", "-NoProfile", "-Command", "echo hello"],
            ),
            (
                // TODO (CODEX_2900): Handle escaping newlines for powershell invocation.
                Shell::PowerShell(PowerShellConfig {
                    exe: "powershell.exe".to_string(),
                    bash_exe_fallback: Some(PathBuf::from("bash.exe")),
                }),
                vec![
                    "codex-mcp-server.exe",
                    "--codex-run-as-apply-patch",
                    "*** Begin Patch\n*** Update File: C:\\Users\\person\\destination_file.txt\n-original content\n+modified content\n*** End Patch",
                ],
                vec![
                    "codex-mcp-server.exe",
                    "--codex-run-as-apply-patch",
                    "*** Begin Patch\n*** Update File: C:\\Users\\person\\destination_file.txt\n-original content\n+modified content\n*** End Patch",
                ],
            ),
        ];

        for (shell, input, expected_cmd) in cases {
            let actual_cmd = shell
                .format_default_shell_invocation(input.iter().map(|s| s.to_string()).collect());
            assert_eq!(
                actual_cmd,
                Some(expected_cmd.iter().map(|s| s.to_string()).collect())
            );
        }
    }
}
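A self-contained sketch of the `strip_bash_lc` pattern the PowerShell branch relies on: a command is rewritten only when it is exactly `bash -lc <script>`; everything else falls through to the PowerShell re-quoting path. The helper below is a stand-in for the crate's private function, not its actual implementation.

```rust
fn strip_bash_lc(command: &[String]) -> Option<String> {
    match command {
        // Exactly three items: "bash", "-lc", and the script to run.
        [bash, flag, script] if bash.as_str() == "bash" && flag.as_str() == "-lc" => {
            Some(script.clone())
        }
        _ => None,
    }
}

fn main() {
    let cmd: Vec<String> = ["bash", "-lc", "echo hello"]
        .iter()
        .map(|s| s.to_string())
        .collect();
    assert_eq!(strip_bash_lc(&cmd), Some("echo hello".to_string()));

    // Anything else is left for the PowerShell re-quoting path.
    let other: Vec<String> = ["echo", "hello"].iter().map(|s| s.to_string()).collect();
    assert_eq!(strip_bash_lc(&other), None);
}
```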
codex-rs/core/src/terminal.rs (new file, 72 lines)
@@ -0,0 +1,72 @@
use std::sync::OnceLock;

static TERMINAL: OnceLock<String> = OnceLock::new();

pub fn user_agent() -> String {
    TERMINAL.get_or_init(detect_terminal).to_string()
}

/// Sanitize a header value to be used in a User-Agent string.
///
/// This function replaces any characters that are not allowed in a User-Agent string with an underscore.
///
/// # Arguments
///
/// * `value` - The value to sanitize.
fn is_valid_header_value_char(c: char) -> bool {
    c.is_ascii_alphanumeric() || c == '-' || c == '_' || c == '.' || c == '/'
}

fn sanitize_header_value(value: String) -> String {
    value.replace(|c| !is_valid_header_value_char(c), "_")
}

fn detect_terminal() -> String {
    sanitize_header_value(
        if let Ok(tp) = std::env::var("TERM_PROGRAM")
            && !tp.trim().is_empty()
        {
            let ver = std::env::var("TERM_PROGRAM_VERSION").ok();
            match ver {
                Some(v) if !v.trim().is_empty() => format!("{tp}/{v}"),
                _ => tp,
            }
        } else if let Ok(v) = std::env::var("WEZTERM_VERSION") {
            if !v.trim().is_empty() {
                format!("WezTerm/{v}")
            } else {
                "WezTerm".to_string()
            }
        } else if std::env::var("KITTY_WINDOW_ID").is_ok()
            || std::env::var("TERM")
                .map(|t| t.contains("kitty"))
                .unwrap_or(false)
        {
            "kitty".to_string()
        } else if std::env::var("ALACRITTY_SOCKET").is_ok()
            || std::env::var("TERM")
                .map(|t| t == "alacritty")
                .unwrap_or(false)
        {
            "Alacritty".to_string()
        } else if let Ok(v) = std::env::var("KONSOLE_VERSION") {
            if !v.trim().is_empty() {
                format!("Konsole/{v}")
            } else {
                "Konsole".to_string()
            }
        } else if std::env::var("GNOME_TERMINAL_SCREEN").is_ok() {
            return "gnome-terminal".to_string();
        } else if let Ok(v) = std::env::var("VTE_VERSION") {
            if !v.trim().is_empty() {
                format!("VTE/{v}")
            } else {
                "VTE".to_string()
            }
        } else if std::env::var("WT_SESSION").is_ok() {
            return "WindowsTerminal".to_string();
        } else {
            std::env::var("TERM").unwrap_or_else(|_| "unknown".to_string())
        },
    )
}
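A self-contained sketch of the header-value sanitization above: anything outside `[A-Za-z0-9-_./]` becomes `_`, so whatever the terminal detection produces is always safe to embed in a User-Agent header.

```rust
fn sanitize_header_value(value: String) -> String {
    let ok = |c: char| c.is_ascii_alphanumeric() || matches!(c, '-' | '_' | '.' | '/');
    value.replace(|c: char| !ok(c), "_")
}

fn main() {
    // Already header-safe: passes through unchanged.
    assert_eq!(sanitize_header_value("iTerm.app/3.5".into()), "iTerm.app/3.5");
    // Spaces and parentheses are not header-safe and get replaced.
    assert_eq!(
        sanitize_header_value("Apple Terminal (2.14)".into()),
        "Apple_Terminal__2.14_"
    );
}
```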
145
codex-rs/core/src/tool_apply_patch.rs
Normal file
145
codex-rs/core/src/tool_apply_patch.rs
Normal file
@@ -0,0 +1,145 @@
|
||||
use serde::Deserialize;
|
||||
use serde::Serialize;
|
||||
use std::collections::BTreeMap;
|
||||
|
||||
use crate::openai_tools::FreeformTool;
|
||||
use crate::openai_tools::FreeformToolFormat;
|
||||
use crate::openai_tools::JsonSchema;
|
||||
use crate::openai_tools::OpenAiTool;
|
||||
use crate::openai_tools::ResponsesApiTool;
|
||||
|
||||
#[derive(Serialize, Deserialize)]
|
||||
pub(crate) struct ApplyPatchToolArgs {
|
||||
pub(crate) input: String,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Hash)]
|
||||
#[serde(rename_all = "snake_case")]
|
||||
pub enum ApplyPatchToolType {
|
||||
Freeform,
|
||||
Function,
|
||||
}
|
||||
|
||||
/// Returns a custom tool that can be used to edit files. Well-suited for GPT-5 models
|
||||
/// https://platform.openai.com/docs/guides/function-calling#custom-tools
|
||||
pub(crate) fn create_apply_patch_freeform_tool() -> OpenAiTool {
|
||||
OpenAiTool::Freeform(FreeformTool {
|
||||
name: "apply_patch".to_string(),
|
||||
description: "Use the `apply_patch` tool to edit files".to_string(),
|
||||
format: FreeformToolFormat {
|
||||
r#type: "grammar".to_string(),
|
||||
syntax: "lark".to_string(),
|
||||
definition: r#"start: begin_patch hunk+ end_patch
|
||||
begin_patch: "*** Begin Patch" LF
|
||||
end_patch: "*** End Patch" LF?
|
||||
|
||||
hunk: add_hunk | delete_hunk | update_hunk
|
||||
add_hunk: "*** Add File: " filename LF add_line+
|
||||
delete_hunk: "*** Delete File: " filename LF
|
||||
update_hunk: "*** Update File: " filename LF change_move? change?
|
||||
|
||||
filename: /(.+)/
|
||||
add_line: "+" /(.+)/ LF -> line
|
||||
|
||||
change_move: "*** Move to: " filename LF
|
||||
change: (change_context | change_line)+ eof_line?
|
||||
change_context: ("@@" | "@@ " /(.+)/) LF
|
||||
change_line: ("+" | "-" | " ") /(.+)/ LF
|
||||
eof_line: "*** End of File" LF
|
||||
|
||||
%import common.LF
|
||||
"#
|
||||
.to_string(),
|
||||
},
|
||||
})
|
||||
}

/// Returns a json tool that can be used to edit files. Should only be used with gpt-oss models
pub(crate) fn create_apply_patch_json_tool() -> OpenAiTool {
    let mut properties = BTreeMap::new();
    properties.insert(
        "input".to_string(),
        JsonSchema::String {
            description: Some(r#"The entire contents of the apply_patch command"#.to_string()),
        },
    );

    OpenAiTool::Function(ResponsesApiTool {
        name: "apply_patch".to_string(),
        description: r#"Use the `apply_patch` tool to edit files.
Your patch language is a stripped‑down, file‑oriented diff format designed to be easy to parse and safe to apply. You can think of it as a high‑level envelope:

*** Begin Patch
[ one or more file sections ]
*** End Patch

Within that envelope, you get a sequence of file operations.
You MUST include a header to specify the action you are taking.
Each operation starts with one of three headers:

*** Add File: <path> - create a new file. Every following line is a + line (the initial contents).
*** Delete File: <path> - remove an existing file. Nothing follows.
*** Update File: <path> - patch an existing file in place (optionally with a rename).

May be immediately followed by *** Move to: <new path> if you want to rename the file.
Then one or more “hunks”, each introduced by @@ (optionally followed by a hunk header).
Within a hunk each line starts with "+" (an added line), "-" (a removed line), or " " (a context line).

For instructions on [context_before] and [context_after]:
- By default, show 3 lines of code immediately above and 3 lines immediately below each change. If a change is within 3 lines of a previous change, do NOT duplicate the first change’s [context_after] lines in the second change’s [context_before] lines.
- If 3 lines of context is insufficient to uniquely identify the snippet of code within the file, use the @@ operator to indicate the class or function to which the snippet belongs. For instance, we might have:
@@ class BaseClass
[3 lines of pre-context]
- [old_code]
+ [new_code]
[3 lines of post-context]

- If a code block is repeated so many times in a class or function such that even a single `@@` statement and 3 lines of context cannot uniquely identify the snippet of code, you can use multiple `@@` statements to jump to the right context. For instance:

@@ class BaseClass
@@ def method():
[3 lines of pre-context]
- [old_code]
+ [new_code]
[3 lines of post-context]

The full grammar definition is below:
Patch := Begin { FileOp } End
Begin := "*** Begin Patch" NEWLINE
End := "*** End Patch" NEWLINE
FileOp := AddFile | DeleteFile | UpdateFile
AddFile := "*** Add File: " path NEWLINE { "+" line NEWLINE }
DeleteFile := "*** Delete File: " path NEWLINE
UpdateFile := "*** Update File: " path NEWLINE [ MoveTo ] { Hunk }
MoveTo := "*** Move to: " newPath NEWLINE
Hunk := "@@" [ header ] NEWLINE { HunkLine } [ "*** End of File" NEWLINE ]
HunkLine := (" " | "-" | "+") text NEWLINE

A full patch can combine several operations:

*** Begin Patch
*** Add File: hello.txt
+Hello world
*** Update File: src/app.py
*** Move to: src/main.py
@@ def greet():
-print("Hi")
+print("Hello, world!")
*** Delete File: obsolete.txt
*** End Patch

It is important to remember:

- You must include a header with your intended action (Add/Delete/Update)
- You must prefix new lines with `+` even when creating a new file
- File references can only be relative, NEVER ABSOLUTE.
"#
        .to_string(),
        strict: false,
        parameters: JsonSchema::Object {
            properties,
            required: Some(vec!["input".to_string()]),
            additional_properties: Some(false),
        },
    })
}
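
To make the call shape concrete: the model invokes this tool with a single required "input" string carrying the entire patch. A minimal sketch of what such arguments decode to, assuming serde_json is available; the payload itself is illustrative, not taken from the diff:

// Illustrative only: a model's tool-call arguments for apply_patch.
let args = serde_json::json!({
    "input": "*** Begin Patch\n*** Add File: hello.txt\n+Hello world\n*** End Patch\n"
});
assert_eq!(args["input"].as_str().unwrap().lines().next(), Some("*** Begin Patch"));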

@@ -466,7 +466,6 @@ fn is_windows_drive_or_unc_root(p: &std::path::Path) -> bool {

#[cfg(test)]
mod tests {
    #![allow(clippy::unwrap_used)]
    use super::*;
    use pretty_assertions::assert_eq;
    use tempfile::tempdir;

codex-rs/core/src/user_agent.rs (Normal file, 37 lines)
@@ -0,0 +1,37 @@
const DEFAULT_ORIGINATOR: &str = "codex_cli_rs";

pub fn get_codex_user_agent(originator: Option<&str>) -> String {
    let build_version = env!("CARGO_PKG_VERSION");
    let os_info = os_info::get();
    format!(
        "{}/{build_version} ({} {}; {}) {}",
        originator.unwrap_or(DEFAULT_ORIGINATOR),
        os_info.os_type(),
        os_info.version(),
        os_info.architecture().unwrap_or("unknown"),
        crate::terminal::user_agent()
    )
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_get_codex_user_agent() {
        let user_agent = get_codex_user_agent(None);
        assert!(user_agent.starts_with("codex_cli_rs/"));
    }

    #[test]
    #[cfg(target_os = "macos")]
    fn test_macos() {
        use regex_lite::Regex;
        let user_agent = get_codex_user_agent(None);
        let re = Regex::new(
            r"^codex_cli_rs/\d+\.\d+\.\d+ \(Mac OS \d+\.\d+\.\d+; (x86_64|arm64)\) (\S+)$",
        )
        .unwrap();
        assert!(re.is_match(&user_agent));
    }
}
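
A brief usage sketch (not part of the diff): passing Some(..) substitutes a custom originator for DEFAULT_ORIGINATOR as the first segment of the user agent. The originator name and the output in the comment are hypothetical; real values depend on the build and host.

let ua = get_codex_user_agent(Some("codex_vscode")); // "codex_vscode" is a made-up originator
// e.g. "codex_vscode/0.20.0 (Mac OS 14.5.0; arm64) vscode" (illustrative)
assert!(ua.starts_with("codex_vscode/"));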

@@ -20,7 +20,6 @@ pub(crate) enum UserNotification {

#[cfg(test)]
mod tests {
    #![allow(clippy::unwrap_used)]
    use super::*;

    #[test]

@@ -1,32 +1,11 @@
use std::path::Path;
use std::sync::Arc;
use std::time::Duration;

use rand::Rng;
use tokio::sync::Notify;
use tracing::debug;

const INITIAL_DELAY_MS: u64 = 200;
const BACKOFF_FACTOR: f64 = 2.0;

/// Returns a `Notify` whose waiters are woken each time SIGINT occurs.
pub fn notify_on_sigint() -> Arc<Notify> {
    let notify = Arc::new(Notify::new());

    tokio::spawn({
        let notify = Arc::clone(&notify);
        async move {
            loop {
                tokio::signal::ctrl_c().await.ok();
                debug!("Keyboard interrupt");
                notify.notify_waiters();
            }
        }
    });

    notify
}
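
A short consumer sketch (not in the diff), assuming notify_on_sigint is in scope and Tokio's full feature set is enabled:

// Hypothetical caller: block the current task until the next Ctrl-C.
#[tokio::main]
async fn main() {
    let sigint = notify_on_sigint();
    sigint.notified().await;
    println!("received SIGINT, shutting down");
}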

pub(crate) fn backoff(attempt: u64) -> Duration {
    let exp = BACKOFF_FACTOR.powi(attempt.saturating_sub(1) as i32);
    let base = (INITIAL_DELAY_MS as f64 * exp) as u64;
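
The hunk is cut off above after the computation of base. The delays grow geometrically: attempt 1 -> 200 ms, attempt 2 -> 400 ms, attempt 3 -> 800 ms, and so on. For context, here is a plausible shape of the whole function; the jitter step is an assumption inferred from the rand::Rng import, not the actual remainder of the diff:

// Sketch only; the true tail of backoff() is not shown in this diff.
pub(crate) fn backoff(attempt: u64) -> Duration {
    let exp = BACKOFF_FACTOR.powi(attempt.saturating_sub(1) as i32);
    let base = (INITIAL_DELAY_MS as f64 * exp) as u64;
    // Assumed: add up to 10% random jitter so concurrent retries spread out.
    let jitter = rand::thread_rng().gen_range(0..=base / 10);
    Duration::from_millis(base + jitter)
}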

Some files were not shown because too many files have changed in this diff.